changeset 10205:89a852950c34 libavcodec
ARM: interleave cos/sin tables for improved NEON MDCT
author    mru
date      Mon, 21 Sep 2009 02:56:09 +0000
parents   db033d1fbf44
children  87ab0f0e0baf
files     arm/fft_init_arm.c arm/mdct_neon.S
diffstat  2 files changed, 18 insertions(+), 37 deletions(-)
--- a/arm/fft_init_arm.c	Mon Sep 21 02:56:06 2009 +0000
+++ b/arm/fft_init_arm.c	Mon Sep 21 02:56:09 2009 +0000
@@ -35,5 +35,6 @@
         s->imdct_calc  = ff_imdct_calc_neon;
         s->imdct_half  = ff_imdct_half_neon;
         s->mdct_calc   = ff_mdct_calc_neon;
+        s->permutation = FF_MDCT_PERM_INTERLEAVE;
     }
 }
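Note on the layout change: FF_MDCT_PERM_INTERLEAVE signals that the twiddle factors should be stored as a single table with cos and sin values interleaved (c0,s0,c1,s1,...) rather than as the separate tcos/tsin arrays the old code loaded. A rough C sketch of producing that layout is shown below; the helper name interleave_cos_sin is made up for illustration and is not code from this changeset:

/* Hypothetical sketch: given separate cos/sin twiddle tables of length n4,
 * build the interleaved layout c0,s0,c1,s1,... implied by
 * FF_MDCT_PERM_INTERLEAVE, so one vld2.32 can fetch and split both. */
static void interleave_cos_sin(float *dst, const float *tcos,
                               const float *tsin, int n4)
{
    for (int i = 0; i < n4; i++) {
        dst[2 * i]     = tcos[i];   /* even slots: cos twiddles */
        dst[2 * i + 1] = tsin[i];   /* odd slots:  sin twiddles */
    }
}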
--- a/arm/mdct_neon.S	Mon Sep 21 02:56:06 2009 +0000
+++ b/arm/mdct_neon.S	Mon Sep 21 02:56:09 2009 +0000
@@ -30,20 +30,18 @@
         mov             r12, #1
         ldr             lr,  [r0, #28]          @ mdct_bits
         ldr             r4,  [r0, #32]          @ tcos
-        ldr             r5,  [r0, #36]          @ tsin
         ldr             r3,  [r0, #8]           @ revtab
         lsl             r12, r12, lr            @ n  = 1 << nbits
         lsr             lr,  r12, #2            @ n4 = n >> 2
         add             r7,  r2,  r12, lsl #1
-        mov             r12, #-16
+        mov             r12, #-16
         sub             r7,  r7,  #16
 
         vld2.32         {d16-d17},[r7,:128],r12 @ d16=x,n1 d17=x,n0
         vld2.32         {d0-d1},  [r2,:128]!    @ d0 =m0,x d1 =m1,x
         vrev64.32       d17, d17
-        vld1.32         {d2},     [r4,:64]!     @ d2=c0,c1
+        vld2.32         {d2,d3},  [r4,:128]!    @ d2=c0,c1 d3=s0,s2
         vmul.f32        d6,  d17, d2
-        vld1.32         {d3},     [r5,:64]!     @ d3=s0,s1
         vmul.f32        d7,  d0,  d2
 1:
         subs            lr,  lr,  #2
@@ -60,9 +58,8 @@
         vld2.32         {d16-d17},[r7,:128],r12
         vld2.32         {d0-d1},  [r2,:128]!
         vrev64.32       d17, d17
-        vld1.32         {d2},     [r4,:64]!
+        vld2.32         {d2,d3},  [r4,:128]!    @ d2=c0,c1 d3=s0,s2
         vmul.f32        d6,  d17, d2
-        vld1.32         {d3},     [r5,:64]!
         vmul.f32        d7,  d0,  d2
         vst2.32         {d4[0],d5[0]}, [r6,:64]
         vst2.32         {d4[1],d5[1]}, [r8,:64]
@@ -77,34 +74,28 @@
         mov             r12, #1
 
         ldr             lr,  [r4, #28]          @ mdct_bits
-        ldr             r5,  [r4, #36]          @ tsin
         ldr             r4,  [r4, #32]          @ tcos
         lsl             r12, r12, lr            @ n  = 1 << nbits
         lsr             lr,  r12, #3            @ n8 = n >> 3
 
-        add             r4,  r4,  lr,  lsl #2
-        add             r5,  r5,  lr,  lsl #2
+        add             r4,  r4,  lr,  lsl #3
         add             r6,  r6,  lr,  lsl #3
-        sub             r1,  r4,  #8
-        sub             r2,  r5,  #8
+        sub             r1,  r4,  #16
         sub             r3,  r6,  #16
 
         mov             r7,  #-16
-        mov             r12, #-8
         mov             r8,  r6
         mov             r0,  r3
 
         vld2.32         {d0-d1},  [r3,:128], r7 @ d0 =i1,r1 d1 =i0,r0
         vld2.32         {d20-d21},[r6,:128]!    @ d20=i2,r2 d21=i3,r3
-        vld1.32         {d18},    [r2,:64], r12 @ d18=s1,s0
+        vld2.32         {d16,d18},[r1,:128], r7 @ d16=c1,c0 d18=s1,s0
 1:
         subs            lr,  lr,  #2
         vmul.f32        d7,  d0,  d18
-        vld1.32         {d19},    [r5,:64]!     @ d19=s2,s3
+        vld2.32         {d17,d19},[r4,:128]!    @ d17=c2,c3 d19=s2,s3
         vmul.f32        d4,  d1,  d18
-        vld1.32         {d16},    [r1,:64], r12 @ d16=c1,c0
         vmul.f32        d5,  d21, d19
-        vld1.32         {d17},    [r4,:64]!     @ d17=c2,c3
         vmul.f32        d6,  d20, d19
         vmul.f32        d22, d1,  d16
         vmul.f32        d23, d21, d17
@@ -117,7 +108,7 @@
         beq             1f
         vld2.32         {d0-d1},  [r3,:128], r7
         vld2.32         {d20-d21},[r6,:128]!
-        vld1.32         {d18},    [r2,:64], r12
+        vld2.32         {d16,d18},[r1,:128], r7 @ d16=c1,c0 d18=s1,s0
         vrev64.32       q3,  q3
         vst2.32         {d4,d6},  [r0,:128], r7
         vst2.32         {d5,d7},  [r8,:128]!
@@ -172,7 +163,6 @@
         mov             r12, #1
         ldr             lr,  [r0, #28]          @ mdct_bits
         ldr             r4,  [r0, #32]          @ tcos
-        ldr             r5,  [r0, #36]          @ tsin
         ldr             r3,  [r0, #8]           @ revtab
         lsl             lr,  r12, lr            @ n = 1 << nbits
         add             r7,  r2,  lr            @ in4u
@@ -187,9 +177,8 @@
         vrev64.32       q9,  q9                 @ in4d0,in4d1 in3d0,in3d1
         vld2.32         {d0, d1}, [r2,:128]!    @ in3u0,in3u1 x,x
         vsub.f32        d20, d18, d20           @ in4d-in4u I
-        vld1.32         {d2},     [r4,:64]!     @ c0,c1
+        vld2.32         {d2,d3},  [r4,:128]!    @ c0,c1 s0,s1
         vadd.f32        d0,  d0,  d19           @ in3u+in3d -R
-        vld1.32         {d3},     [r5,:64]!     @ s0,s1
 1:
         vmul.f32        d7,  d20, d3            @ I*s
         vmul.f32        d6,  d0,  d2            @ -R*c
@@ -211,9 +200,8 @@
         vrev64.32       q9,  q9                 @ in4d0,in4d1 in3d0,in3d1
         vld2.32         {d0, d1}, [r2,:128]!    @ in3u0,in3u1 x,x
         vsub.f32        d20, d18, d20           @ in4d-in4u I
-        vld1.32         {d2},     [r4,:64]!     @ c0,c1
+        vld2.32         {d2,d3},  [r4,:128]!    @ c0,c1 s0,s1
         vadd.f32        d0,  d0,  d19           @ in3u+in3d -R
-        vld1.32         {d3},     [r5,:64]!     @ s0,s1
         vst2.32         {d6[0],d7[0]}, [r6,:64]
         vst2.32         {d6[1],d7[1]}, [r10,:64]
         b               1b
@@ -236,9 +224,8 @@
         vrev64.32       q9,  q9                 @ in2d0,in2d1 in1d0,in1d1
         vld2.32         {d0, d1}, [r2,:128]!    @ in0u0,in0u1 x,x
         vsub.f32        d0,  d0,  d18           @ in0u-in2d R
-        vld1.32         {d2},     [r4,:64]!     @ c0,c1
+        vld2.32         {d2,d3},  [r4,:128]!    @ c0,c1 s0,s1
         vadd.f32        d20, d20, d19           @ in2u+in1d -I
-        vld1.32         {d3},     [r5,:64]!     @ s0,s1
 1:
         vmul.f32        d6,  d0,  d2            @ R*c
         vmul.f32        d7,  d20, d3            @ -I*s
@@ -259,9 +246,8 @@
         vrev64.32       q9,  q9                 @ in2d0,in2d1 in1d0,in1d1
         vld2.32         {d0, d1}, [r2,:128]!    @ in0u0,in0u1 x,x
         vsub.f32        d0,  d0,  d18           @ in0u-in2d R
-        vld1.32         {d2},     [r4,:64]!     @ c0,c1
+        vld2.32         {d2,d3},  [r4,:128]!    @ c0,c1 s0,s1
         vadd.f32        d20, d20, d19           @ in2u+in1d -I
-        vld1.32         {d3},     [r5,:64]!     @ s0,s1
         vst2.32         {d6[0],d7[0]}, [r6,:64]
         vst2.32         {d6[1],d7[1]}, [r10,:64]
         b               1b
@@ -275,34 +261,28 @@
         mov             r12, #1
 
         ldr             lr,  [r4, #28]          @ mdct_bits
-        ldr             r5,  [r4, #36]          @ tsin
         ldr             r4,  [r4, #32]          @ tcos
         lsl             r12, r12, lr            @ n  = 1 << nbits
         lsr             lr,  r12, #3            @ n8 = n >> 3
 
-        add             r4,  r4,  lr,  lsl #2
-        add             r5,  r5,  lr,  lsl #2
+        add             r4,  r4,  lr,  lsl #3
         add             r6,  r6,  lr,  lsl #3
-        sub             r1,  r4,  #8
-        sub             r2,  r5,  #8
+        sub             r1,  r4,  #16
         sub             r3,  r6,  #16
 
         mov             r7,  #-16
-        mov             r12, #-8
         mov             r8,  r6
         mov             r0,  r3
 
         vld2.32         {d0-d1},  [r3,:128], r7 @ d0 =r1,i1 d1 =r0,i0
         vld2.32         {d20-d21},[r6,:128]!    @ d20=r2,i2 d21=r3,i3
-        vld1.32         {d18},    [r2,:64], r12 @ d18=s1,s0
+        vld2.32         {d16,d18},[r1,:128], r7 @ c1,c0 s1,s0
 1:
         subs            lr,  lr,  #2
         vmul.f32        d7,  d0,  d18           @ r1*s1,r0*s0
-        vld1.32         {d19},    [r5,:64]!     @ s2,s3
+        vld2.32         {d17,d19},[r4,:128]!    @ c2,c3 s2,s3
         vmul.f32        d4,  d1,  d18           @ i1*s1,i0*s0
-        vld1.32         {d16},    [r1,:64], r12 @ c1,c0
         vmul.f32        d5,  d21, d19           @ i2*s2,i3*s3
-        vld1.32         {d17},    [r4,:64]!     @ c2,c3
         vmul.f32        d6,  d20, d19           @ r2*s2,r3*s3
         vmul.f32        d24, d0,  d16           @ r1*c1,r0*c0
         vmul.f32        d25, d20, d17           @ r2*c2,r3*c3
@@ -316,7 +296,7 @@
         beq             1f
         vld2.32         {d0-d1},  [r3,:128], r7
         vld2.32         {d20-d21},[r6,:128]!
-        vld1.32         {d18},    [r2,:64], r12
+        vld2.32         {d16,d18},[r1,:128], r7 @ c1,c0 s1,s0
         vrev64.32       q3,  q3
         vst2.32         {d4,d6},  [r0,:128], r7
         vst2.32         {d5,d7},  [r8,:128]!
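Why this helps on NEON: with the interleaved table, each iteration issues one 128-bit vld2.32 through a single pointer, and the instruction deinterleaves cos and sin into separate D registers as it loads; the old code needed two 64-bit vld1.32 loads through separate tcos/tsin pointers. That removes a load from each inner loop and frees the second table pointer as an address register. An intrinsics-level C sketch of the same load pattern, purely illustrative (load_twiddles is a hypothetical helper, not code from this patch):

#include <arm_neon.h>

/* Illustration of the load pattern the patch uses in assembly: one vld2.32
 * from an interleaved c0,s0,c1,s1,... table yields two cos values and two sin
 * values already split into separate registers. */
static inline void load_twiddles(const float *cs_interleaved,
                                 float32x2_t *c, float32x2_t *s)
{
    float32x2x2_t cs = vld2_f32(cs_interleaved); /* cs.val[0]=c0,c1  cs.val[1]=s0,s1 */
    *c = cs.val[0];
    *s = cs.val[1];
}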