changeset 5000:743a8b12b7de libavcodec
Faster SSE FFT/MDCT, patch by Zuxy Meng %zuxy P meng A gmail P com%
Unrolls some loops, utilizing all 8 XMM registers. fft-test
shows a ~10% speed-up in (I)FFT and a ~8% speed-up in (I)MDCT on Dothan (Pentium M).
| author | gpoirier |
|---|---|
| date | Sun, 13 May 2007 16:32:32 +0000 |
| parents | 0c4bf6b7d1c6 |
| children | 75bf61c6c385 |
| files | i386/fft_sse.c |
| diffstat | 1 files changed, 66 insertions(+), 14 deletions(-) |
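For orientation, the first hunk of the diff below vectorizes a standard radix-2 FFT pass: each iteration multiplies the upper half of the block by a twiddle factor and then does an add/sub butterfly. The scalar sketch below shows only that arithmetic; the `complex_t` type and the `fft_pass_ref` name are illustrative, not FFmpeg's (the real code operates on `FFTComplex` arrays with an interleaved twiddle table `cptr`, and after this patch handles four butterflies per SSE iteration).

```c
/* Illustrative scalar reference only -- not FFmpeg code. */
typedef struct { float re, im; } complex_t;

static void fft_pass_ref(complex_t *p, complex_t *q, const complex_t *w, int n)
{
    for (int j = 0; j < n; j++) {
        /* t = w[j] * q[j]: the "cre*re - cim*im" / "cim*re + cre*im" products
         * computed by the mulps/addps pairs in the asm below */
        complex_t t = {
            w[j].re * q[j].re - w[j].im * q[j].im,
            w[j].im * q[j].re + w[j].re * q[j].im,
        };
        /* butterfly: q' = p - t, p' = p + t (the addps/subps into xmm0/xmm3) */
        q[j].re = p[j].re - t.re;  q[j].im = p[j].im - t.im;
        p[j].re = p[j].re + t.re;  p[j].im = p[j].im + t.im;
    }
}
```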
```diff
--- a/i386/fft_sse.c	Sun May 13 14:01:15 2007 +0000
+++ b/i386/fft_sse.c	Sun May 13 16:32:32 2007 +0000
@@ -100,20 +100,33 @@
         i = nloops*8;
         asm volatile(
             "1: \n\t"
-            "sub $16, %0 \n\t"
+            "sub $32, %0 \n\t"
             "movaps (%2,%0), %%xmm1 \n\t"
             "movaps (%1,%0), %%xmm0 \n\t"
+            "movaps 16(%2,%0), %%xmm5 \n\t"
+            "movaps 16(%1,%0), %%xmm4 \n\t"
             "movaps %%xmm1, %%xmm2 \n\t"
+            "movaps %%xmm5, %%xmm6 \n\t"
             "shufps $0xA0, %%xmm1, %%xmm1 \n\t"
             "shufps $0xF5, %%xmm2, %%xmm2 \n\t"
+            "shufps $0xA0, %%xmm5, %%xmm5 \n\t"
+            "shufps $0xF5, %%xmm6, %%xmm6 \n\t"
             "mulps (%3,%0,2), %%xmm1 \n\t"   // cre*re cim*re
             "mulps 16(%3,%0,2), %%xmm2 \n\t" // -cim*im cre*im
+            "mulps 32(%3,%0,2), %%xmm5 \n\t" // cre*re cim*re
+            "mulps 48(%3,%0,2), %%xmm6 \n\t" // -cim*im cre*im
             "addps %%xmm2, %%xmm1 \n\t"
+            "addps %%xmm6, %%xmm5 \n\t"
             "movaps %%xmm0, %%xmm3 \n\t"
+            "movaps %%xmm4, %%xmm7 \n\t"
             "addps %%xmm1, %%xmm0 \n\t"
             "subps %%xmm1, %%xmm3 \n\t"
+            "addps %%xmm5, %%xmm4 \n\t"
+            "subps %%xmm5, %%xmm7 \n\t"
             "movaps %%xmm0, (%1,%0) \n\t"
             "movaps %%xmm3, (%2,%0) \n\t"
+            "movaps %%xmm4, 16(%1,%0) \n\t"
+            "movaps %%xmm7, 16(%2,%0) \n\t"
             "jg 1b \n\t"
             :"+r"(i)
             :"r"(p), "r"(p + nloops), "r"(cptr)
@@ -141,67 +154,106 @@
     n4 = n >> 2;
     n8 = n >> 3;
 
-    asm volatile ("movaps %0, %%xmm7\n\t"::"m"(*p1m1p1m1));
+#ifdef ARCH_X86_64
+    asm volatile ("movaps %0, %%xmm8\n\t"::"m"(*p1m1p1m1));
+#define P1M1P1M1 "%%xmm8"
+#else
+#define P1M1P1M1 "%4"
+#endif
 
     /* pre rotation */
     in1 = input;
     in2 = input + n2 - 4;
 
-    /* Complex multiplication
-       Two complex products per iteration, we could have 4 with 8 xmm
-       registers, 8 with 16 xmm registers.
-       Maybe we should unroll more.
-    */
-    for (k = 0; k < n4; k += 2) {
+    /* Complex multiplication */
+    for (k = 0; k < n4; k += 4) {
         asm volatile (
             "movaps %0, %%xmm0 \n\t"     // xmm0 = r0 X r1 X : in2
             "movaps %1, %%xmm3 \n\t"     // xmm3 = X i1 X i0: in1
+            "movaps -16+%0, %%xmm4 \n\t" // xmm4 = r0 X r1 X : in2
+            "movaps 16+%1, %%xmm7 \n\t"  // xmm7 = X i1 X i0: in1
             "movlps %2, %%xmm1 \n\t"     // xmm1 = X X R1 R0: tcos
             "movlps %3, %%xmm2 \n\t"     // xmm2 = X X I1 I0: tsin
+            "movlps 8+%2, %%xmm5 \n\t"   // xmm5 = X X R1 R0: tcos
+            "movlps 8+%3, %%xmm6 \n\t"   // xmm6 = X X I1 I0: tsin
             "shufps $95, %%xmm0, %%xmm0 \n\t"  // xmm0 = r1 r1 r0 r0
             "shufps $160,%%xmm3, %%xmm3 \n\t"  // xmm3 = i1 i1 i0 i0
+            "shufps $95, %%xmm4, %%xmm4 \n\t"  // xmm4 = r1 r1 r0 r0
+            "shufps $160,%%xmm7, %%xmm7 \n\t"  // xmm7 = i1 i1 i0 i0
             "unpcklps %%xmm2, %%xmm1 \n\t"     // xmm1 = I1 R1 I0 R0
+            "unpcklps %%xmm6, %%xmm5 \n\t"     // xmm5 = I1 R1 I0 R0
             "movaps %%xmm1, %%xmm2 \n\t"       // xmm2 = I1 R1 I0 R0
-            "xorps %%xmm7, %%xmm2 \n\t"        // xmm2 = -I1 R1 -I0 R0
+            "movaps %%xmm5, %%xmm6 \n\t"       // xmm6 = I1 R1 I0 R0
+            "xorps "P1M1P1M1", %%xmm2 \n\t"    // xmm2 = -I1 R1 -I0 R0
+            "xorps "P1M1P1M1", %%xmm6 \n\t"    // xmm6 = -I1 R1 -I0 R0
             "mulps %%xmm1, %%xmm0 \n\t"        // xmm0 = rI rR rI rR
+            "mulps %%xmm5, %%xmm4 \n\t"        // xmm4 = rI rR rI rR
             "shufps $177,%%xmm2, %%xmm2 \n\t"  // xmm2 = R1 -I1 R0 -I0
+            "shufps $177,%%xmm6, %%xmm6 \n\t"  // xmm6 = R1 -I1 R0 -I0
             "mulps %%xmm2, %%xmm3 \n\t"        // xmm3 = Ri -Ii Ri -Ii
+            "mulps %%xmm6, %%xmm7 \n\t"        // xmm7 = Ri -Ii Ri -Ii
             "addps %%xmm3, %%xmm0 \n\t"        // xmm0 = result
+            "addps %%xmm7, %%xmm4 \n\t"        // xmm4 = result
             ::"m"(in2[-2*k]), "m"(in1[2*k]),
               "m"(tcos[k]), "m"(tsin[k])
+#ifndef ARCH_X86_64
+            ,"m"(*p1m1p1m1)
+#endif
         );
         /* Should be in the same block, hack for gcc2.95 & gcc3 */
         asm (
             "movlps %%xmm0, %0 \n\t"
             "movhps %%xmm0, %1 \n\t"
-            :"=m"(z[revtab[k]]), "=m"(z[revtab[k + 1]])
+            "movlps %%xmm4, %2 \n\t"
+            "movhps %%xmm4, %3 \n\t"
+            :"=m"(z[revtab[k]]), "=m"(z[revtab[k + 1]]),
+             "=m"(z[revtab[k + 2]]), "=m"(z[revtab[k + 3]])
         );
     }
 
     ff_fft_calc_sse(&s->fft, z);
 
-    /* Not currently needed, added for safety */
-    asm volatile ("movaps %0, %%xmm7\n\t"::"m"(*p1m1p1m1));
+#ifndef ARCH_X86_64
+#undef P1M1P1M1
+#define P1M1P1M1 "%3"
+#endif
 
     /* post rotation + reordering */
-    for (k = 0; k < n4; k += 2) {
+    for (k = 0; k < n4; k += 4) {
         asm (
             "movaps %0, %%xmm0 \n\t"     // xmm0 = i1 r1 i0 r0: z
+            "movaps 16+%0, %%xmm4 \n\t"  // xmm4 = i1 r1 i0 r0: z
             "movlps %1, %%xmm1 \n\t"     // xmm1 = X X R1 R0: tcos
+            "movlps 8+%1, %%xmm5 \n\t"   // xmm5 = X X R1 R0: tcos
             "movaps %%xmm0, %%xmm3 \n\t" // xmm3 = i1 r1 i0 r0
+            "movaps %%xmm4, %%xmm7 \n\t" // xmm7 = i1 r1 i0 r0
             "movlps %2, %%xmm2 \n\t"     // xmm2 = X X I1 I0: tsin
+            "movlps 8+%2, %%xmm6 \n\t"   // xmm6 = X X I1 I0: tsin
             "shufps $160,%%xmm0, %%xmm0 \n\t"  // xmm0 = r1 r1 r0 r0
             "shufps $245,%%xmm3, %%xmm3 \n\t"  // xmm3 = i1 i1 i0 i0
+            "shufps $160,%%xmm4, %%xmm4 \n\t"  // xmm4 = r1 r1 r0 r0
+            "shufps $245,%%xmm7, %%xmm7 \n\t"  // xmm7 = i1 i1 i0 i0
             "unpcklps %%xmm2, %%xmm1 \n\t"     // xmm1 = I1 R1 I0 R0
+            "unpcklps %%xmm6, %%xmm5 \n\t"     // xmm5 = I1 R1 I0 R0
             "movaps %%xmm1, %%xmm2 \n\t"       // xmm2 = I1 R1 I0 R0
-            "xorps %%xmm7, %%xmm2 \n\t"        // xmm2 = -I1 R1 -I0 R0
+            "movaps %%xmm5, %%xmm6 \n\t"       // xmm6 = I1 R1 I0 R0
+            "xorps "P1M1P1M1", %%xmm2 \n\t"    // xmm2 = -I1 R1 -I0 R0
             "mulps %%xmm1, %%xmm0 \n\t"        // xmm0 = rI rR rI rR
+            "xorps "P1M1P1M1", %%xmm6 \n\t"    // xmm6 = -I1 R1 -I0 R0
+            "mulps %%xmm5, %%xmm4 \n\t"        // xmm4 = rI rR rI rR
             "shufps $177,%%xmm2, %%xmm2 \n\t"  // xmm2 = R1 -I1 R0 -I0
+            "shufps $177,%%xmm6, %%xmm6 \n\t"  // xmm6 = R1 -I1 R0 -I0
             "mulps %%xmm2, %%xmm3 \n\t"        // xmm3 = Ri -Ii Ri -Ii
+            "mulps %%xmm6, %%xmm7 \n\t"        // xmm7 = Ri -Ii Ri -Ii
             "addps %%xmm3, %%xmm0 \n\t"        // xmm0 = result
+            "addps %%xmm7, %%xmm4 \n\t"        // xmm4 = result
             "movaps %%xmm0, %0 \n\t"
+            "movaps %%xmm4, 16+%0 \n\t"
             :"+m"(z[k])
             :"m"(tcos[k]), "m"(tsin[k])
+#ifndef ARCH_X86_64
+            ,"m"(*p1m1p1m1)
+#endif
        );
    }
```