diff i386/snowdsp_mmx.c @ 5591:642588a60570 libavcodec
Update MMX code to the latest Snow changes.
Note: the code can likely overflow and thus needs some more changes.
SSE2 updated too, but disabled as it is untested.
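The overflow note refers to the coefficient type shrinking from 32-bit DWTELEM to 16-bit IDWTELEM lanes: the lifting arithmetic below is now done with paddw/psraw, so intermediates wrap modulo 2^16 per lane. A minimal sketch of the first lifting term, in the (3*x + 4) >> 3 form the new word-sized code computes, with made-up coefficient values:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustration only: first lifting term (3*(ref[i] + ref[i+1]) + 4) >> 3,
 * once with 32-bit intermediates (old DWTELEM path) and once emulating the
 * 16-bit word arithmetic of paddw/psraw.  Coefficient values are invented. */
int main(void)
{
    int16_t r0 = 12000, r1 = 12000;           /* hypothetical large coefficients */

    int32_t wide   = (3 * ((int32_t)r0 + r1) + 4) >> 3;  /* old 32-bit result: 9000 */

    int16_t sum    = (int16_t)(r0 + r1);      /* paddw: 24000, still fits           */
    int16_t lifted = (int16_t)(3 * sum + 4);  /* paddw chain: 72004 wraps to 6468   */
    int16_t narrow = (int16_t)(lifted >> 3);  /* psraw $3: 808                      */

    printf("32-bit: %d  16-bit: %d\n", wide, narrow);
    return 0;
}
```

With coefficients this large the 16-bit path yields 808 where the old 32-bit path yielded 9000, which is the kind of wraparound the note warns about.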
| author | michael |
|---|---|
| date | Sat, 25 Aug 2007 15:20:56 +0000 |
| parents | 93082c591c8b |
| children | fadffa1e5aef |
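One pattern recurs throughout the diff below: the per-lane rounding constants are no longer loaded from memory but built in a register with pcmpeqw / psllw $15 / psrlw $n (and the SSE2 equivalents), which leaves 1 << (15 - n) in every 16-bit lane. A rough scalar model of that idiom, for illustration only:

```c
#include <stdint.h>

/* Scalar model of the constant-building sequence used in the patch:
 *   pcmpeqw  %%mm7, %%mm7   ; all bits set
 *   psllw    $15,   %%mm7   ; only bit 15 left in each word
 *   psrlw    $n,    %%mm7   ; each word now holds 1 << (15 - n)
 * e.g. n = 13 gives 4 (rounding before "psraw $3"), n = 14 gives 2,
 * and n = 12 gives 8 (rounding before the "psraw $4" in the OBMC add). */
uint16_t rounding_word(unsigned n)
{
    uint16_t w = 0xFFFF;        /* pcmpeqw            */
    w = (uint16_t)(w << 15);    /* psllw $15 -> 0x8000 */
    return (uint16_t)(w >> n);  /* psrlw $n  -> 1 << (15 - n) */
}
```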
--- a/i386/snowdsp_mmx.c Sat Aug 25 09:23:17 2007 +0000 +++ b/i386/snowdsp_mmx.c Sat Aug 25 15:20:56 2007 +0000 @@ -23,18 +23,18 @@ #include "snow.h" #include "x86_cpu.h" -void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width){ +void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){ const int w2= (width+1)>>1; // SSE2 code runs faster with pointers aligned on a 32-byte boundary. - DWTELEM temp_buf[(width>>1) + 4]; - DWTELEM * const temp = temp_buf + 4 - (((int)temp_buf & 0xF) >> 2); + IDWTELEM temp_buf[(width>>1) + 4]; + IDWTELEM * const temp = temp_buf + 4 - (((int)temp_buf & 0xF) >> 2); const int w_l= (width>>1); const int w_r= w2 - 1; int i; { // Lift 0 - DWTELEM * const ref = b + w2 - 1; - DWTELEM b_0 = b[0]; //By allowing the first entry in b[0] to be calculated twice + IDWTELEM * const ref = b + w2 - 1; + IDWTELEM b_0 = b[0]; //By allowing the first entry in b[0] to be calculated twice // (the first time erroneously), we allow the SSE2 code to run an extra pass. // The savings in code and time are well worth having to store this value and // calculate b[0] correctly afterwards. @@ -42,31 +42,31 @@ i = 0; asm volatile( "pcmpeqd %%xmm7, %%xmm7 \n\t" - "pslld $31, %%xmm7 \n\t" - "psrld $29, %%xmm7 \n\t" + "psllw $15, %%xmm7 \n\t" + "psrlw $13, %%xmm7 \n\t" ::); - for(; i<w_l-7; i+=8){ + for(; i<w_l-15; i+=16){ asm volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" - "movdqu 4(%1), %%xmm2 \n\t" - "movdqu 20(%1), %%xmm6 \n\t" - "paddd %%xmm1, %%xmm2 \n\t" - "paddd %%xmm5, %%xmm6 \n\t" + "movdqu 2(%1), %%xmm2 \n\t" + "movdqu 18(%1), %%xmm6 \n\t" + "paddw %%xmm1, %%xmm2 \n\t" + "paddw %%xmm5, %%xmm6 \n\t" "movdqa %%xmm2, %%xmm0 \n\t" "movdqa %%xmm6, %%xmm4 \n\t" - "paddd %%xmm2, %%xmm2 \n\t" - "paddd %%xmm6, %%xmm6 \n\t" - "paddd %%xmm0, %%xmm2 \n\t" - "paddd %%xmm4, %%xmm6 \n\t" - "paddd %%xmm7, %%xmm2 \n\t" - "paddd %%xmm7, %%xmm6 \n\t" - "psrad $3, %%xmm2 \n\t" - "psrad $3, %%xmm6 \n\t" + "paddw %%xmm2, %%xmm2 \n\t" + "paddw %%xmm6, %%xmm6 \n\t" + "paddw %%xmm0, %%xmm2 \n\t" + "paddw %%xmm4, %%xmm6 \n\t" + "paddw %%xmm7, %%xmm2 \n\t" + "paddw %%xmm7, %%xmm6 \n\t" + "psraw $3, %%xmm2 \n\t" + "psraw $3, %%xmm6 \n\t" "movdqa (%0), %%xmm0 \n\t" "movdqa 16(%0), %%xmm4 \n\t" - "psubd %%xmm2, %%xmm0 \n\t" - "psubd %%xmm6, %%xmm4 \n\t" + "psubw %%xmm2, %%xmm0 \n\t" + "psubw %%xmm6, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) @@ -78,24 +78,24 @@ } { // Lift 1 - DWTELEM * const dst = b+w2; + IDWTELEM * const dst = b+w2; i = 0; - for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){ + for(; (((long)&dst[i]) & 0x1F) && i<w_r; i++){ dst[i] = dst[i] - (b[i] + b[i + 1]); } - for(; i<w_r-7; i+=8){ + for(; i<w_r-15; i+=16){ asm volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" - "movdqu 4(%1), %%xmm2 \n\t" - "movdqu 20(%1), %%xmm6 \n\t" - "paddd %%xmm1, %%xmm2 \n\t" - "paddd %%xmm5, %%xmm6 \n\t" + "movdqu 2(%1), %%xmm2 \n\t" + "movdqu 18(%1), %%xmm6 \n\t" + "paddw %%xmm1, %%xmm2 \n\t" + "paddw %%xmm5, %%xmm6 \n\t" "movdqa (%0), %%xmm0 \n\t" "movdqa 16(%0), %%xmm4 \n\t" - "psubd %%xmm2, %%xmm0 \n\t" - "psubd %%xmm6, %%xmm4 \n\t" + "psubw %%xmm2, %%xmm0 \n\t" + "psubw %%xmm6, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&dst[i]), "r"(&b[i]) @@ -106,33 +106,33 @@ } { // Lift 2 - DWTELEM * const ref = b+w2 - 1; - DWTELEM b_0 = b[0]; + IDWTELEM * const ref = b+w2 - 1; + IDWTELEM b_0 = b[0]; i = 0; asm volatile( - "pslld $1, %%xmm7 \n\t" + "psllw $1, %%xmm7 \n\t" ::); - for(; i<w_l-7; 
i+=8){ + for(; i<w_l-15; i+=16){ asm volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" - "movdqu 4(%1), %%xmm0 \n\t" - "movdqu 20(%1), %%xmm4 \n\t" //FIXME try aligned reads and shifts - "paddd %%xmm1, %%xmm0 \n\t" - "paddd %%xmm5, %%xmm4 \n\t" - "paddd %%xmm7, %%xmm0 \n\t" - "paddd %%xmm7, %%xmm4 \n\t" + "movdqu 2(%1), %%xmm0 \n\t" + "movdqu 18(%1), %%xmm4 \n\t" //FIXME try aligned reads and shifts + "paddw %%xmm1, %%xmm0 \n\t" + "paddw %%xmm5, %%xmm4 \n\t" + "paddw %%xmm7, %%xmm0 \n\t" + "paddw %%xmm7, %%xmm4 \n\t" "movdqa (%0), %%xmm1 \n\t" "movdqa 16(%0), %%xmm5 \n\t" - "psrad $2, %%xmm0 \n\t" - "psrad $2, %%xmm4 \n\t" - "paddd %%xmm1, %%xmm0 \n\t" - "paddd %%xmm5, %%xmm4 \n\t" - "psrad $2, %%xmm0 \n\t" - "psrad $2, %%xmm4 \n\t" - "paddd %%xmm1, %%xmm0 \n\t" - "paddd %%xmm5, %%xmm4 \n\t" + "psraw $2, %%xmm0 \n\t" + "psraw $2, %%xmm4 \n\t" + "paddw %%xmm1, %%xmm0 \n\t" + "paddw %%xmm5, %%xmm4 \n\t" + "psraw $2, %%xmm0 \n\t" + "psraw $2, %%xmm4 \n\t" + "paddw %%xmm1, %%xmm0 \n\t" + "paddw %%xmm5, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) @@ -144,26 +144,26 @@ } { // Lift 3 - DWTELEM * const src = b+w2; + IDWTELEM * const src = b+w2; i = 0; - for(; (((long)&temp[i]) & 0xF) && i<w_r; i++){ + for(; (((long)&temp[i]) & 0x1F) && i<w_r; i++){ temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS); } for(; i<w_r-7; i+=8){ asm volatile( - "movdqu 4(%1), %%xmm2 \n\t" - "movdqu 20(%1), %%xmm6 \n\t" - "paddd (%1), %%xmm2 \n\t" - "paddd 16(%1), %%xmm6 \n\t" + "movdqu 2(%1), %%xmm2 \n\t" + "movdqu 18(%1), %%xmm6 \n\t" + "paddw (%1), %%xmm2 \n\t" + "paddw 16(%1), %%xmm6 \n\t" "movdqu (%0), %%xmm0 \n\t" "movdqu 16(%0), %%xmm4 \n\t" - "paddd %%xmm2, %%xmm0 \n\t" - "paddd %%xmm6, %%xmm4 \n\t" - "psrad $1, %%xmm2 \n\t" - "psrad $1, %%xmm6 \n\t" - "paddd %%xmm0, %%xmm2 \n\t" - "paddd %%xmm4, %%xmm6 \n\t" + "paddw %%xmm2, %%xmm0 \n\t" + "paddw %%xmm6, %%xmm4 \n\t" + "psraw $1, %%xmm2 \n\t" + "psraw $1, %%xmm6 \n\t" + "paddw %%xmm0, %%xmm2 \n\t" + "paddw %%xmm4, %%xmm6 \n\t" "movdqa %%xmm2, (%2) \n\t" "movdqa %%xmm6, 16(%2) \n\t" :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i]) @@ -176,183 +176,183 @@ { snow_interleave_line_header(&i, width, b, temp); + for (; (i & 0x3E) != 0x3E; i-=2){ + b[i+1] = temp[i>>1]; + b[i] = b[i>>1]; + } + for (i-=62; i>=0; i-=64){ + asm volatile( + "movdqa (%1), %%xmm0 \n\t" + "movdqa 16(%1), %%xmm2 \n\t" + "movdqa 32(%1), %%xmm4 \n\t" + "movdqa 48(%1), %%xmm6 \n\t" + "movdqa (%1), %%xmm1 \n\t" + "movdqa 16(%1), %%xmm3 \n\t" + "movdqa 32(%1), %%xmm5 \n\t" + "movdqa 48(%1), %%xmm7 \n\t" + "punpcklwd (%2), %%xmm0 \n\t" + "punpcklwd 16(%2), %%xmm2 \n\t" + "punpcklwd 32(%2), %%xmm4 \n\t" + "punpcklwd 48(%2), %%xmm6 \n\t" + "movdqa %%xmm0, (%0) \n\t" + "movdqa %%xmm2, 32(%0) \n\t" + "movdqa %%xmm4, 64(%0) \n\t" + "movdqa %%xmm6, 96(%0) \n\t" + "punpckhwd (%2), %%xmm1 \n\t" + "punpckhwd 16(%2), %%xmm3 \n\t" + "punpckhwd 32(%2), %%xmm5 \n\t" + "punpckhwd 48(%2), %%xmm7 \n\t" + "movdqa %%xmm1, 16(%0) \n\t" + "movdqa %%xmm3, 48(%0) \n\t" + "movdqa %%xmm5, 80(%0) \n\t" + "movdqa %%xmm7, 112(%0) \n\t" + :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1]) + : "memory" + ); + } + } +} + +void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width){ + const int w2= (width+1)>>1; + IDWTELEM temp[width >> 1]; + const int w_l= (width>>1); + const int w_r= w2 - 1; + int i; + + { // Lift 0 + IDWTELEM * const ref = b + w2 - 1; + + i = 1; + b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); + asm volatile( + "pcmpeqw %%mm7, %%mm7 \n\t" + 
"psllw $15, %%mm7 \n\t" + "psrlw $13, %%mm7 \n\t" + ::); + for(; i<w_l-7; i+=8){ + asm volatile( + "movq (%1), %%mm2 \n\t" + "movq 8(%1), %%mm6 \n\t" + "paddw 2(%1), %%mm2 \n\t" + "paddw 10(%1), %%mm6 \n\t" + "movq %%mm2, %%mm0 \n\t" + "movq %%mm6, %%mm4 \n\t" + "paddw %%mm2, %%mm2 \n\t" + "paddw %%mm6, %%mm6 \n\t" + "paddw %%mm0, %%mm2 \n\t" + "paddw %%mm4, %%mm6 \n\t" + "paddw %%mm7, %%mm2 \n\t" + "paddw %%mm7, %%mm6 \n\t" + "psraw $3, %%mm2 \n\t" + "psraw $3, %%mm6 \n\t" + "movq (%0), %%mm0 \n\t" + "movq 8(%0), %%mm4 \n\t" + "psubw %%mm2, %%mm0 \n\t" + "psubw %%mm6, %%mm4 \n\t" + "movq %%mm0, (%0) \n\t" + "movq %%mm4, 8(%0) \n\t" + :: "r"(&b[i]), "r"(&ref[i]) + : "memory" + ); + } + snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS); + } + + { // Lift 1 + IDWTELEM * const dst = b+w2; + + i = 0; + for(; i<w_r-7; i+=8){ + asm volatile( + "movq (%1), %%mm2 \n\t" + "movq 8(%1), %%mm6 \n\t" + "paddw 2(%1), %%mm2 \n\t" + "paddw 10(%1), %%mm6 \n\t" + "movq (%0), %%mm0 \n\t" + "movq 8(%0), %%mm4 \n\t" + "psubw %%mm2, %%mm0 \n\t" + "psubw %%mm6, %%mm4 \n\t" + "movq %%mm0, (%0) \n\t" + "movq %%mm4, 8(%0) \n\t" + :: "r"(&dst[i]), "r"(&b[i]) + : "memory" + ); + } + snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS); + } + + { // Lift 2 + IDWTELEM * const ref = b+w2 - 1; + + i = 1; + b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS); + asm volatile( + "psllw $1, %%mm7 \n\t" + ::); + for(; i<w_l-7; i+=8){ + asm volatile( + "movq (%1), %%mm0 \n\t" + "movq 8(%1), %%mm4 \n\t" + "paddw 2(%1), %%mm0 \n\t" + "paddw 10(%1), %%mm4 \n\t" + "paddw %%mm7, %%mm0 \n\t" + "paddw %%mm7, %%mm4 \n\t" + "psraw $2, %%mm0 \n\t" + "psraw $2, %%mm4 \n\t" + "movq (%0), %%mm1 \n\t" + "movq 8(%0), %%mm5 \n\t" + "paddw %%mm1, %%mm0 \n\t" + "paddw %%mm5, %%mm4 \n\t" + "psraw $2, %%mm0 \n\t" + "psraw $2, %%mm4 \n\t" + "paddw %%mm1, %%mm0 \n\t" + "paddw %%mm5, %%mm4 \n\t" + "movq %%mm0, (%0) \n\t" + "movq %%mm4, 8(%0) \n\t" + :: "r"(&b[i]), "r"(&ref[i]) + : "memory" + ); + } + snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l); + } + + { // Lift 3 + IDWTELEM * const src = b+w2; + i = 0; + + for(; i<w_r-7; i+=8){ + asm volatile( + "movq 2(%1), %%mm2 \n\t" + "movq 10(%1), %%mm6 \n\t" + "paddw (%1), %%mm2 \n\t" + "paddw 8(%1), %%mm6 \n\t" + "movq (%0), %%mm0 \n\t" + "movq 8(%0), %%mm4 \n\t" + "paddw %%mm2, %%mm0 \n\t" + "paddw %%mm6, %%mm4 \n\t" + "psraw $1, %%mm2 \n\t" + "psraw $1, %%mm6 \n\t" + "paddw %%mm0, %%mm2 \n\t" + "paddw %%mm4, %%mm6 \n\t" + "movq %%mm2, (%2) \n\t" + "movq %%mm6, 8(%2) \n\t" + :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i]) + : "memory" + ); + } + snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS); + } + + { + snow_interleave_line_header(&i, width, b, temp); + for (; (i & 0x1E) != 0x1E; i-=2){ b[i+1] = temp[i>>1]; b[i] = b[i>>1]; } for (i-=30; i>=0; i-=32){ asm volatile( - "movdqa (%1), %%xmm0 \n\t" - "movdqa 16(%1), %%xmm2 \n\t" - "movdqa 32(%1), %%xmm4 \n\t" - "movdqa 48(%1), %%xmm6 \n\t" - "movdqa (%1), %%xmm1 \n\t" - "movdqa 16(%1), %%xmm3 \n\t" - "movdqa 32(%1), %%xmm5 \n\t" - "movdqa 48(%1), %%xmm7 \n\t" - "punpckldq (%2), %%xmm0 \n\t" - "punpckldq 16(%2), %%xmm2 \n\t" - "punpckldq 32(%2), %%xmm4 \n\t" - "punpckldq 48(%2), %%xmm6 \n\t" - "movdqa %%xmm0, (%0) \n\t" - "movdqa %%xmm2, 32(%0) \n\t" - "movdqa %%xmm4, 64(%0) \n\t" - "movdqa %%xmm6, 96(%0) \n\t" - "punpckhdq (%2), %%xmm1 \n\t" - "punpckhdq 16(%2), %%xmm3 \n\t" - "punpckhdq 32(%2), %%xmm5 \n\t" - "punpckhdq 48(%2), 
%%xmm7 \n\t" - "movdqa %%xmm1, 16(%0) \n\t" - "movdqa %%xmm3, 48(%0) \n\t" - "movdqa %%xmm5, 80(%0) \n\t" - "movdqa %%xmm7, 112(%0) \n\t" - :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1]) - : "memory" - ); - } - } -} - -void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width){ - const int w2= (width+1)>>1; - DWTELEM temp[width >> 1]; - const int w_l= (width>>1); - const int w_r= w2 - 1; - int i; - - { // Lift 0 - DWTELEM * const ref = b + w2 - 1; - - i = 1; - b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); - asm volatile( - "pcmpeqd %%mm7, %%mm7 \n\t" - "pslld $31, %%mm7 \n\t" - "psrld $29, %%mm7 \n\t" - ::); - for(; i<w_l-3; i+=4){ - asm volatile( - "movq (%1), %%mm2 \n\t" - "movq 8(%1), %%mm6 \n\t" - "paddd 4(%1), %%mm2 \n\t" - "paddd 12(%1), %%mm6 \n\t" - "movq %%mm2, %%mm0 \n\t" - "movq %%mm6, %%mm4 \n\t" - "paddd %%mm2, %%mm2 \n\t" - "paddd %%mm6, %%mm6 \n\t" - "paddd %%mm0, %%mm2 \n\t" - "paddd %%mm4, %%mm6 \n\t" - "paddd %%mm7, %%mm2 \n\t" - "paddd %%mm7, %%mm6 \n\t" - "psrad $3, %%mm2 \n\t" - "psrad $3, %%mm6 \n\t" - "movq (%0), %%mm0 \n\t" - "movq 8(%0), %%mm4 \n\t" - "psubd %%mm2, %%mm0 \n\t" - "psubd %%mm6, %%mm4 \n\t" - "movq %%mm0, (%0) \n\t" - "movq %%mm4, 8(%0) \n\t" - :: "r"(&b[i]), "r"(&ref[i]) - : "memory" - ); - } - snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS); - } - - { // Lift 1 - DWTELEM * const dst = b+w2; - - i = 0; - for(; i<w_r-3; i+=4){ - asm volatile( - "movq (%1), %%mm2 \n\t" - "movq 8(%1), %%mm6 \n\t" - "paddd 4(%1), %%mm2 \n\t" - "paddd 12(%1), %%mm6 \n\t" - "movq (%0), %%mm0 \n\t" - "movq 8(%0), %%mm4 \n\t" - "psubd %%mm2, %%mm0 \n\t" - "psubd %%mm6, %%mm4 \n\t" - "movq %%mm0, (%0) \n\t" - "movq %%mm4, 8(%0) \n\t" - :: "r"(&dst[i]), "r"(&b[i]) - : "memory" - ); - } - snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS); - } - - { // Lift 2 - DWTELEM * const ref = b+w2 - 1; - - i = 1; - b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS); - asm volatile( - "pslld $1, %%mm7 \n\t" - ::); - for(; i<w_l-3; i+=4){ - asm volatile( - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm4 \n\t" - "paddd 4(%1), %%mm0 \n\t" - "paddd 12(%1), %%mm4 \n\t" - "paddd %%mm7, %%mm0 \n\t" - "paddd %%mm7, %%mm4 \n\t" - "psrad $2, %%mm0 \n\t" - "psrad $2, %%mm4 \n\t" - "movq (%0), %%mm1 \n\t" - "movq 8(%0), %%mm5 \n\t" - "paddd %%mm1, %%mm0 \n\t" - "paddd %%mm5, %%mm4 \n\t" - "psrad $2, %%mm0 \n\t" - "psrad $2, %%mm4 \n\t" - "paddd %%mm1, %%mm0 \n\t" - "paddd %%mm5, %%mm4 \n\t" - "movq %%mm0, (%0) \n\t" - "movq %%mm4, 8(%0) \n\t" - :: "r"(&b[i]), "r"(&ref[i]) - : "memory" - ); - } - snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l); - } - - { // Lift 3 - DWTELEM * const src = b+w2; - i = 0; - - for(; i<w_r-3; i+=4){ - asm volatile( - "movq 4(%1), %%mm2 \n\t" - "movq 12(%1), %%mm6 \n\t" - "paddd (%1), %%mm2 \n\t" - "paddd 8(%1), %%mm6 \n\t" - "movq (%0), %%mm0 \n\t" - "movq 8(%0), %%mm4 \n\t" - "paddd %%mm2, %%mm0 \n\t" - "paddd %%mm6, %%mm4 \n\t" - "psrad $1, %%mm2 \n\t" - "psrad $1, %%mm6 \n\t" - "paddd %%mm0, %%mm2 \n\t" - "paddd %%mm4, %%mm6 \n\t" - "movq %%mm2, (%2) \n\t" - "movq %%mm6, 8(%2) \n\t" - :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i]) - : "memory" - ); - } - snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS); - } - - { - snow_interleave_line_header(&i, width, b, temp); - - for (; (i & 0xE) != 0xE; i-=2){ - b[i+1] = temp[i>>1]; - b[i] = b[i>>1]; - } - for (i-=14; i>=0; i-=16){ - asm volatile( "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm2 
\n\t" "movq 16(%1), %%mm4 \n\t" @@ -361,18 +361,18 @@ "movq 8(%1), %%mm3 \n\t" "movq 16(%1), %%mm5 \n\t" "movq 24(%1), %%mm7 \n\t" - "punpckldq (%2), %%mm0 \n\t" - "punpckldq 8(%2), %%mm2 \n\t" - "punpckldq 16(%2), %%mm4 \n\t" - "punpckldq 24(%2), %%mm6 \n\t" + "punpcklwd (%2), %%mm0 \n\t" + "punpcklwd 8(%2), %%mm2 \n\t" + "punpcklwd 16(%2), %%mm4 \n\t" + "punpcklwd 24(%2), %%mm6 \n\t" "movq %%mm0, (%0) \n\t" "movq %%mm2, 16(%0) \n\t" "movq %%mm4, 32(%0) \n\t" "movq %%mm6, 48(%0) \n\t" - "punpckhdq (%2), %%mm1 \n\t" - "punpckhdq 8(%2), %%mm3 \n\t" - "punpckhdq 16(%2), %%mm5 \n\t" - "punpckhdq 24(%2), %%mm7 \n\t" + "punpckhwd (%2), %%mm1 \n\t" + "punpckhwd 8(%2), %%mm3 \n\t" + "punpckhwd 16(%2), %%mm5 \n\t" + "punpckhwd 24(%2), %%mm7 \n\t" "movq %%mm1, 8(%0) \n\t" "movq %%mm3, 24(%0) \n\t" "movq %%mm5, 40(%0) \n\t" @@ -385,40 +385,40 @@ } #define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\ - ""op" (%%"r",%%"REG_d",4), %%"t0" \n\t"\ - ""op" 16(%%"r",%%"REG_d",4), %%"t1" \n\t"\ - ""op" 32(%%"r",%%"REG_d",4), %%"t2" \n\t"\ - ""op" 48(%%"r",%%"REG_d",4), %%"t3" \n\t" + ""op" (%%"r",%%"REG_d",2), %%"t0" \n\t"\ + ""op" 16(%%"r",%%"REG_d",2), %%"t1" \n\t"\ + ""op" 32(%%"r",%%"REG_d",2), %%"t2" \n\t"\ + ""op" 48(%%"r",%%"REG_d",2), %%"t3" \n\t" #define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\ snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3) #define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\ - snow_vertical_compose_sse2_load_add("paddd",r,t0,t1,t2,t3) + snow_vertical_compose_sse2_load_add("paddw",r,t0,t1,t2,t3) #define snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)\ - "psubd %%"s0", %%"t0" \n\t"\ - "psubd %%"s1", %%"t1" \n\t"\ - "psubd %%"s2", %%"t2" \n\t"\ - "psubd %%"s3", %%"t3" \n\t" + "psubw %%"s0", %%"t0" \n\t"\ + "psubw %%"s1", %%"t1" \n\t"\ + "psubw %%"s2", %%"t2" \n\t"\ + "psubw %%"s3", %%"t3" \n\t" #define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\ - "movdqa %%"s0", (%%"w",%%"REG_d",4) \n\t"\ - "movdqa %%"s1", 16(%%"w",%%"REG_d",4) \n\t"\ - "movdqa %%"s2", 32(%%"w",%%"REG_d",4) \n\t"\ - "movdqa %%"s3", 48(%%"w",%%"REG_d",4) \n\t" + "movdqa %%"s0", (%%"w",%%"REG_d",2) \n\t"\ + "movdqa %%"s1", 16(%%"w",%%"REG_d",2) \n\t"\ + "movdqa %%"s2", 32(%%"w",%%"REG_d",2) \n\t"\ + "movdqa %%"s3", 48(%%"w",%%"REG_d",2) \n\t" #define snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)\ - "psrad $"n", %%"t0" \n\t"\ - "psrad $"n", %%"t1" \n\t"\ - "psrad $"n", %%"t2" \n\t"\ - "psrad $"n", %%"t3" \n\t" + "psraw $"n", %%"t0" \n\t"\ + "psraw $"n", %%"t1" \n\t"\ + "psraw $"n", %%"t2" \n\t"\ + "psraw $"n", %%"t3" \n\t" #define snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\ - "paddd %%"s0", %%"t0" \n\t"\ - "paddd %%"s1", %%"t1" \n\t"\ - "paddd %%"s2", %%"t2" \n\t"\ - "paddd %%"s3", %%"t3" \n\t" + "paddw %%"s0", %%"t0" \n\t"\ + "paddw %%"s1", %%"t1" \n\t"\ + "paddw %%"s2", %%"t2" \n\t"\ + "paddw %%"s3", %%"t3" \n\t" #define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\ "movdqa %%"s0", %%"t0" \n\t"\ @@ -426,10 +426,10 @@ "movdqa %%"s2", %%"t2" \n\t"\ "movdqa %%"s3", %%"t3" \n\t" -void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width){ +void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){ long i = width; - while(i & 0xF) + while(i & 0x1F) { i--; b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS; @@ -452,8 +452,8 @@ snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6") 
"pcmpeqd %%xmm1, %%xmm1 \n\t" - "pslld $31, %%xmm1 \n\t" - "psrld $29, %%xmm1 \n\t" + "psllw $15, %%xmm1 \n\t" + "psrlw $13, %%xmm1 \n\t" "mov %5, %%"REG_a" \n\t" snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6") @@ -472,8 +472,8 @@ snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6") "pcmpeqd %%xmm1, %%xmm1 \n\t" - "pslld $31, %%xmm1 \n\t" - "psrld $30, %%xmm1 \n\t" + "psllw $15, %%xmm1 \n\t" + "psrlw $14, %%xmm1 \n\t" "mov %1, %%"REG_S" \n\t" snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6") @@ -488,7 +488,7 @@ snow_vertical_compose_sse2_store(REG_a,"xmm0","xmm2","xmm4","xmm6") "2: \n\t" - "sub $16, %%"REG_d" \n\t" + "sub $32, %%"REG_d" \n\t" "jge 1b \n\t" :"+d"(i) : @@ -497,25 +497,25 @@ } #define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\ - ""op" (%%"r",%%"REG_d",4), %%"t0" \n\t"\ - ""op" 8(%%"r",%%"REG_d",4), %%"t1" \n\t"\ - ""op" 16(%%"r",%%"REG_d",4), %%"t2" \n\t"\ - ""op" 24(%%"r",%%"REG_d",4), %%"t3" \n\t" + ""op" (%%"r",%%"REG_d",2), %%"t0" \n\t"\ + ""op" 8(%%"r",%%"REG_d",2), %%"t1" \n\t"\ + ""op" 16(%%"r",%%"REG_d",2), %%"t2" \n\t"\ + ""op" 24(%%"r",%%"REG_d",2), %%"t3" \n\t" #define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\ snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3) #define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\ - snow_vertical_compose_mmx_load_add("paddd",r,t0,t1,t2,t3) + snow_vertical_compose_mmx_load_add("paddw",r,t0,t1,t2,t3) #define snow_vertical_compose_mmx_sub(s0,s1,s2,s3,t0,t1,t2,t3)\ snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3) #define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\ - "movq %%"s0", (%%"w",%%"REG_d",4) \n\t"\ - "movq %%"s1", 8(%%"w",%%"REG_d",4) \n\t"\ - "movq %%"s2", 16(%%"w",%%"REG_d",4) \n\t"\ - "movq %%"s3", 24(%%"w",%%"REG_d",4) \n\t" + "movq %%"s0", (%%"w",%%"REG_d",2) \n\t"\ + "movq %%"s1", 8(%%"w",%%"REG_d",2) \n\t"\ + "movq %%"s2", 16(%%"w",%%"REG_d",2) \n\t"\ + "movq %%"s3", 24(%%"w",%%"REG_d",2) \n\t" #define snow_vertical_compose_mmx_sra(n,t0,t1,t2,t3)\ snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3) @@ -529,9 +529,9 @@ "movq %%"s2", %%"t2" \n\t"\ "movq %%"s3", %%"t3" \n\t" -void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width){ +void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){ long i = width; - while(i & 0x7) + while(i & 15) { i--; b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS; @@ -553,9 +553,9 @@ snow_vertical_compose_mmx_r2r_add("mm0","mm2","mm4","mm6","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6") - "pcmpeqd %%mm1, %%mm1 \n\t" - "pslld $31, %%mm1 \n\t" - "psrld $29, %%mm1 \n\t" + "pcmpeqw %%mm1, %%mm1 \n\t" + "psllw $15, %%mm1 \n\t" + "psrlw $13, %%mm1 \n\t" "mov %5, %%"REG_a" \n\t" snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6") @@ -573,9 +573,9 @@ snow_vertical_compose_mmx_sra("2","mm0","mm2","mm4","mm6") snow_vertical_compose_mmx_add(REG_c,"mm0","mm2","mm4","mm6") - "pcmpeqd %%mm1, %%mm1 \n\t" - "pslld $31, %%mm1 \n\t" - "psrld $30, %%mm1 \n\t" + "pcmpeqw %%mm1, %%mm1 \n\t" + "psllw $15, %%mm1 \n\t" + "psrlw $14, %%mm1 \n\t" "mov %1, %%"REG_S" \n\t" snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6") @@ -590,7 +590,7 @@ snow_vertical_compose_mmx_store(REG_a,"mm0","mm2","mm4","mm6") "2: \n\t" - "sub $8, %%"REG_d" \n\t" 
+ "sub $16, %%"REG_d" \n\t" "jge 1b \n\t" :"+d"(i) : @@ -599,7 +599,7 @@ } #define snow_inner_add_yblock_sse2_header \ - DWTELEM * * dst_array = sb->line + src_y;\ + IDWTELEM * * dst_array = sb->line + src_y;\ long tmp;\ asm volatile(\ "mov %7, %%"REG_c" \n\t"\ @@ -607,8 +607,8 @@ "mov %4, %%"REG_S" \n\t"\ "pxor %%xmm7, %%xmm7 \n\t" /* 0 */\ "pcmpeqd %%xmm3, %%xmm3 \n\t"\ - "pslld $31, %%xmm3 \n\t"\ - "psrld $24, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\ + "psllw $15, %%xmm3 \n\t"\ + "psrlw $12, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\ "1: \n\t"\ "mov %1, %%"REG_D" \n\t"\ "mov (%%"REG_D"), %%"REG_D" \n\t"\ @@ -662,7 +662,7 @@ "jnz 1b \n\t"\ :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\ :\ - "rm"((long)(src_x<<2)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\ + "rm"((long)(src_x<<1)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\ "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d""); #define snow_inner_add_yblock_sse2_end_8\ @@ -736,36 +736,23 @@ snow_inner_add_yblock_sse2_accum_16("0", "528") "mov %0, %%"REG_d" \n\t" - "movdqa %%xmm1, %%xmm0 \n\t" - "movdqa %%xmm5, %%xmm4 \n\t" - "punpcklwd %%xmm7, %%xmm0 \n\t" - "paddd (%%"REG_D"), %%xmm0 \n\t" - "punpckhwd %%xmm7, %%xmm1 \n\t" - "paddd 16(%%"REG_D"), %%xmm1 \n\t" - "punpcklwd %%xmm7, %%xmm4 \n\t" - "paddd 32(%%"REG_D"), %%xmm4 \n\t" - "punpckhwd %%xmm7, %%xmm5 \n\t" - "paddd 48(%%"REG_D"), %%xmm5 \n\t" - "paddd %%xmm3, %%xmm0 \n\t" - "paddd %%xmm3, %%xmm1 \n\t" - "paddd %%xmm3, %%xmm4 \n\t" - "paddd %%xmm3, %%xmm5 \n\t" - "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */ - "psrad $8, %%xmm1 \n\t" /* FRAC_BITS. */ - "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */ - "psrad $8, %%xmm5 \n\t" /* FRAC_BITS. */ + "psrlw $4, %%xmm1 \n\t" + "psrlw $4, %%xmm5 \n\t" + "paddw (%%"REG_D"), %%xmm1 \n\t" + "paddw 16(%%"REG_D"), %%xmm5 \n\t" + "paddw %%xmm3, %%xmm1 \n\t" + "paddw %%xmm3, %%xmm5 \n\t" + "psraw $4, %%xmm1 \n\t" /* FRAC_BITS. */ + "psraw $4, %%xmm5 \n\t" /* FRAC_BITS. 
*/ + "packuswb %%xmm5, %%xmm1 \n\t" - "packssdw %%xmm1, %%xmm0 \n\t" - "packssdw %%xmm5, %%xmm4 \n\t" - "packuswb %%xmm4, %%xmm0 \n\t" - - "movdqu %%xmm0, (%%"REG_d") \n\t" + "movdqu %%xmm1, (%%"REG_d") \n\t" snow_inner_add_yblock_sse2_end_16 } #define snow_inner_add_yblock_mmx_header \ - DWTELEM * * dst_array = sb->line + src_y;\ + IDWTELEM * * dst_array = sb->line + src_y;\ long tmp;\ asm volatile(\ "mov %7, %%"REG_c" \n\t"\ @@ -773,8 +760,8 @@ "mov %4, %%"REG_S" \n\t"\ "pxor %%mm7, %%mm7 \n\t" /* 0 */\ "pcmpeqd %%mm3, %%mm3 \n\t"\ - "pslld $31, %%mm3 \n\t"\ - "psrld $24, %%mm3 \n\t" /* FRAC_BITS >> 1 */\ + "psllw $15, %%mm3 \n\t"\ + "psrlw $12, %%mm3 \n\t" /* FRAC_BITS >> 1 */\ "1: \n\t"\ "mov %1, %%"REG_D" \n\t"\ "mov (%%"REG_D"), %%"REG_D" \n\t"\ @@ -800,29 +787,16 @@ #define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\ "mov %0, %%"REG_d" \n\t"\ - "movq %%mm1, %%mm0 \n\t"\ - "movq %%mm5, %%mm4 \n\t"\ - "punpcklwd %%mm7, %%mm0 \n\t"\ - "paddd "read_offset"(%%"REG_D"), %%mm0 \n\t"\ - "punpckhwd %%mm7, %%mm1 \n\t"\ - "paddd "read_offset"+8(%%"REG_D"), %%mm1 \n\t"\ - "punpcklwd %%mm7, %%mm4 \n\t"\ - "paddd "read_offset"+16(%%"REG_D"), %%mm4 \n\t"\ - "punpckhwd %%mm7, %%mm5 \n\t"\ - "paddd "read_offset"+24(%%"REG_D"), %%mm5 \n\t"\ - "paddd %%mm3, %%mm0 \n\t"\ - "paddd %%mm3, %%mm1 \n\t"\ - "paddd %%mm3, %%mm4 \n\t"\ - "paddd %%mm3, %%mm5 \n\t"\ - "psrad $8, %%mm0 \n\t"\ - "psrad $8, %%mm1 \n\t"\ - "psrad $8, %%mm4 \n\t"\ - "psrad $8, %%mm5 \n\t"\ -\ - "packssdw %%mm1, %%mm0 \n\t"\ - "packssdw %%mm5, %%mm4 \n\t"\ - "packuswb %%mm4, %%mm0 \n\t"\ - "movq %%mm0, "write_offset"(%%"REG_d") \n\t" + "psrlw $4, %%mm1 \n\t"\ + "psrlw $4, %%mm5 \n\t"\ + "paddw "read_offset"(%%"REG_D"), %%mm1 \n\t"\ + "paddw "read_offset"+8(%%"REG_D"), %%mm5 \n\t"\ + "paddw %%mm3, %%mm1 \n\t"\ + "paddw %%mm3, %%mm5 \n\t"\ + "psraw $4, %%mm1 \n\t"\ + "psraw $4, %%mm5 \n\t"\ + "packuswb %%mm5, %%mm1 \n\t"\ + "movq %%mm1, "write_offset"(%%"REG_d") \n\t" #define snow_inner_add_yblock_mmx_end(s_step)\ "add $"s_step", %%"REG_S" \n\t"\ @@ -836,7 +810,7 @@ "jnz 1b \n\t"\ :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\ :\ - "rm"((long)(src_x<<2)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\ + "rm"((long)(src_x<<1)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\ "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d""); static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h, @@ -863,7 +837,7 @@ snow_inner_add_yblock_mmx_accum("2", "24", "8") snow_inner_add_yblock_mmx_accum("1", "520", "8") snow_inner_add_yblock_mmx_accum("0", "536", "8") -snow_inner_add_yblock_mmx_mix("32", "8") +snow_inner_add_yblock_mmx_mix("16", "8") snow_inner_add_yblock_mmx_end("32") }