Mercurial > libavcodec.hg
diff i386/snowdsp_mmx.c @ 5565:93082c591c8b libavcodec
Change rounding of the horizontal DWT to match the vertical one.
This allows some simplifications and optimizations and should
not have any effect on quality.
| author   | michael                           |
|----------|-----------------------------------|
| date     | Tue, 21 Aug 2007 16:29:40 +0000   |
| parents  | 538d55152d09                      |
| children | 642588a60570                      |
line wrap: on
line diff
--- a/i386/snowdsp_mmx.c	Tue Aug 21 15:48:08 2007 +0000
+++ b/i386/snowdsp_mmx.c	Tue Aug 21 16:29:40 2007 +0000
@@ -111,8 +111,7 @@
 
         i = 0;
         asm volatile(
-            "pcmpeqd %%xmm7, %%xmm7 \n\t"
-            "psrad $29, %%xmm7 \n\t"
+            "pslld $1, %%xmm7 \n\t"
         ::);
         for(; i<w_l-7; i+=8){
             asm volatile(
@@ -157,25 +156,21 @@
             "movdqu 20(%1), %%xmm6 \n\t"
             "paddd (%1), %%xmm2 \n\t"
             "paddd 16(%1), %%xmm6 \n\t"
-            "movdqa %%xmm2, %%xmm0 \n\t"
-            "movdqa %%xmm6, %%xmm4 \n\t"
-            "pslld $2, %%xmm2 \n\t"
-            "pslld $2, %%xmm6 \n\t"
-            "psubd %%xmm2, %%xmm0 \n\t"
-            "psubd %%xmm6, %%xmm4 \n\t"
-            "psrad $1, %%xmm0 \n\t"
-            "psrad $1, %%xmm4 \n\t"
-            "movdqu (%0), %%xmm2 \n\t"
-            "movdqu 16(%0), %%xmm6 \n\t"
-            "psubd %%xmm0, %%xmm2 \n\t"
-            "psubd %%xmm4, %%xmm6 \n\t"
+            "movdqu (%0), %%xmm0 \n\t"
+            "movdqu 16(%0), %%xmm4 \n\t"
+            "paddd %%xmm2, %%xmm0 \n\t"
+            "paddd %%xmm6, %%xmm4 \n\t"
+            "psrad $1, %%xmm2 \n\t"
+            "psrad $1, %%xmm6 \n\t"
+            "paddd %%xmm0, %%xmm2 \n\t"
+            "paddd %%xmm4, %%xmm6 \n\t"
             "movdqa %%xmm2, (%2) \n\t"
             "movdqa %%xmm6, 16(%2) \n\t"
             :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
             : "memory"
             );
         }
-        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO, W_AS);
+        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
     }
 
     {
@@ -291,10 +286,9 @@
         DWTELEM * const ref = b+w2 - 1;
 
         i = 1;
-        b[0] = b[0] + (((2 * ref[1] + W_BO-1) + 4 * b[0]) >> W_BS);
+        b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS);
         asm volatile(
-            "pcmpeqd %%mm7, %%mm7 \n\t"
-            "psrld $29, %%mm7 \n\t"
+            "pslld $1, %%mm7 \n\t"
         ::);
         for(; i<w_l-3; i+=4){
             asm volatile(
@@ -333,16 +327,12 @@
             "movq 12(%1), %%mm6 \n\t"
             "paddd (%1), %%mm2 \n\t"
            "paddd 8(%1), %%mm6 \n\t"
-            "pxor %%mm0, %%mm0 \n\t" //note: the 2 xor could be avoided if we would flip the rounding direction
-            "pxor %%mm4, %%mm4 \n\t"
-            "psubd %%mm2, %%mm0 \n\t"
-            "psubd %%mm6, %%mm4 \n\t"
-            "psrad $1, %%mm0 \n\t"
-            "psrad $1, %%mm4 \n\t"
-            "psubd %%mm0, %%mm2 \n\t"
-            "psubd %%mm4, %%mm6 \n\t"
             "movq (%0), %%mm0 \n\t"
             "movq 8(%0), %%mm4 \n\t"
+            "paddd %%mm2, %%mm0 \n\t"
+            "paddd %%mm6, %%mm4 \n\t"
+            "psrad $1, %%mm2 \n\t"
+            "psrad $1, %%mm6 \n\t"
             "paddd %%mm0, %%mm2 \n\t"
             "paddd %%mm4, %%mm6 \n\t"
             "movq %%mm2, (%2) \n\t"
@@ -351,7 +341,7 @@
             : "memory"
             );
         }
-        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO, W_AS);
+        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
     }
 
     {