changeset 12457:2982071047a2 libavcodec
Use "d" suffix for general-purpose registers used with movd.
This increases compatibility with nasm and is also more consistent,
e.g. with h264_intrapred.asm and h264_chromamc.asm, which already
do it that way.
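
As a minimal standalone sketch of the operand-size issue behind this change (plain nasm/yasm syntax; the label is hypothetical, and the x86inc aliases such as r2/r2d expand to ABI-dependent register names like rdx/edx): movd transfers 32 bits between a general-purpose register and an MMX/XMM register, so yasm tolerates a 64-bit register operand where nasm, at least at the time, rejected it.

BITS 64
SECTION .text
sketch:
    movd  xmm0, eax   ; 32-bit GPR operand: accepted by both nasm and yasm
;   movd  xmm0, rax   ; 64-bit GPR operand: yasm assembles it, nasm errors out
    ret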
author      reimar
date        Sun, 05 Sep 2010 10:10:16 +0000
parents     a5ddb39627fd
children    4a425a99f543
files       x86/h264_weight.asm x86/vc1dsp_yasm.asm x86/vp3dsp.asm x86/vp8dsp.asm
diffstat    4 files changed, 30 insertions(+), 30 deletions(-)
--- a/x86/h264_weight.asm	Sat Sep 04 09:59:08 2010 +0000
+++ b/x86/h264_weight.asm	Sun Sep 05 10:10:16 2010 +0000
@@ -40,9 +40,9 @@
 %macro WEIGHT_SETUP 0
     add        r4, r4
     inc        r4
-    movd       m3, r3
-    movd       m5, r4
-    movd       m6, r2
+    movd       m3, r3d
+    movd       m5, r4d
+    movd       m6, r2d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -156,10 +156,10 @@
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m3, r4
-    movd       m4, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m3, r4d
+    movd       m4, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -291,10 +291,10 @@
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m4, r4
-    movd       m0, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m4, r4d
+    movd       m0, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
     punpcklbw  m4, m0
--- a/x86/vc1dsp_yasm.asm	Sat Sep 04 09:59:08 2010 +0000
+++ b/x86/vc1dsp_yasm.asm	Sun Sep 05 10:10:16 2010 +0000
@@ -36,7 +36,7 @@
 %endmacro
 
 %macro STORE_4_WORDS_MMX 6
-    movd   %6, %5
+    movd   %6d, %5
 %if mmsize==16
     psrldq %5, 4
 %else
@@ -45,7 +45,7 @@
     mov    %1, %6w
     shr    %6, 16
     mov    %2, %6w
-    movd   %6, %5
+    movd   %6d, %5
     mov    %3, %6w
     shr    %6, 16
     mov    %4, %6w
@@ -88,7 +88,7 @@
     pxor       m7, m3      ; d_sign ^= a0_sign
 
     pxor       m5, m5
-    movd       m3, r2
+    movd       m3, r2d
 %if %1 > 4
     punpcklbw  m3, m3
 %endif
--- a/x86/vp3dsp.asm	Sat Sep 04 09:59:08 2010 +0000
+++ b/x86/vp3dsp.asm	Sun Sep 05 10:10:16 2010 +0000
@@ -93,12 +93,12 @@
 %endmacro
 
 %macro STORE_4_WORDS 1
-    movd  r2, %1
+    movd  r2d, %1
     mov   [r0     -1], r2w
     psrlq %1, 32
     shr   r2, 16
     mov   [r0+r1  -1], r2w
-    movd  r2, %1
+    movd  r2d, %1
     mov   [r0+r1*2-1], r2w
     shr   r2, 16
     mov   [r0+r3  -1], r2w
@@ -606,7 +606,7 @@
     movsx r2, word [r2]
     add   r2, 15
     sar   r2, 5
-    movd  m0, r2
+    movd  m0, r2d
     pshufw m0, m0, 0x0
     pxor  m1, m1
     psubw m1, m0
--- a/x86/vp8dsp.asm	Sat Sep 04 09:59:08 2010 +0000
+++ b/x86/vp8dsp.asm	Sun Sep 05 10:10:16 2010 +0000
@@ -1342,7 +1342,7 @@
     psrldq m%2, 4
 %if %10 == 8
     movd  [%5+%8*2], m%1
-    movd  %5, m%3
+    movd  %5d, m%3
 %endif
     psrldq m%3, 4
     psrldq m%4, 4
@@ -1379,26 +1379,26 @@
 ; 4 is a pointer to the destination's 4th line
 ; 5/6 is -stride and +stride
 %macro WRITE_2x4W 6
-    movd      %3, %1
+    movd      %3d, %1
     punpckhdq %1, %1
     mov       [%4+%5*4], %3w
     shr       %3, 16
     add       %4, %6
     mov       [%4+%5*4], %3w
-    movd      %3, %1
+    movd      %3d, %1
     add       %4, %5
     mov       [%4+%5*2], %3w
     shr       %3, 16
     mov       [%4+%5  ], %3w
-    movd      %3, %2
+    movd      %3d, %2
     punpckhdq %2, %2
     mov       [%4     ], %3w
     shr       %3, 16
     mov       [%4+%6  ], %3w
-    movd      %3, %2
+    movd      %3d, %2
     add       %4, %6
     mov       [%4+%6  ], %3w
     shr       %3, 16
@@ -1407,27 +1407,27 @@
 %endmacro
 
 %macro WRITE_8W_SSE2 5
-    movd   %2, %1
+    movd   %2d, %1
     psrldq %1, 4
     mov    [%3+%4*4], %2w
     shr    %2, 16
     add    %3, %5
     mov    [%3+%4*4], %2w
-    movd   %2, %1
+    movd   %2d, %1
     psrldq %1, 4
     add    %3, %4
     mov    [%3+%4*2], %2w
     shr    %2, 16
     mov    [%3+%4  ], %2w
-    movd   %2, %1
+    movd   %2d, %1
     psrldq %1, 4
     mov    [%3     ], %2w
     shr    %2, 16
     mov    [%3+%5  ], %2w
-    movd   %2, %1
+    movd   %2d, %1
     add    %3, %5
     mov    [%3+%5  ], %2w
     shr    %2, 16
@@ -1446,27 +1446,27 @@
 %endmacro
 
 %macro SPLATB_REG_MMX 2-3
-    movd      %1, %2
+    movd      %1, %2d
     punpcklbw %1, %1
     punpcklwd %1, %1
     punpckldq %1, %1
 %endmacro
 
 %macro SPLATB_REG_MMXEXT 2-3
-    movd      %1, %2
+    movd      %1, %2d
     punpcklbw %1, %1
     pshufw    %1, %1, 0x0
 %endmacro
 
 %macro SPLATB_REG_SSE2 2-3
-    movd       %1, %2
+    movd       %1, %2d
     punpcklbw  %1, %1
     pshuflw    %1, %1, 0x0
     punpcklqdq %1, %1
 %endmacro
 
 %macro SPLATB_REG_SSSE3 3
-    movd   %1, %2
+    movd   %1, %2d
     pshufb %1, %3
 %endmacro