diff x86/vp8dsp.asm @ 12457:2982071047a2 libavcodec

Use "d" suffix for general-purpose registers used with movd. This increases compatibilty with nasm and is also more consistent, e.g. with h264_intrapred.asm and h264_chromamc.asm that already do it that way.
author reimar
date Sun, 05 Sep 2010 10:10:16 +0000
parents e6e4059ea421
children
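
Not part of the patch itself, but for context: in the macros touched below, the %N parameters expand to general-purpose register names in x86inc.asm style (e.g. r2), and appending "d" selects that register's 32-bit form (r2d), which is the width movd actually transfers; the unsuffixed name is the full-width register on x86-64. A minimal sketch of the convention the change adopts, assuming the usual x86inc.asm aliases (mN for SIMD registers, rN/rNd for GPRs) are in scope:

    ; old form: full-width GPR name as the movd operand
    movd    m0, r2
    ; new form: explicit 32-bit register name, matching what
    ; h264_intrapred.asm and h264_chromamc.asm already do and
    ; avoiding the nasm/yasm disagreement the commit message refers to
    movd    m0, r2d

The suffix lands on whichever operand is the general-purpose register: the destination in the WRITE_* macros (%3d, %5d), the source in the SPLATB_REG_* macros (%2d).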
--- a/x86/vp8dsp.asm	Sat Sep 04 09:59:08 2010 +0000
+++ b/x86/vp8dsp.asm	Sun Sep 05 10:10:16 2010 +0000
@@ -1342,7 +1342,7 @@
     psrldq        m%2, 4
 %if %10 == 8
     movd    [%5+%8*2], m%1
-    movd           %5, m%3
+    movd          %5d, m%3
 %endif
     psrldq        m%3, 4
     psrldq        m%4, 4
@@ -1379,26 +1379,26 @@
 ; 4 is a pointer to the destination's 4th line
 ; 5/6 is -stride and +stride
 %macro WRITE_2x4W 6
-    movd             %3, %1
+    movd            %3d, %1
     punpckhdq        %1, %1
     mov       [%4+%5*4], %3w
     shr              %3, 16
     add              %4, %6
     mov       [%4+%5*4], %3w
 
-    movd             %3, %1
+    movd            %3d, %1
     add              %4, %5
     mov       [%4+%5*2], %3w
     shr              %3, 16
     mov       [%4+%5  ], %3w
 
-    movd             %3, %2
+    movd            %3d, %2
     punpckhdq        %2, %2
     mov       [%4     ], %3w
     shr              %3, 16
     mov       [%4+%6  ], %3w
 
-    movd             %3, %2
+    movd            %3d, %2
     add              %4, %6
     mov       [%4+%6  ], %3w
     shr              %3, 16
@@ -1407,27 +1407,27 @@
 %endmacro
 
 %macro WRITE_8W_SSE2 5
-    movd             %2, %1
+    movd            %2d, %1
     psrldq           %1, 4
     mov       [%3+%4*4], %2w
     shr              %2, 16
     add              %3, %5
     mov       [%3+%4*4], %2w
 
-    movd             %2, %1
+    movd            %2d, %1
     psrldq           %1, 4
     add              %3, %4
     mov       [%3+%4*2], %2w
     shr              %2, 16
     mov       [%3+%4  ], %2w
 
-    movd             %2, %1
+    movd            %2d, %1
     psrldq           %1, 4
     mov       [%3     ], %2w
     shr              %2, 16
     mov       [%3+%5  ], %2w
 
-    movd             %2, %1
+    movd            %2d, %1
     add              %3, %5
     mov       [%3+%5  ], %2w
     shr              %2, 16
@@ -1446,27 +1446,27 @@
 %endmacro
 
 %macro SPLATB_REG_MMX 2-3
-    movd           %1, %2
+    movd           %1, %2d
     punpcklbw      %1, %1
     punpcklwd      %1, %1
     punpckldq      %1, %1
 %endmacro
 
 %macro SPLATB_REG_MMXEXT 2-3
-    movd           %1, %2
+    movd           %1, %2d
     punpcklbw      %1, %1
     pshufw         %1, %1, 0x0
 %endmacro
 
 %macro SPLATB_REG_SSE2 2-3
-    movd           %1, %2
+    movd           %1, %2d
     punpcklbw      %1, %1
     pshuflw        %1, %1, 0x0
     punpcklqdq     %1, %1
 %endmacro
 
 %macro SPLATB_REG_SSSE3 3
-    movd           %1, %2
+    movd           %1, %2d
     pshufb         %1, %3
 %endmacro