diff i386/motion_est_mmx.c @ 8031:eebc7209c47f libavcodec

Convert the asm keyword into __asm__. Neither the asm() nor the __asm__() keyword is part of the C99 standard, but while GCC accepts the former in C89 syntax, it does not accept it in C99 mode unless GNU extensions are enabled (with -fasm). The latter form is accepted in any mode as an extension, without requiring further command-line options. The Sun Studio C99 compiler likewise rejects asm() while accepting __asm__(), albeit with a warning that it is not valid C99 syntax.
author flameeyes
date Thu, 16 Oct 2008 13:34:09 +0000
parents f7cbb7733146
children 0d108ec85620
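To make the compiler behavior described above concrete, here is a minimal sketch (hypothetical file name and function, not part of this changeset): GCC rejects the plain asm keyword under -std=c99 unless -fasm re-enables it, while __asm__ compiles in any mode.

/* keyword_test.c -- hypothetical example, not from this changeset.
 *
 * gcc -std=gnu89 -c keyword_test.c        # plain asm accepted (GNU C89)
 * gcc -std=c99   -c keyword_test.c        # plain asm rejected
 * gcc -std=c99 -fasm -c keyword_test.c    # plain asm accepted again
 *
 * __asm__ compiles under all three invocations, which is why the
 * changeset converts every asm to __asm__.
 */
#include <stdint.h>

static inline uint32_t bswap32(uint32_t x)
{
#ifdef USE_PLAIN_ASM
    asm("bswap %0" : "+r"(x));       /* breaks under plain -std=c99 */
#else
    __asm__("bswap %0" : "+r"(x));   /* accepted in every mode */
#endif
    return x;
}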
line diff
--- a/i386/motion_est_mmx.c	Wed Oct 15 08:01:54 2008 +0000
+++ b/i386/motion_est_mmx.c	Thu Oct 16 13:34:09 2008 +0000
@@ -36,7 +36,7 @@
 static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     x86_reg len= -(stride*h);
-    asm volatile(
+    __asm__ volatile(
         ASMALIGN(4)
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
@@ -71,7 +71,7 @@
 
 static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    asm volatile(
+    __asm__ volatile(
         ASMALIGN(4)
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
@@ -92,7 +92,7 @@
 static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
 {
     int ret;
-    asm volatile(
+    __asm__ volatile(
         "pxor %%xmm6, %%xmm6            \n\t"
         ASMALIGN(4)
         "1:                             \n\t"
@@ -109,7 +109,7 @@
         : "+r" (h), "+r" (blk1), "+r" (blk2)
         : "r" ((x86_reg)stride)
     );
-    asm volatile(
+    __asm__ volatile(
         "movhlps %%xmm6, %%xmm0         \n\t"
         "paddw   %%xmm0, %%xmm6         \n\t"
         "movd    %%xmm6, %0             \n\t"
@@ -120,7 +120,7 @@
 
 static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    asm volatile(
+    __asm__ volatile(
         ASMALIGN(4)
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
@@ -142,7 +142,7 @@
 
 static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    asm volatile(
+    __asm__ volatile(
         "movq (%1), %%mm0               \n\t"
         "add %3, %1                     \n\t"
         ASMALIGN(4)
@@ -167,7 +167,7 @@
 
 static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
-    asm volatile(
+    __asm__ volatile(
         "movq "MANGLE(bone)", %%mm5     \n\t"
         "movq (%1), %%mm0               \n\t"
         "pavgb 1(%1), %%mm0             \n\t"
@@ -198,7 +198,7 @@
 static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
 {
     x86_reg len= -(stride*h);
-    asm volatile(
+    __asm__ volatile(
         ASMALIGN(4)
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
@@ -236,7 +236,7 @@
 static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     x86_reg len= -(stride*h);
-    asm volatile(
+    __asm__ volatile(
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
         "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
         "movq %%mm0, %%mm1              \n\t"
@@ -289,7 +289,7 @@
 static inline int sum_mmx(void)
 {
     int ret;
-    asm volatile(
+    __asm__ volatile(
         "movq %%mm6, %%mm0              \n\t"
         "psrlq $32, %%mm6               \n\t"
         "paddw %%mm0, %%mm6             \n\t"
@@ -305,7 +305,7 @@
 static inline int sum_mmx2(void)
 {
     int ret;
-    asm volatile(
+    __asm__ volatile(
         "movd %%mm6, %0                 \n\t"
         : "=r" (ret)
     );
@@ -326,7 +326,7 @@
 static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
     assert(h==8);\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t":);\
 \
     sad8_1_ ## suf(blk1, blk2, stride, 8);\
@@ -336,7 +336,7 @@
 static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
     assert(h==8);\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  "movq %0, %%mm5        \n\t"\
                  :: "m"(round_tab[1]) \
@@ -350,7 +350,7 @@
 static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
     assert(h==8);\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  "movq %0, %%mm5        \n\t"\
                  :: "m"(round_tab[1]) \
@@ -364,7 +364,7 @@
 static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
     assert(h==8);\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  ::);\
 \
@@ -375,7 +375,7 @@
 \
 static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t":);\
 \
     sad8_1_ ## suf(blk1  , blk2  , stride, h);\
@@ -385,7 +385,7 @@
 }\
 static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  "movq %0, %%mm5        \n\t"\
                  :: "m"(round_tab[1]) \
@@ -398,7 +398,7 @@
 }\
 static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  "movq %0, %%mm5        \n\t"\
                  :: "m"(round_tab[1]) \
@@ -411,7 +411,7 @@
 }\
 static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
 {\
-    asm volatile("pxor %%mm7, %%mm7     \n\t"\
+    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                  "pxor %%mm6, %%mm6     \n\t"\
                  ::);\
 \