changeset 578:b20a4eb68876 libavutil

Convert the asm keyword into __asm__.

Neither the asm() nor the __asm__() keyword is part of the C99 standard, but while GCC accepts the former in C89 syntax, it is not accepted in C99 unless GNU extensions are turned on (with -fasm). The latter form is accepted in any syntax as an extension (without requiring further command-line options).

The Sun Studio C99 compiler likewise rejects asm() while accepting __asm__(), albeit with a warning that it is not valid C99 syntax.
author flameeyes
date Thu, 16 Oct 2008 13:34:09 +0000
parents 2fe796dc4c68
children 2ec148d26d9f
files bswap.h common.h internal.h
diffstat 3 files changed, 20 insertions(+), 20 deletions(-)
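
The incompatibility the description refers to can be reproduced with a tiny test file. A minimal sketch (asmtest.c and the nop instruction are illustrative, not part of this changeset): compiled with gnu89/gnu99 defaults both functions build, while a strict -std=c99 build rejects the plain asm spelling because -std=c99 implies -fno-asm; the __asm__ spelling builds in every mode.

/* asmtest.c - illustrative only, not part of this changeset */

void plain_asm(void)
{
    asm volatile("nop");      /* rejected under gcc -std=c99 unless -fasm is given */
}

void underscore_asm(void)
{
    __asm__ volatile("nop");  /* accepted in every mode */
}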
line diff
--- a/bswap.h	Wed Oct 15 08:06:12 2008 +0000
+++ b/bswap.h	Thu Oct 16 13:34:09 2008 +0000
@@ -33,11 +33,11 @@
 static av_always_inline av_const uint16_t bswap_16(uint16_t x)
 {
 #if defined(ARCH_X86)
-    asm("rorw $8, %0" : "+r"(x));
+    __asm__("rorw $8, %0" : "+r"(x));
 #elif defined(ARCH_SH4)
-    asm("swap.b %0,%0" : "=r"(x) : "0"(x));
+    __asm__("swap.b %0,%0" : "=r"(x) : "0"(x));
 #elif defined(HAVE_ARMV6)
-    asm("rev16 %0, %0" : "+r"(x));
+    __asm__("rev16 %0, %0" : "+r"(x));
 #else
     x= (x>>8) | (x<<8);
 #endif
@@ -48,30 +48,30 @@
 {
 #if defined(ARCH_X86)
 #ifdef HAVE_BSWAP
-    asm("bswap   %0" : "+r" (x));
+    __asm__("bswap   %0" : "+r" (x));
 #else
-    asm("rorw    $8,  %w0 \n\t"
+    __asm__("rorw    $8,  %w0 \n\t"
         "rorl    $16, %0  \n\t"
         "rorw    $8,  %w0"
         : "+r"(x));
 #endif
 #elif defined(ARCH_SH4)
-    asm("swap.b %0,%0\n"
+    __asm__("swap.b %0,%0\n"
         "swap.w %0,%0\n"
         "swap.b %0,%0\n"
         : "=r"(x) : "0"(x));
 #elif defined(HAVE_ARMV6)
-    asm("rev %0, %0" : "+r"(x));
+    __asm__("rev %0, %0" : "+r"(x));
 #elif defined(ARCH_ARMV4L)
     uint32_t t;
-    asm ("eor %1, %0, %0, ror #16 \n\t"
+    __asm__ ("eor %1, %0, %0, ror #16 \n\t"
          "bic %1, %1, #0xFF0000   \n\t"
          "mov %0, %0, ror #8      \n\t"
          "eor %0, %0, %1, lsr #8  \n\t"
          : "+r"(x), "+r"(t));
 #elif defined(ARCH_BFIN)
     unsigned tmp;
-    asm("%1 = %0 >> 8 (V);      \n\t"
+    __asm__("%1 = %0 >> 8 (V);      \n\t"
         "%0 = %0 << 8 (V);      \n\t"
         "%0 = %0 | %1;          \n\t"
         "%0 = PACK(%0.L, %0.H); \n\t"
@@ -90,7 +90,7 @@
     x= ((x<<16)&0xFFFF0000FFFF0000ULL) | ((x>>16)&0x0000FFFF0000FFFFULL);
     return (x>>32) | (x<<32);
 #elif defined(ARCH_X86_64)
-  asm("bswap  %0": "=r" (x) : "0" (x));
+  __asm__("bswap  %0": "=r" (x) : "0" (x));
   return x;
 #else
     union {
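
As a sanity check on the bswap_16 hunk above: every asm branch must produce the same result as the portable fallback in the #else branch. A self-contained test of that fallback (the test values are chosen arbitrarily):

#include <assert.h>
#include <stdint.h>

/* the portable fallback from bswap_16's #else branch */
static uint16_t bswap_16_c(uint16_t x)
{
    return (uint16_t)((x >> 8) | (x << 8));
}

int main(void)
{
    assert(bswap_16_c(0x1234) == 0x3412);  /* byte order reversed */
    assert(bswap_16_c(0x00FF) == 0xFF00);
    return 0;
}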
--- a/common.h	Wed Oct 15 08:06:12 2008 +0000
+++ b/common.h	Thu Oct 16 13:34:09 2008 +0000
@@ -154,7 +154,7 @@
 {
 #ifdef HAVE_CMOV
     int i=b;
-    asm volatile(
+    __asm__ volatile(
         "cmp    %2, %1 \n\t"
         "cmovg  %1, %0 \n\t"
         "cmovg  %2, %1 \n\t"
@@ -327,7 +327,7 @@
 static inline uint64_t read_time(void)
 {
     uint64_t a, d;
-    asm volatile("rdtsc\n\t"
+    __asm__ volatile("rdtsc\n\t"
                  : "=a" (a), "=d" (d));
     return (d << 32) | (a & 0xffffffff);
 }
@@ -335,7 +335,7 @@
 static inline long long read_time(void)
 {
     long long l;
-    asm volatile("rdtsc\n\t"
+    __asm__ volatile("rdtsc\n\t"
                  : "=A" (l));
     return l;
 }
@@ -349,7 +349,7 @@
         } p;
         unsigned long long c;
     } t;
-    asm volatile ("%0=cycles; %1=cycles2;" : "=d" (t.p.lo), "=d" (t.p.hi));
+    __asm__ volatile ("%0=cycles; %1=cycles2;" : "=d" (t.p.lo), "=d" (t.p.hi));
     return t.c;
 }
 #else //FIXME check ppc64
@@ -358,7 +358,7 @@
     uint32_t tbu, tbl, temp;
 
      /* from section 2.2.1 of the 32-bit PowerPC PEM */
-     asm volatile(
+     __asm__ volatile(
          "1:\n"
          "mftbu  %2\n"
          "mftb   %0\n"
--- a/internal.h	Wed Oct 15 08:06:12 2008 +0000
+++ b/internal.h	Thu Oct 16 13:34:09 2008 +0000
@@ -130,7 +130,7 @@
 #    define FASTDIV(a,b) \
     ({\
         int ret,dmy;\
-        asm volatile(\
+        __asm__ volatile(\
             "mull %3"\
             :"=d"(ret),"=a"(dmy)\
             :"1"(a),"g"(ff_inverse[b])\
@@ -141,7 +141,7 @@
 static inline av_const int FASTDIV(int a, int b)
 {
     int r;
-    asm volatile("cmp   %2, #0        \n\t"
+    __asm__ volatile("cmp   %2, #0        \n\t"
                  "smmul %0, %1, %2    \n\t"
                  "rsblt %0, %0, #0    \n\t"
                  : "=r"(r) : "r"(a), "r"(ff_inverse[b]));
@@ -151,7 +151,7 @@
 #    define FASTDIV(a,b) \
     ({\
         int ret,dmy;\
-        asm volatile(\
+        __asm__ volatile(\
             "umull %1, %0, %2, %3"\
             :"=&r"(ret),"=&r"(dmy)\
             :"r"(a),"r"(ff_inverse[b])\
@@ -190,7 +190,7 @@
 
 #if defined(ARCH_X86)
 #define MASK_ABS(mask, level)\
-            asm volatile(\
+            __asm__ volatile(\
                 "cltd                   \n\t"\
                 "xorl %1, %0            \n\t"\
                 "subl %1, %0            \n\t"\
@@ -204,7 +204,7 @@
 
 #ifdef HAVE_CMOV
 #define COPY3_IF_LT(x,y,a,b,c,d)\
-asm volatile (\
+__asm__ volatile (\
     "cmpl %0, %3        \n\t"\
     "cmovl %3, %0       \n\t"\
     "cmovl %4, %1       \n\t"\