# HG changeset patch
# User flameeyes
# Date 1224164049 0
# Node ID eebc7209c47f9fbbf1398f22a8dc8db8165f8faf
# Parent a512ac8fa54002c5564fe0879e6b329cc619c9af
Convert asm keyword into __asm__.

Neither the asm() nor the __asm__() keyword is part of the C99 standard;
GCC accepts the former in C89 mode, but rejects it in C99 mode unless GNU
extensions are enabled (with -fasm). The latter form is accepted in either
mode as an extension, without requiring further command-line options.

The Sun Studio C99 compiler likewise rejects asm() while accepting
__asm__(), albeit with a warning that it is not valid C99 syntax. A
minimal example of the difference is sketched after the patch.

diff -r a512ac8fa540 -r eebc7209c47f alpha/asm.h
--- a/alpha/asm.h Wed Oct 15 08:01:54 2008 +0000
+++ b/alpha/asm.h Thu Oct 16 13:34:09 2008 +0000
@@ -105,21 +105,21 @@
 #define implver __builtin_alpha_implver
 #define rpcc __builtin_alpha_rpcc
 #else
-#define prefetch(p) asm volatile("ldl $31,%0" : : "m"(*(const char *) (p)) : "memory")
-#define prefetch_en(p) asm volatile("ldq $31,%0" : : "m"(*(const char *) (p)) : "memory")
-#define prefetch_m(p) asm volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
-#define prefetch_men(p) asm volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
-#define cmpbge(a, b) ({ uint64_t __r; asm ("cmpbge %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-#define extql(a, b) ({ uint64_t __r; asm ("extql %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-#define extwl(a, b) ({ uint64_t __r; asm ("extwl %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-#define extqh(a, b) ({ uint64_t __r; asm ("extqh %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-#define zap(a, b) ({ uint64_t __r; asm ("zap %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-#define zapnot(a, b) ({ uint64_t __r; asm ("zapnot %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
-#define amask(a) ({ uint64_t __r; asm ("amask %1,%0" : "=r" (__r) : "rI" (a)); __r; })
-#define implver() ({ uint64_t __r; asm ("implver %0" : "=r" (__r)); __r; })
-#define rpcc() ({ uint64_t __r; asm volatile ("rpcc %0" : "=r" (__r)); __r; })
+#define prefetch(p) __asm__ volatile("ldl $31,%0" : : "m"(*(const char *) (p)) : "memory")
+#define prefetch_en(p) __asm__ volatile("ldq $31,%0" : : "m"(*(const char *) (p)) : "memory")
+#define prefetch_m(p) __asm__ volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
+#define prefetch_men(p) __asm__ volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
+#define cmpbge(a, b) ({ uint64_t __r; __asm__ ("cmpbge %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define extql(a, b) ({ uint64_t __r; __asm__ ("extql %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define extwl(a, b) ({ uint64_t __r; __asm__ ("extwl %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define extqh(a, b) ({ uint64_t __r; __asm__ ("extqh %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define zap(a, b) ({ uint64_t __r; __asm__ ("zap %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define zapnot(a, b) ({ uint64_t __r; __asm__ ("zapnot %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define amask(a) ({ uint64_t __r; __asm__ ("amask %1,%0" : "=r" (__r) : "rI" (a)); __r; })
+#define implver() ({ uint64_t __r; __asm__ ("implver %0" : "=r" (__r)); __r; })
+#define rpcc() ({ uint64_t __r; __asm__ volatile ("rpcc %0" : "=r" (__r)); __r; })
 #endif
-#define wh64(p) asm volatile("wh64 (%0)" : : "r"(p) : "memory")
+#define wh64(p) __asm__ volatile("wh64 (%0)" : : "r"(p) : "memory")
 #if GNUC_PREREQ(3,3) && defined(__alpha_max__)
 #define minub8 __builtin_alpha_minub8
@@ -136,19 +136,19 @@
 #define unpkbl __builtin_alpha_unpkbl
 #define unpkbw __builtin_alpha_unpkbw
 #else
-#define minub8(a, b) ({ uint64_t __r; asm (".arch ev6; minub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define minsb8(a, b) ({ uint64_t __r; asm (".arch ev6; minsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define minuw4(a, b) ({ uint64_t __r; asm (".arch ev6; minuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define minsw4(a, b) ({ uint64_t __r; asm (".arch ev6; minsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define maxub8(a, b) ({ uint64_t __r; asm (".arch ev6; maxub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define maxsb8(a, b) ({ uint64_t __r; asm (".arch ev6; maxsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define maxuw4(a, b) ({ uint64_t __r; asm (".arch ev6; maxuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define maxsw4(a, b) ({ uint64_t __r; asm (".arch ev6; maxsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define perr(a, b) ({ uint64_t __r; asm (".arch ev6; perr %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
-#define pklb(a) ({ uint64_t __r; asm (".arch ev6; pklb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
-#define pkwb(a) ({ uint64_t __r; asm (".arch ev6; pkwb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
-#define unpkbl(a) ({ uint64_t __r; asm (".arch ev6; unpkbl %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
-#define unpkbw(a) ({ uint64_t __r; asm (".arch ev6; unpkbw %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
+#define minub8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minsb8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minuw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minsw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxub8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxsb8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxuw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxsw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define perr(a, b) ({ uint64_t __r; __asm__ (".arch ev6; perr %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
+#define pklb(a) ({ uint64_t __r; __asm__ (".arch ev6; pklb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
+#define pkwb(a) ({ uint64_t __r; __asm__ (".arch ev6; pkwb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
+#define unpkbl(a) ({ uint64_t __r; __asm__ (".arch ev6; unpkbl %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
+#define unpkbw(a) ({ uint64_t __r; __asm__ (".arch ev6; unpkbw %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
 #endif
 #elif defined(__DECC) /* Digital/Compaq/hp "ccc" compiler */
@@ -158,31 +158,31 @@
 #define ldl(p) (*(const int32_t *) (p))
 #define stq(l, p) do { *(uint64_t *) (p) = (l); } while (0)
 #define stl(l, p) do { *(int32_t *) (p) = (l); } while (0)
-#define ldq_u(a) asm ("ldq_u %v0,0(%a0)", a)
+#define ldq_u(a) __asm__ ("ldq_u %v0,0(%a0)", a)
 #define uldq(a) (*(const __unaligned uint64_t *) (a))
-#define cmpbge(a, b) asm ("cmpbge %a0,%a1,%v0", a, b)
-#define extql(a, b) asm ("extql %a0,%a1,%v0", a, b)
-#define extwl(a, b) asm ("extwl %a0,%a1,%v0", a, b)
-#define extqh(a, b) asm ("extqh %a0,%a1,%v0", a, b)
-#define zap(a, b) asm ("zap %a0,%a1,%v0", a, b)
-#define zapnot(a, b) asm ("zapnot %a0,%a1,%v0", a, b)
-#define amask(a) asm ("amask %a0,%v0", a)
-#define implver() asm ("implver %v0")
-#define rpcc() asm ("rpcc %v0")
-#define minub8(a, b) asm ("minub8 %a0,%a1,%v0", a, b)
-#define minsb8(a, b) asm ("minsb8 %a0,%a1,%v0", a, b)
-#define minuw4(a, b) asm ("minuw4 %a0,%a1,%v0", a, b)
-#define minsw4(a, b) asm ("minsw4 %a0,%a1,%v0", a, b)
-#define maxub8(a, b) asm ("maxub8 %a0,%a1,%v0", a, b)
-#define maxsb8(a, b) asm ("maxsb8 %a0,%a1,%v0", a, b)
-#define maxuw4(a, b) asm ("maxuw4 %a0,%a1,%v0", a, b)
-#define maxsw4(a, b) asm ("maxsw4 %a0,%a1,%v0", a, b)
-#define perr(a, b) asm ("perr %a0,%a1,%v0", a, b)
-#define pklb(a) asm ("pklb %a0,%v0", a)
-#define pkwb(a) asm ("pkwb %a0,%v0", a)
-#define unpkbl(a) asm ("unpkbl %a0,%v0", a)
-#define unpkbw(a) asm ("unpkbw %a0,%v0", a)
-#define wh64(a) asm ("wh64 %a0", a)
+#define cmpbge(a, b) __asm__ ("cmpbge %a0,%a1,%v0", a, b)
+#define extql(a, b) __asm__ ("extql %a0,%a1,%v0", a, b)
+#define extwl(a, b) __asm__ ("extwl %a0,%a1,%v0", a, b)
+#define extqh(a, b) __asm__ ("extqh %a0,%a1,%v0", a, b)
+#define zap(a, b) __asm__ ("zap %a0,%a1,%v0", a, b)
+#define zapnot(a, b) __asm__ ("zapnot %a0,%a1,%v0", a, b)
+#define amask(a) __asm__ ("amask %a0,%v0", a)
+#define implver() __asm__ ("implver %v0")
+#define rpcc() __asm__ ("rpcc %v0")
+#define minub8(a, b) __asm__ ("minub8 %a0,%a1,%v0", a, b)
+#define minsb8(a, b) __asm__ ("minsb8 %a0,%a1,%v0", a, b)
+#define minuw4(a, b) __asm__ ("minuw4 %a0,%a1,%v0", a, b)
+#define minsw4(a, b) __asm__ ("minsw4 %a0,%a1,%v0", a, b)
+#define maxub8(a, b) __asm__ ("maxub8 %a0,%a1,%v0", a, b)
+#define maxsb8(a, b) __asm__ ("maxsb8 %a0,%a1,%v0", a, b)
+#define maxuw4(a, b) __asm__ ("maxuw4 %a0,%a1,%v0", a, b)
+#define maxsw4(a, b) __asm__ ("maxsw4 %a0,%a1,%v0", a, b)
+#define perr(a, b) __asm__ ("perr %a0,%a1,%v0", a, b)
+#define pklb(a) __asm__ ("pklb %a0,%v0", a)
+#define pkwb(a) __asm__ ("pkwb %a0,%v0", a)
+#define unpkbl(a) __asm__ ("unpkbl %a0,%v0", a)
+#define unpkbw(a) __asm__ ("unpkbw %a0,%v0", a)
+#define wh64(a) __asm__ ("wh64 %a0", a)
 #else
 #error "Unknown compiler!"
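
For reference: the GCC branches above combine two GNU extensions, inline
assembly and statement expressions, where the last expression inside
({ ... }) becomes the value of the block, which is how each macro returns
__r. A minimal standalone sketch of the pattern (the macro name and the
empty asm template below are illustrative, not taken from the tree):

    #include <stdint.h>

    /* __r is forced through a register by the (empty) asm template;
       the trailing __r makes it the value of the whole ({ ... }) block. */
    #define identity_u64(x) \
        ({ uint64_t __r = (x); __asm__ ("" : "+r"(__r)); __r; })

Note that switching to __asm__ only fixes the keyword; the ({ ... }) form
remains a GNU extension, which is harmless here since these branches are
GCC-only.
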
diff -r a512ac8fa540 -r eebc7209c47f armv4l/dsputil_arm.c --- a/armv4l/dsputil_arm.c Wed Oct 15 08:01:54 2008 +0000 +++ b/armv4l/dsputil_arm.c Thu Oct 16 13:34:09 2008 +0000 @@ -66,7 +66,7 @@ static void add_pixels_clamped_ARM(short *block, unsigned char *dest, int line_size) { - asm volatile ( + __asm__ volatile ( "mov r10, #8 \n\t" "1: \n\t" @@ -206,7 +206,7 @@ #ifdef HAVE_ARMV5TE static void prefetch_arm(void *mem, int stride, int h) { - asm volatile( + __asm__ volatile( "1: \n\t" "subs %0, %0, #1 \n\t" "pld [%1] \n\t" diff -r a512ac8fa540 -r eebc7209c47f armv4l/dsputil_iwmmxt.c --- a/armv4l/dsputil_iwmmxt.c Wed Oct 15 08:01:54 2008 +0000 +++ b/armv4l/dsputil_iwmmxt.c Thu Oct 16 13:34:09 2008 +0000 @@ -22,7 +22,7 @@ #include "libavcodec/dsputil.h" #define DEF(x, y) x ## _no_rnd_ ## y ##_iwmmxt -#define SET_RND(regd) asm volatile ("mov r12, #1 \n\t tbcsth " #regd ", r12":::"r12"); +#define SET_RND(regd) __asm__ volatile ("mov r12, #1 \n\t tbcsth " #regd ", r12":::"r12"); #define WAVG2B "wavg2b" #include "dsputil_iwmmxt_rnd.h" #undef DEF @@ -30,7 +30,7 @@ #undef WAVG2B #define DEF(x, y) x ## _ ## y ##_iwmmxt -#define SET_RND(regd) asm volatile ("mov r12, #2 \n\t tbcsth " #regd ", r12":::"r12"); +#define SET_RND(regd) __asm__ volatile ("mov r12, #2 \n\t tbcsth " #regd ", r12":::"r12"); #define WAVG2B "wavg2br" #include "dsputil_iwmmxt_rnd.h" #undef DEF @@ -39,7 +39,7 @@ // need scheduling #define OP(AVG) \ - asm volatile ( \ + __asm__ volatile ( \ /* alignment */ \ "and r12, %[pixels], #7 \n\t" \ "bic %[pixels], %[pixels], #7 \n\t" \ @@ -89,7 +89,7 @@ { uint8_t *pixels2 = pixels + line_size; - asm volatile ( + __asm__ volatile ( "mov r12, #4 \n\t" "1: \n\t" "pld [%[pixels], %[line_size2]] \n\t" @@ -125,7 +125,7 @@ static void clear_blocks_iwmmxt(DCTELEM *blocks) { - asm volatile( + __asm__ volatile( "wzero wr0 \n\t" "mov r1, #(128 * 6 / 32) \n\t" "1: \n\t" diff -r a512ac8fa540 -r eebc7209c47f armv4l/dsputil_iwmmxt_rnd.h --- a/armv4l/dsputil_iwmmxt_rnd.h Wed Oct 15 08:01:54 2008 +0000 +++ b/armv4l/dsputil_iwmmxt_rnd.h Thu Oct 16 13:34:09 2008 +0000 @@ -26,7 +26,7 @@ void DEF(put, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h) { int stride = line_size; - asm volatile ( + __asm__ volatile ( "and r12, %[pixels], #7 \n\t" "bic %[pixels], %[pixels], #7 \n\t" "tmcr wcgr1, r12 \n\t" @@ -60,7 +60,7 @@ void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h) { int stride = line_size; - asm volatile ( + __asm__ volatile ( "and r12, %[pixels], #7 \n\t" "bic %[pixels], %[pixels], #7 \n\t" "tmcr wcgr1, r12 \n\t" @@ -102,7 +102,7 @@ void DEF(put, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h) { int stride = line_size; - asm volatile ( + __asm__ volatile ( "and r12, %[pixels], #7 \n\t" "bic %[pixels], %[pixels], #7 \n\t" "tmcr wcgr1, r12 \n\t" @@ -142,7 +142,7 @@ void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h) { int stride = line_size; - asm volatile ( + __asm__ volatile ( "pld [%[pixels]] \n\t" "pld [%[pixels], #32] \n\t" "pld [%[block]] \n\t" @@ -201,7 +201,7 @@ // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line SET_RND(wr15); // =2 for rnd and =1 for no_rnd version - asm volatile( + __asm__ volatile( "pld [%[pixels]] \n\t" "pld [%[pixels], #32] \n\t" "and r12, %[pixels], #7 \n\t" @@ -250,7 +250,7 @@ // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line SET_RND(wr15); // =2 for rnd and =1 for no_rnd version - asm volatile( 
+ __asm__ volatile( "pld [%[pixels]] \n\t" "pld [%[pixels], #32] \n\t" "and r12, %[pixels], #7 \n\t" @@ -311,7 +311,7 @@ // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line SET_RND(wr15); // =2 for rnd and =1 for no_rnd version - asm volatile( + __asm__ volatile( "pld [%[pixels]] \n\t" "pld [%[pixels], #32] \n\t" "pld [%[block]] \n\t" @@ -372,7 +372,7 @@ // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line SET_RND(wr15); // =2 for rnd and =1 for no_rnd version - asm volatile( + __asm__ volatile( "pld [%[pixels]] \n\t" "pld [%[pixels], #32] \n\t" "pld [%[block]] \n\t" @@ -448,7 +448,7 @@ int stride = line_size; // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line - asm volatile( + __asm__ volatile( "pld [%[pixels]] \n\t" "pld [%[pixels], #32] \n\t" "and r12, %[pixels], #7 \n\t" @@ -502,7 +502,7 @@ int stride = line_size; // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line - asm volatile( + __asm__ volatile( "pld [%[pixels]] \n\t" "pld [%[pixels], #32] \n\t" "and r12, %[pixels], #7 \n\t" @@ -559,7 +559,7 @@ int stride = line_size; // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line - asm volatile( + __asm__ volatile( "pld [%[pixels]] \n\t" "pld [%[pixels], #32] \n\t" "and r12, %[pixels], #7 \n\t" @@ -627,7 +627,7 @@ // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line SET_RND(wr15); // =2 for rnd and =1 for no_rnd version - asm volatile( + __asm__ volatile( "pld [%[pixels]] \n\t" "mov r12, #2 \n\t" "pld [%[pixels], #32] \n\t" @@ -721,7 +721,7 @@ // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line SET_RND(wr15); // =2 for rnd and =1 for no_rnd version - asm volatile( + __asm__ volatile( "pld [%[pixels]] \n\t" "mov r12, #2 \n\t" "pld [%[pixels], #32] \n\t" @@ -863,7 +863,7 @@ // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line SET_RND(wr15); // =2 for rnd and =1 for no_rnd version - asm volatile( + __asm__ volatile( "pld [%[block]] \n\t" "pld [%[block], #32] \n\t" "pld [%[pixels]] \n\t" @@ -967,7 +967,7 @@ // [wr0 wr1 wr2 wr3] for previous line // [wr4 wr5 wr6 wr7] for current line SET_RND(wr15); // =2 for rnd and =1 for no_rnd version - asm volatile( + __asm__ volatile( "pld [%[block]] \n\t" "pld [%[block], #32] \n\t" "pld [%[pixels]] \n\t" diff -r a512ac8fa540 -r eebc7209c47f armv4l/float_arm_vfp.c --- a/armv4l/float_arm_vfp.c Wed Oct 15 08:01:54 2008 +0000 +++ b/armv4l/float_arm_vfp.c Thu Oct 16 13:34:09 2008 +0000 @@ -42,7 +42,7 @@ static void vector_fmul_vfp(float *dst, const float *src, int len) { int tmp; - asm volatile( + __asm__ volatile( "fmrx %[tmp], fpscr\n\t" "orr %[tmp], %[tmp], #(3 << 16)\n\t" /* set vector size to 4 */ "fmxr fpscr, %[tmp]\n\t" @@ -90,7 +90,7 @@ static void vector_fmul_reverse_vfp(float *dst, const float *src0, const float *src1, int len) { src1 += len; - asm volatile( + __asm__ volatile( "fldmdbs %[src1]!, {s0-s3}\n\t" "fldmias %[src0]!, {s8-s11}\n\t" "fldmdbs %[src1]!, {s4-s7}\n\t" @@ -149,7 +149,7 @@ */ void float_to_int16_vfp(int16_t *dst, const float *src, int len) { - asm volatile( + __asm__ volatile( "fldmias %[src]!, {s16-s23}\n\t" "ftosis s0, s16\n\t" "ftosis s1, s17\n\t" diff -r a512ac8fa540 -r eebc7209c47f armv4l/mathops.h --- a/armv4l/mathops.h Wed Oct 15 08:01:54 2008 +0000 +++ b/armv4l/mathops.h Thu Oct 16 13:34:09 2008 +0000 @@ -25,7 +25,7 @@ #ifdef FRAC_BITS # define MULL(a, b) \ ({ int lo, hi;\ - asm("smull %0, %1, %2, %3 \n\t"\ + __asm__("smull %0, 
%1, %2, %3 \n\t"\ "mov %0, %0, lsr %4\n\t"\ "add %1, %0, %1, lsl %5\n\t"\ : "=&r"(lo), "=&r"(hi)\ @@ -37,21 +37,21 @@ static inline av_const int MULH(int a, int b) { int r; - asm ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b)); + __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b)); return r; } #define MULH MULH #else #define MULH(a, b) \ ({ int lo, hi;\ - asm ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));\ + __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));\ hi; }) #endif static inline av_const int64_t MUL64(int a, int b) { union { uint64_t x; unsigned hl[2]; } x; - asm ("smull %0, %1, %2, %3" + __asm__ ("smull %0, %1, %2, %3" : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b)); return x.x; } @@ -60,7 +60,7 @@ static inline av_const int64_t MAC64(int64_t d, int a, int b) { union { uint64_t x; unsigned hl[2]; } x = { d }; - asm ("smlal %0, %1, %2, %3" + __asm__ ("smlal %0, %1, %2, %3" : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b)); return x.x; } @@ -71,11 +71,11 @@ /* signed 16x16 -> 32 multiply add accumulate */ # define MAC16(rt, ra, rb) \ - asm ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb)); + __asm__ ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb)); /* signed 16x16 -> 32 multiply */ # define MUL16(ra, rb) \ ({ int __rt; \ - asm ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \ + __asm__ ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \ __rt; }) #endif diff -r a512ac8fa540 -r eebc7209c47f armv4l/mpegvideo_armv5te.c --- a/armv4l/mpegvideo_armv5te.c Wed Oct 15 08:01:54 2008 +0000 +++ b/armv4l/mpegvideo_armv5te.c Thu Oct 16 13:34:09 2008 +0000 @@ -65,7 +65,7 @@ ({ DCTELEM *xblock = xxblock; \ int xqmul = xxqmul, xqadd = xxqadd, xcount = xxcount, xtmp; \ int xdata1, xdata2; \ -asm volatile( \ +__asm__ volatile( \ "subs %[count], %[count], #2 \n\t" \ "ble 2f \n\t" \ "ldrd r4, [%[block], #0] \n\t" \ diff -r a512ac8fa540 -r eebc7209c47f armv4l/mpegvideo_iwmmxt.c --- a/armv4l/mpegvideo_iwmmxt.c Wed Oct 15 08:01:54 2008 +0000 +++ b/armv4l/mpegvideo_iwmmxt.c Thu Oct 16 13:34:09 2008 +0000 @@ -48,7 +48,7 @@ else nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; - asm volatile ( + __asm__ volatile ( /* "movd %1, %%mm6 \n\t" //qmul */ /* "packssdw %%mm6, %%mm6 \n\t" */ /* "packssdw %%mm6, %%mm6 \n\t" */ diff -r a512ac8fa540 -r eebc7209c47f bfin/dsputil_bfin.c --- a/bfin/dsputil_bfin.c Wed Oct 15 08:01:54 2008 +0000 +++ b/bfin/dsputil_bfin.c Thu Oct 16 13:34:09 2008 +0000 @@ -77,7 +77,7 @@ { // This is just a simple memset. 
// - asm("P0=192; " + __asm__("P0=192; " "I0=%0; " "R0=0; " "LSETUP(clear_blocks_blkfn_lab,clear_blocks_blkfn_lab)LC0=P0;" diff -r a512ac8fa540 -r eebc7209c47f bfin/mathops.h --- a/bfin/mathops.h Wed Oct 15 08:01:54 2008 +0000 +++ b/bfin/mathops.h Thu Oct 16 13:34:09 2008 +0000 @@ -24,7 +24,7 @@ #ifdef CONFIG_MPEGAUDIO_HP #define MULH(X,Y) ({ int xxo; \ - asm ( \ + __asm__ ( \ "a1 = %2.L * %1.L (FU);\n\t" \ "a1 = a1 >> 16;\n\t" \ "a1 += %2.H * %1.L (IS,M);\n\t" \ @@ -34,7 +34,7 @@ : "=d" (xxo) : "d" (X), "d" (Y) : "A0","A1"); xxo; }) #else #define MULH(X,Y) ({ int xxo; \ - asm ( \ + __asm__ ( \ "a1 = %2.H * %1.L (IS,M);\n\t" \ "a0 = %1.H * %2.H, a1+= %1.H * %2.L (IS,M);\n\t"\ "a1 = a1 >>> 16;\n\t" \ @@ -44,7 +44,7 @@ /* signed 16x16 -> 32 multiply */ #define MUL16(a, b) ({ int xxo; \ - asm ( \ + __asm__ ( \ "%0 = %1.l*%2.l (is);\n\t" \ : "=W" (xxo) : "d" (a), "d" (b) : "A1"); \ xxo; }) diff -r a512ac8fa540 -r eebc7209c47f bfin/mpegvideo_bfin.c --- a/bfin/mpegvideo_bfin.c Wed Oct 15 08:01:54 2008 +0000 +++ b/bfin/mpegvideo_bfin.c Thu Oct 16 13:34:09 2008 +0000 @@ -88,7 +88,7 @@ /* block[i] = level; */ /* } */ - asm volatile + __asm__ volatile ("i2=%1;\n\t" "r1=[%1++]; \n\t" "r0=r1>>>15 (v); \n\t" @@ -114,7 +114,7 @@ PROF("zzscan",5); - asm volatile + __asm__ volatile ("r0=b[%1--] (x); \n\t" "lsetup (0f,1f) lc0=%3; \n\t" /* for(i=63; i>=start_i; i--) { */ "0: p0=r0; \n\t" /* j = scantable[i]; */ diff -r a512ac8fa540 -r eebc7209c47f bitstream.h --- a/bitstream.h Wed Oct 15 08:01:54 2008 +0000 +++ b/bitstream.h Thu Oct 16 13:34:09 2008 +0000 @@ -55,14 +55,14 @@ #if defined(ARCH_X86) // avoid +32 for shift optimization (gcc should do that ...) static inline int32_t NEG_SSR32( int32_t a, int8_t s){ - asm ("sarl %1, %0\n\t" + __asm__ ("sarl %1, %0\n\t" : "+r" (a) : "ic" ((uint8_t)(-s)) ); return a; } static inline uint32_t NEG_USR32(uint32_t a, int8_t s){ - asm ("shrl %1, %0\n\t" + __asm__ ("shrl %1, %0\n\t" : "+r" (a) : "ic" ((uint8_t)(-s)) ); @@ -248,7 +248,7 @@ { # ifdef ALIGNED_BITSTREAM_WRITER # if defined(ARCH_X86) - asm volatile( + __asm__ volatile( "movl %0, %%ecx \n\t" "xorl %%eax, %%eax \n\t" "shrdl %%cl, %1, %%eax \n\t" @@ -279,7 +279,7 @@ # endif # else //ALIGNED_BITSTREAM_WRITER # if defined(ARCH_X86) - asm volatile( + __asm__ volatile( "movl $7, %%ecx \n\t" "andl %0, %%ecx \n\t" "addl %3, %%ecx \n\t" @@ -556,7 +556,7 @@ #if defined(ARCH_X86) # define SKIP_CACHE(name, gb, num)\ - asm(\ + __asm__(\ "shldl %2, %1, %0 \n\t"\ "shll %2, %1 \n\t"\ : "+r" (name##_cache0), "+r" (name##_cache1)\ diff -r a512ac8fa540 -r eebc7209c47f cabac.h --- a/cabac.h Wed Oct 15 08:01:54 2008 +0000 +++ b/cabac.h Thu Oct 16 13:34:09 2008 +0000 @@ -304,7 +304,7 @@ int temp; #if 0 //P3:683 athlon:475 - asm( + __asm__( "lea -0x100(%0), %2 \n\t" "shr $31, %2 \n\t" //FIXME 31->63 for x86-64 "shl %%cl, %0 \n\t" @@ -313,7 +313,7 @@ ); #elif 0 //P3:680 athlon:474 - asm( + __asm__( "cmp $0x100, %0 \n\t" "setb %%cl \n\t" //FIXME 31->63 for x86-64 "shl %%cl, %0 \n\t" @@ -323,7 +323,7 @@ #elif 1 int temp2; //P3:665 athlon:517 - asm( + __asm__( "lea -0x100(%0), %%eax \n\t" "cltd \n\t" "mov %0, %%eax \n\t" @@ -336,7 +336,7 @@ #elif 0 int temp2; //P3:673 athlon:509 - asm( + __asm__( "cmp $0x100, %0 \n\t" "sbb %%edx, %%edx \n\t" "mov %0, %%eax \n\t" @@ -349,7 +349,7 @@ #else int temp2; //P3:677 athlon:511 - asm( + __asm__( "cmp $0x100, %0 \n\t" "lea (%0, %0), %%eax \n\t" "lea (%1, %1), %%edx \n\t" @@ -385,7 +385,7 @@ int bit; #ifndef BRANCHLESS_CABAC_DECODER - asm volatile( + __asm__ volatile( "movzbl (%1), %0 \n\t" "movl 
"RANGE "(%2), %%ebx \n\t" "movl "RANGE "(%2), %%edx \n\t" @@ -524,7 +524,7 @@ "add "tmp" , "low" \n\t"\ "1: \n\t" - asm volatile( + __asm__ volatile( "movl "RANGE "(%2), %%esi \n\t" "movl "LOW "(%2), %%ebx \n\t" BRANCHLESS_GET_CABAC("%0", "%2", "(%1)", "%%ebx", "%%bx", "%%esi", "%%edx", "%%dl") @@ -591,7 +591,7 @@ static int av_unused get_cabac_bypass(CABACContext *c){ #if 0 //not faster int bit; - asm volatile( + __asm__ volatile( "movl "RANGE "(%1), %%ebx \n\t" "movl "LOW "(%1), %%eax \n\t" "shl $17, %%ebx \n\t" @@ -638,7 +638,7 @@ static av_always_inline int get_cabac_bypass_sign(CABACContext *c, int val){ #if defined(ARCH_X86) && !(defined(PIC) && defined(__GNUC__)) - asm volatile( + __asm__ volatile( "movl "RANGE "(%1), %%ebx \n\t" "movl "LOW "(%1), %%eax \n\t" "shl $17, %%ebx \n\t" diff -r a512ac8fa540 -r eebc7209c47f dct-test.c --- a/dct-test.c Wed Oct 15 08:01:54 2008 +0000 +++ b/dct-test.c Thu Oct 16 13:34:09 2008 +0000 @@ -177,7 +177,7 @@ { #ifdef HAVE_MMX if (cpu_flags & MM_MMX) - asm volatile ("emms\n\t"); + __asm__ volatile ("emms\n\t"); #endif } diff -r a512ac8fa540 -r eebc7209c47f dsputil.h --- a/dsputil.h Wed Oct 15 08:01:54 2008 +0000 +++ b/dsputil.h Thu Oct 16 13:34:09 2008 +0000 @@ -579,7 +579,7 @@ static inline void emms(void) { - asm volatile ("emms;":::"memory"); + __asm__ volatile ("emms;":::"memory"); } diff -r a512ac8fa540 -r eebc7209c47f i386/cavsdsp_mmx.c --- a/i386/cavsdsp_mmx.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/cavsdsp_mmx.c Thu Oct 16 13:34:09 2008 +0000 @@ -35,7 +35,7 @@ static inline void cavs_idct8_1d(int16_t *block, uint64_t bias) { - asm volatile( + __asm__ volatile( "movq 112(%0), %%mm4 \n\t" /* mm4 = src7 */ "movq 16(%0), %%mm5 \n\t" /* mm5 = src1 */ "movq 80(%0), %%mm2 \n\t" /* mm2 = src5 */ @@ -120,7 +120,7 @@ cavs_idct8_1d(block+4*i, ff_pw_4); - asm volatile( + __asm__ volatile( "psraw $3, %%mm7 \n\t" "psraw $3, %%mm6 \n\t" "psraw $3, %%mm5 \n\t" @@ -150,7 +150,7 @@ for(i=0; i<2; i++){ cavs_idct8_1d(b2+4*i, ff_pw_64); - asm volatile( + __asm__ volatile( "psraw $7, %%mm7 \n\t" "psraw $7, %%mm6 \n\t" "psraw $7, %%mm5 \n\t" @@ -175,7 +175,7 @@ add_pixels_clamped_mmx(b2, dst, stride); /* clear block */ - asm volatile( + __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "movq %%mm7, (%0) \n\t" "movq %%mm7, 8(%0) \n\t" @@ -275,7 +275,7 @@ src -= 2*srcStride;\ \ while(w--){\ - asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ @@ -306,7 +306,7 @@ : "memory"\ );\ if(h==16){\ - asm volatile(\ + __asm__ volatile(\ VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ @@ -328,7 +328,7 @@ #define QPEL_CAVS(OPNAME, OP, MMX)\ static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ int h=8;\ - asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq %5, %%mm6 \n\t"\ "1: \n\t"\ diff -r a512ac8fa540 -r eebc7209c47f i386/cpuid.c --- a/i386/cpuid.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/cpuid.c Thu Oct 16 13:34:09 2008 +0000 @@ -28,7 +28,7 @@ /* ebx saving is necessary for PIC. gcc seems unable to see it alone */ #define cpuid(index,eax,ebx,ecx,edx)\ - asm volatile\ + __asm__ volatile\ ("mov %%"REG_b", %%"REG_S"\n\t"\ "cpuid\n\t"\ "xchg %%"REG_b", %%"REG_S\ @@ -44,7 +44,7 @@ int max_std_level, max_ext_level, std_caps=0, ext_caps=0; x86_reg a, c; - asm volatile ( + __asm__ volatile ( /* See if CPUID instruction is supported ... */ /* ... 
Get copies of EFLAGS into eax and ecx */ "pushf\n\t" diff -r a512ac8fa540 -r eebc7209c47f i386/dsputil_h264_template_mmx.c --- a/i386/dsputil_h264_template_mmx.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/dsputil_h264_template_mmx.c Thu Oct 16 13:34:09 2008 +0000 @@ -47,7 +47,7 @@ rnd_reg = rnd ? &ff_pw_4 : &ff_pw_3; - asm volatile( + __asm__ volatile( "movd %0, %%mm5\n\t" "movq %1, %%mm4\n\t" "movq %2, %%mm6\n\t" /* mm6 = rnd */ @@ -58,13 +58,13 @@ :: "rm"(x+y), "m"(ff_pw_8), "m"(*rnd_reg)); for(i=0; i> 6 */ "paddw %1, %%mm2\n\t" "paddw %1, %%mm3\n\t" @@ -187,7 +187,7 @@ static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { - asm volatile( + __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "movd %5, %%mm2 \n\t" "movd %6, %%mm3 \n\t" @@ -259,7 +259,7 @@ int tmp = ((1<<16)-1)*x + 8; int CD= tmp*y; int AB= (tmp<<3) - CD; - asm volatile( + __asm__ volatile( /* mm5 = {A,B,A,B} */ /* mm6 = {C,D,C,D} */ "movd %0, %%mm5\n\t" @@ -274,7 +274,7 @@ :: "r"(AB), "r"(CD), "m"(src[0])); - asm volatile( + __asm__ volatile( "1:\n\t" "add %4, %1\n\t" /* mm1 = A * src[0,1] + B * src[1,2] */ diff -r a512ac8fa540 -r eebc7209c47f i386/dsputil_h264_template_ssse3.c --- a/i386/dsputil_h264_template_ssse3.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/dsputil_h264_template_ssse3.c Thu Oct 16 13:34:09 2008 +0000 @@ -37,7 +37,7 @@ if(y==0 || x==0) { /* 1 dimensional filter only */ - asm volatile( + __asm__ volatile( "movd %0, %%xmm7 \n\t" "movq %1, %%xmm6 \n\t" "pshuflw $0, %%xmm7, %%xmm7 \n\t" @@ -47,7 +47,7 @@ ); if(x) { - asm volatile( + __asm__ volatile( "1: \n\t" "movq (%1), %%xmm0 \n\t" "movq 1(%1), %%xmm1 \n\t" @@ -75,7 +75,7 @@ :"r"((x86_reg)stride) ); } else { - asm volatile( + __asm__ volatile( "1: \n\t" "movq (%1), %%xmm0 \n\t" "movq (%1,%3), %%xmm1 \n\t" @@ -107,7 +107,7 @@ } /* general case, bilinear */ - asm volatile( + __asm__ volatile( "movd %0, %%xmm7 \n\t" "movd %1, %%xmm6 \n\t" "movdqa %2, %%xmm5 \n\t" @@ -118,7 +118,7 @@ :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(*(rnd?&ff_pw_32:&ff_pw_28)) ); - asm volatile( + __asm__ volatile( "movq (%1), %%xmm0 \n\t" "movq 1(%1), %%xmm1 \n\t" "punpcklbw %%xmm1, %%xmm0 \n\t" @@ -160,7 +160,7 @@ static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { - asm volatile( + __asm__ volatile( "movd %0, %%mm7 \n\t" "movd %1, %%mm6 \n\t" "movq %2, %%mm5 \n\t" @@ -169,7 +169,7 @@ :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(ff_pw_32) ); - asm volatile( + __asm__ volatile( "movd (%1), %%mm0 \n\t" "punpcklbw 1(%1), %%mm0 \n\t" "add %3, %1 \n\t" diff -r a512ac8fa540 -r eebc7209c47f i386/dsputil_mmx.c --- a/i386/dsputil_mmx.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/dsputil_mmx.c Thu Oct 16 13:34:09 2008 +0000 @@ -70,28 +70,28 @@ DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 }; DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 }; -#define JUMPALIGN() asm volatile (ASMALIGN(3)::) -#define MOVQ_ZERO(regd) asm volatile ("pxor %%" #regd ", %%" #regd ::) +#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::) +#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::) #define MOVQ_BFE(regd) \ - asm volatile ( \ + __asm__ volatile ( \ "pcmpeqd %%" #regd ", %%" #regd " \n\t"\ "paddb %%" #regd ", %%" #regd " \n\t" ::) #ifndef PIC -#define MOVQ_BONE(regd) asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone)) -#define MOVQ_WTWO(regd) asm volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo)) +#define MOVQ_BONE(regd) __asm__ 
volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone)) +#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo)) #else // for shared library it's better to use this way for accessing constants // pcmpeqd -> -1 #define MOVQ_BONE(regd) \ - asm volatile ( \ + __asm__ volatile ( \ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ "psrlw $15, %%" #regd " \n\t" \ "packuswb %%" #regd ", %%" #regd " \n\t" ::) #define MOVQ_WTWO(regd) \ - asm volatile ( \ + __asm__ volatile ( \ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ "psrlw $15, %%" #regd " \n\t" \ "psllw $1, %%" #regd " \n\t"::) @@ -223,7 +223,7 @@ p = block; pix = pixels; /* unrolled loop */ - asm volatile( + __asm__ volatile( "movq %3, %%mm0 \n\t" "movq 8%3, %%mm1 \n\t" "movq 16%3, %%mm2 \n\t" @@ -248,7 +248,7 @@ // if here would be an exact copy of the code above // compiler would generate some very strange code // thus using "r" - asm volatile( + __asm__ volatile( "movq (%3), %%mm0 \n\t" "movq 8(%3), %%mm1 \n\t" "movq 16(%3), %%mm2 \n\t" @@ -299,7 +299,7 @@ MOVQ_ZERO(mm7); i = 4; do { - asm volatile( + __asm__ volatile( "movq (%2), %%mm0 \n\t" "movq 8(%2), %%mm1 \n\t" "movq 16(%2), %%mm2 \n\t" @@ -330,7 +330,7 @@ static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) { - asm volatile( + __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" ASMALIGN(3) "1: \n\t" @@ -356,7 +356,7 @@ static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) { - asm volatile( + __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" ASMALIGN(3) "1: \n\t" @@ -382,7 +382,7 @@ static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) { - asm volatile( + __asm__ volatile( "lea (%3, %3), %%"REG_a" \n\t" ASMALIGN(3) "1: \n\t" @@ -416,7 +416,7 @@ static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h) { - asm volatile( + __asm__ volatile( "1: \n\t" "movdqu (%1), %%xmm0 \n\t" "movdqu (%1,%3), %%xmm1 \n\t" @@ -438,7 +438,7 @@ static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h) { - asm volatile( + __asm__ volatile( "1: \n\t" "movdqu (%1), %%xmm0 \n\t" "movdqu (%1,%3), %%xmm1 \n\t" @@ -464,7 +464,7 @@ static void clear_blocks_mmx(DCTELEM *blocks) { - asm volatile( + __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "mov $-128*6, %%"REG_a" \n\t" "1: \n\t" @@ -481,7 +481,7 @@ static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){ x86_reg i=0; - asm volatile( + __asm__ volatile( "jmp 2f \n\t" "1: \n\t" "movq (%1, %0), %%mm0 \n\t" @@ -505,7 +505,7 @@ static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ x86_reg i=0; - asm volatile( + __asm__ volatile( "jmp 2f \n\t" "1: \n\t" "movq (%2, %0), %%mm0 \n\t" @@ -600,7 +600,7 @@ if(ENABLE_ANY_H263) { const int strength= ff_h263_loop_filter_strength[qscale]; - asm volatile( + __asm__ volatile( H263_LOOP_FILTER @@ -618,7 +618,7 @@ } static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){ - asm volatile( //FIXME could save 1 instruction if done as 8x4 ... + __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ... 
"movd %4, %%mm0 \n\t" "movd %5, %%mm1 \n\t" "movd %6, %%mm2 \n\t" @@ -656,7 +656,7 @@ transpose4x4(btemp , src , 8, stride); transpose4x4(btemp+4, src + 4*stride, 8, stride); - asm volatile( + __asm__ volatile( H263_LOOP_FILTER // 5 3 4 6 : "+m" (temp[0]), @@ -666,7 +666,7 @@ : "g" (2*strength), "m"(ff_pb_FC) ); - asm volatile( + __asm__ volatile( "movq %%mm5, %%mm1 \n\t" "movq %%mm4, %%mm0 \n\t" "punpcklbw %%mm3, %%mm5 \n\t" @@ -711,7 +711,7 @@ ptr = buf; if(w==8) { - asm volatile( + __asm__ volatile( "1: \n\t" "movd (%0), %%mm0 \n\t" "punpcklbw %%mm0, %%mm0 \n\t" @@ -732,7 +732,7 @@ } else { - asm volatile( + __asm__ volatile( "1: \n\t" "movd (%0), %%mm0 \n\t" "punpcklbw %%mm0, %%mm0 \n\t" @@ -757,7 +757,7 @@ for(i=0;inbits; int i; ff_fft_dispatch_interleave_3dn2(z, s->nbits); - asm volatile("femms"); + __asm__ volatile("femms"); if(n <= 8) for(i=0; inbits; int i; for(i=0; i=0; k-=2) { - asm volatile( + __asm__ volatile( "movaps (%2,%1,2), %%xmm0 \n" // { z[k].re, z[k].im, z[k+1].re, z[k+1].im } "movaps -16(%2,%0,2), %%xmm1 \n" // { z[-k-2].re, z[-k-2].im, z[-k-1].re, z[-k-1].im } "movaps %%xmm0, %%xmm2 \n" @@ -111,7 +111,7 @@ #ifdef ARCH_X86_64 // if we have enough regs, don't let gcc make the luts latency-bound // but if not, latency is faster than spilling - asm("movlps %%xmm0, %0 \n" + __asm__("movlps %%xmm0, %0 \n" "movhps %%xmm0, %1 \n" "movlps %%xmm1, %2 \n" "movhps %%xmm1, %3 \n" @@ -121,10 +121,10 @@ "=m"(z[revtab[ k+1]]) ); #else - asm("movlps %%xmm0, %0" :"=m"(z[revtab[-k-2]])); - asm("movhps %%xmm0, %0" :"=m"(z[revtab[-k-1]])); - asm("movlps %%xmm1, %0" :"=m"(z[revtab[ k ]])); - asm("movhps %%xmm1, %0" :"=m"(z[revtab[ k+1]])); + __asm__("movlps %%xmm0, %0" :"=m"(z[revtab[-k-2]])); + __asm__("movhps %%xmm0, %0" :"=m"(z[revtab[-k-1]])); + __asm__("movlps %%xmm1, %0" :"=m"(z[revtab[ k ]])); + __asm__("movhps %%xmm1, %0" :"=m"(z[revtab[ k+1]])); #endif } @@ -146,7 +146,7 @@ j = -n2; k = n2-16; - asm volatile( + __asm__ volatile( "1: \n" CMUL(%0, %%xmm0, %%xmm1) CMUL(%1, %%xmm4, %%xmm5) @@ -181,7 +181,7 @@ j = -n; k = n-16; - asm volatile( + __asm__ volatile( "movaps %4, %%xmm7 \n" "1: \n" "movaps (%2,%1), %%xmm0 \n" diff -r a512ac8fa540 -r eebc7209c47f i386/flacdsp_mmx.c --- a/i386/flacdsp_mmx.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/flacdsp_mmx.c Thu Oct 16 13:34:09 2008 +0000 @@ -28,7 +28,7 @@ int n2 = len>>1; x86_reg i = -n2*sizeof(int32_t); x86_reg j = n2*sizeof(int32_t); - asm volatile( + __asm__ volatile( "movsd %0, %%xmm7 \n\t" "movapd "MANGLE(ff_pd_1)", %%xmm6 \n\t" "movapd "MANGLE(ff_pd_2)", %%xmm5 \n\t" @@ -38,7 +38,7 @@ ::"m"(c) ); #define WELCH(MOVPD, offset)\ - asm volatile(\ + __asm__ volatile(\ "1: \n\t"\ "movapd %%xmm7, %%xmm1 \n\t"\ "mulpd %%xmm1, %%xmm1 \n\t"\ @@ -84,7 +84,7 @@ for(j=0; j> 6; - asm volatile( + __asm__ volatile( "movd %0, %%mm0 \n\t" "pshufw $0, %%mm0, %%mm0 \n\t" "pxor %%mm1, %%mm1 \n\t" @@ -313,7 +313,7 @@ "packuswb %%mm1, %%mm1 \n\t" ::"r"(dc) ); - asm volatile( + __asm__ volatile( "movd %0, %%mm2 \n\t" "movd %1, %%mm3 \n\t" "movd %2, %%mm4 \n\t" @@ -341,7 +341,7 @@ { int dc = (block[0] + 32) >> 6; int y; - asm volatile( + __asm__ volatile( "movd %0, %%mm0 \n\t" "pshufw $0, %%mm0, %%mm0 \n\t" "pxor %%mm1, %%mm1 \n\t" @@ -351,7 +351,7 @@ ::"r"(dc) ); for(y=2; y--; dst += 4*stride){ - asm volatile( + __asm__ volatile( "movq %0, %%mm2 \n\t" "movq %1, %%mm3 \n\t" "movq %2, %%mm4 \n\t" @@ -463,7 +463,7 @@ { DECLARE_ALIGNED_8(uint64_t, tmp0[2]); - asm volatile( + __asm__ volatile( "movq (%1,%3), %%mm0 \n\t" //p1 "movq (%1,%3,2), %%mm1 \n\t" //p0 
"movq (%2), %%mm2 \n\t" //q0 @@ -540,7 +540,7 @@ static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0) { - asm volatile( + __asm__ volatile( "movq (%0), %%mm0 \n\t" //p1 "movq (%0,%2), %%mm1 \n\t" //p0 "movq (%1), %%mm2 \n\t" //q0 @@ -586,7 +586,7 @@ static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1) { - asm volatile( + __asm__ volatile( "movq (%0), %%mm0 \n\t" "movq (%0,%2), %%mm1 \n\t" "movq (%1), %%mm2 \n\t" @@ -628,7 +628,7 @@ static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2], int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) { int dir; - asm volatile( + __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "movq %0, %%mm6 \n\t" "movq %1, %%mm5 \n\t" @@ -636,7 +636,7 @@ ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7) ); if(field) - asm volatile( + __asm__ volatile( "movq %0, %%mm5 \n\t" "movq %1, %%mm4 \n\t" ::"m"(ff_pb_3_1), "m"(ff_pb_7_3) @@ -650,14 +650,14 @@ DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL; int b_idx, edge, l; for( b_idx=12, edge=0; edge= 0; l-- ) { - asm volatile( + __asm__ volatile( "movd %0, %%mm1 \n\t" "punpckldq %1, %%mm1 \n\t" "movq %%mm1, %%mm2 \n\t" @@ -688,7 +688,7 @@ ); } } - asm volatile( + __asm__ volatile( "movd %0, %%mm1 \n\t" "por %1, %%mm1 \n\t" "punpcklbw %%mm7, %%mm1 \n\t" @@ -696,7 +696,7 @@ ::"m"(nnz[b_idx]), "m"(nnz[b_idx+d_idx]) ); - asm volatile( + __asm__ volatile( "pcmpeqw %%mm7, %%mm0 \n\t" "pcmpeqw %%mm7, %%mm0 \n\t" "psrlw $15, %%mm0 \n\t" // nonzero -> 1 @@ -713,7 +713,7 @@ edges = 4; step = 1; } - asm volatile( + __asm__ volatile( "movq (%0), %%mm0 \n\t" "movq 8(%0), %%mm1 \n\t" "movq 16(%0), %%mm2 \n\t" @@ -774,7 +774,7 @@ static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ int h=4;\ \ - asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq %5, %%mm4 \n\t"\ "movq %6, %%mm5 \n\t"\ @@ -813,14 +813,14 @@ }\ static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ int h=4;\ - asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq %0, %%mm4 \n\t"\ "movq %1, %%mm5 \n\t"\ :: "m"(ff_pw_5), "m"(ff_pw_16)\ );\ do{\ - asm volatile(\ + __asm__ volatile(\ "movd -1(%0), %%mm1 \n\t"\ "movd (%0), %%mm2 \n\t"\ "movd 1(%0), %%mm3 \n\t"\ @@ -857,7 +857,7 @@ }\ static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ src -= 2*srcStride;\ - asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ @@ -889,7 +889,7 @@ int w=3;\ src -= 2*srcStride+2;\ while(w--){\ - asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ @@ -919,7 +919,7 @@ src += 4 - 9*srcStride;\ }\ tmp -= 3*4;\ - asm volatile(\ + __asm__ volatile(\ "1: \n\t"\ "movq (%0), %%mm0 \n\t"\ "paddw 10(%0), %%mm0 \n\t"\ @@ -948,7 +948,7 @@ \ static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ int h=8;\ - asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq %5, %%mm6 \n\t"\ "1: \n\t"\ @@ -1005,13 +1005,13 @@ \ static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ int h=8;\ - 
asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movq %0, %%mm6 \n\t"\ :: "m"(ff_pw_5)\ );\ do{\ - asm volatile(\ + __asm__ volatile(\ "movq (%0), %%mm0 \n\t"\ "movq 1(%0), %%mm2 \n\t"\ "movq %%mm0, %%mm1 \n\t"\ @@ -1071,7 +1071,7 @@ src -= 2*srcStride;\ \ while(w--){\ - asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ @@ -1102,7 +1102,7 @@ : "memory"\ );\ if(h==16){\ - asm volatile(\ + __asm__ volatile(\ QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ @@ -1125,7 +1125,7 @@ int w = (size+8)>>2;\ src -= 2*srcStride+2;\ while(w--){\ - asm volatile(\ + __asm__ volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ "add %2, %0 \n\t"\ @@ -1155,7 +1155,7 @@ : "memory"\ );\ if(size==16){\ - asm volatile(\ + __asm__ volatile(\ QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\ QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\ QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\ @@ -1177,7 +1177,7 @@ int w = size>>4;\ do{\ int h = size;\ - asm volatile(\ + __asm__ volatile(\ "1: \n\t"\ "movq (%0), %%mm0 \n\t"\ "movq 8(%0), %%mm3 \n\t"\ @@ -1261,7 +1261,7 @@ \ static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ {\ - asm volatile(\ + __asm__ volatile(\ "movq (%1), %%mm0 \n\t"\ "movq 24(%1), %%mm1 \n\t"\ "psraw $5, %%mm0 \n\t"\ @@ -1291,7 +1291,7 @@ static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ {\ do{\ - asm volatile(\ + __asm__ volatile(\ "movq (%1), %%mm0 \n\t"\ "movq 8(%1), %%mm1 \n\t"\ "movq 48(%1), %%mm2 \n\t"\ @@ -1325,7 +1325,7 @@ #define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ int h=16;\ - asm volatile(\ + __asm__ volatile(\ "pxor %%xmm15, %%xmm15 \n\t"\ "movdqa %6, %%xmm14 \n\t"\ "movdqa %7, %%xmm13 \n\t"\ @@ -1403,13 +1403,13 @@ #define QPEL_H264_H_XMM(OPNAME, OP, MMX)\ static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ int h=8;\ - asm volatile(\ + __asm__ volatile(\ "pxor %%xmm7, %%xmm7 \n\t"\ "movdqa %0, %%xmm6 \n\t"\ :: "m"(ff_pw_5)\ );\ do{\ - asm volatile(\ + __asm__ volatile(\ "lddqu -5(%0), %%xmm1 \n\t"\ "movdqa %%xmm1, %%xmm0 \n\t"\ "punpckhbw %%xmm7, %%xmm1 \n\t"\ @@ -1450,7 +1450,7 @@ \ static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ int h=8;\ - asm volatile(\ + __asm__ volatile(\ "pxor %%xmm7, %%xmm7 \n\t"\ "movdqa %5, %%xmm6 \n\t"\ "1: \n\t"\ @@ -1501,7 +1501,7 @@ static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ src -= 2*srcStride;\ \ - asm volatile(\ + __asm__ volatile(\ "pxor %%xmm7, %%xmm7 \n\t"\ "movq (%0), %%xmm0 \n\t"\ "add %2, %0 \n\t"\ @@ -1532,7 +1532,7 @@ : "memory"\ );\ if(h==16){\ - asm volatile(\ + __asm__ volatile(\ QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\ QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\ QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\ @@ -1560,7 +1560,7 @@ int w = (size+8)>>3; src -= 
2*srcStride+2; while(w--){ - asm volatile( + __asm__ volatile( "pxor %%xmm7, %%xmm7 \n\t" "movq (%0), %%xmm0 \n\t" "add %2, %0 \n\t" @@ -1590,7 +1590,7 @@ : "memory" ); if(size==16){ - asm volatile( + __asm__ volatile( QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 8*48) QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 9*48) QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48) @@ -1613,7 +1613,7 @@ static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\ int h = size;\ if(size == 16){\ - asm volatile(\ + __asm__ volatile(\ "1: \n\t"\ "movdqa 32(%0), %%xmm4 \n\t"\ "movdqa 16(%0), %%xmm5 \n\t"\ @@ -1668,7 +1668,7 @@ : "memory"\ );\ }else{\ - asm volatile(\ + __asm__ volatile(\ "1: \n\t"\ "movdqa 16(%0), %%xmm1 \n\t"\ "movdqa (%0), %%xmm0 \n\t"\ @@ -2022,7 +2022,7 @@ int x, y; offset <<= log2_denom; offset += (1 << log2_denom) >> 1; - asm volatile( + __asm__ volatile( "movd %0, %%mm4 \n\t" "movd %1, %%mm5 \n\t" "movd %2, %%mm6 \n\t" @@ -2033,7 +2033,7 @@ ); for(y=0; yinter_scantable.raster_end[ s->block_last_index[n] ]; //printf("%d %d ", qmul, qadd); -asm volatile( +__asm__ volatile( "movd %1, %%mm6 \n\t" //qmul "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" @@ -118,7 +118,7 @@ nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; //printf("%d %d ", qmul, qadd); -asm volatile( +__asm__ volatile( "movd %1, %%mm6 \n\t" //qmul "packssdw %%mm6, %%mm6 \n\t" "packssdw %%mm6, %%mm6 \n\t" @@ -214,7 +214,7 @@ block0 = block[0] * s->c_dc_scale; /* XXX: only mpeg1 */ quant_matrix = s->intra_matrix; -asm volatile( +__asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $15, %%mm7 \n\t" "movd %2, %%mm6 \n\t" @@ -277,7 +277,7 @@ nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1; quant_matrix = s->inter_matrix; -asm volatile( +__asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $15, %%mm7 \n\t" "movd %2, %%mm6 \n\t" @@ -349,7 +349,7 @@ else block0 = block[0] * s->c_dc_scale; quant_matrix = s->intra_matrix; -asm volatile( +__asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $15, %%mm7 \n\t" "movd %2, %%mm6 \n\t" @@ -410,7 +410,7 @@ else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; quant_matrix = s->inter_matrix; -asm volatile( +__asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlq $48, %%mm7 \n\t" "movd %2, %%mm6 \n\t" @@ -482,7 +482,7 @@ s->dct_count[intra]++; - asm volatile( + __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" "1: \n\t" "pxor %%mm0, %%mm0 \n\t" @@ -536,7 +536,7 @@ s->dct_count[intra]++; - asm volatile( + __asm__ volatile( "pxor %%xmm7, %%xmm7 \n\t" "1: \n\t" "pxor %%xmm0, %%xmm0 \n\t" diff -r a512ac8fa540 -r eebc7209c47f i386/mpegvideo_mmx_template.c --- a/i386/mpegvideo_mmx_template.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/mpegvideo_mmx_template.c Thu Oct 16 13:34:09 2008 +0000 @@ -117,13 +117,13 @@ /* note: block[0] is assumed to be positive */ if (!s->h263_aic) { #if 1 - asm volatile ( + __asm__ volatile ( "mul %%ecx \n\t" : "=d" (level), "=a"(dummy) : "a" ((block[0]>>2) + q), "c" (ff_inverse[q<<1]) ); #else - asm volatile ( + __asm__ volatile ( "xorl %%edx, %%edx \n\t" "divw %%cx \n\t" "movzwl %%ax, %%eax \n\t" @@ -149,7 +149,7 @@ if((s->out_format == FMT_H263 || s->out_format == FMT_H261) && s->mpeg_quant==0){ - asm volatile( + __asm__ volatile( "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1 SPREADW(MM"3") "pxor "MM"7, "MM"7 \n\t" // 0 @@ -182,7 +182,7 @@ "r" (inv_zigzag_direct16+64), "r" 
(temp_block+64) ); }else{ // FMT_H263 - asm volatile( + __asm__ volatile( "movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1 SPREADW(MM"3") "pxor "MM"7, "MM"7 \n\t" // 0 @@ -214,7 +214,7 @@ "r" (inv_zigzag_direct16+64), "r" (temp_block+64) ); } - asm volatile( + __asm__ volatile( "movd %1, "MM"1 \n\t" // max_qcoeff SPREADW(MM"1") "psubusw "MM"1, "MM"4 \n\t" diff -r a512ac8fa540 -r eebc7209c47f i386/simple_idct_mmx.c --- a/i386/simple_idct_mmx.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/simple_idct_mmx.c Thu Oct 16 13:34:09 2008 +0000 @@ -212,7 +212,7 @@ DECLARE_ALIGNED(8, int64_t, align_tmp[16]); int16_t * const temp= (int16_t*)align_tmp; - asm volatile( + __asm__ volatile( #if 0 //Alternative, simpler variant #define ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \ diff -r a512ac8fa540 -r eebc7209c47f i386/snowdsp_mmx.c --- a/i386/snowdsp_mmx.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/snowdsp_mmx.c Thu Oct 16 13:34:09 2008 +0000 @@ -38,7 +38,7 @@ // calculate b[0] correctly afterwards. i = 0; - asm volatile( + __asm__ volatile( "pcmpeqd %%xmm7, %%xmm7 \n\t" "pcmpeqd %%xmm3, %%xmm3 \n\t" "psllw $1, %%xmm3 \n\t" @@ -46,7 +46,7 @@ "psllw $13, %%xmm3 \n\t" ::); for(; i>W_AS); } for(; i>1]; } for (i-=62; i>=0; i-=64){ - asm volatile( + __asm__ volatile( "movdqa (%1), %%xmm0 \n\t" "movdqa 16(%1), %%xmm2 \n\t" "movdqa 32(%1), %%xmm4 \n\t" @@ -224,7 +224,7 @@ i = 1; b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); - asm volatile( + __asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "pcmpeqw %%mm3, %%mm3 \n\t" "psllw $1, %%mm3 \n\t" @@ -232,7 +232,7 @@ "psllw $13, %%mm3 \n\t" ::); for(; i> W_BS); - asm volatile( + __asm__ volatile( "psllw $15, %%mm7 \n\t" "pcmpeqw %%mm6, %%mm6 \n\t" "psrlw $13, %%mm6 \n\t" "paddw %%mm7, %%mm6 \n\t" ::); for(; i>1]; } for (i-=30; i>=0; i-=32){ - asm volatile( + __asm__ volatile( "movq (%1), %%mm0 \n\t" "movq 8(%1), %%mm2 \n\t" "movq 16(%1), %%mm4 \n\t" @@ -448,7 +448,7 @@ } i+=i; - asm volatile ( + __asm__ volatile ( "jmp 2f \n\t" "1: \n\t" snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6") @@ -544,7 +544,7 @@ b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS; } i+=i; - asm volatile( + __asm__ volatile( "jmp 2f \n\t" "1: \n\t" @@ -606,7 +606,7 @@ #define snow_inner_add_yblock_sse2_header \ IDWTELEM * * dst_array = sb->line + src_y;\ x86_reg tmp;\ - asm volatile(\ + __asm__ volatile(\ "mov %7, %%"REG_c" \n\t"\ "mov %6, %2 \n\t"\ "mov %4, %%"REG_S" \n\t"\ @@ -759,7 +759,7 @@ #define snow_inner_add_yblock_mmx_header \ IDWTELEM * * dst_array = sb->line + src_y;\ x86_reg tmp;\ - asm volatile(\ + __asm__ volatile(\ "mov %7, %%"REG_c" \n\t"\ "mov %6, %2 \n\t"\ "mov %4, %%"REG_S" \n\t"\ diff -r a512ac8fa540 -r eebc7209c47f i386/vc1dsp_mmx.c --- a/i386/vc1dsp_mmx.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/vc1dsp_mmx.c Thu Oct 16 13:34:09 2008 +0000 @@ -74,7 +74,7 @@ const uint8_t *src, x86_reg stride, int rnd, int64_t shift) { - asm volatile( + __asm__ volatile( "mov $3, %%"REG_c" \n\t" LOAD_ROUNDER_MMX("%5") "movq "MANGLE(ff_pw_9)", %%mm6 \n\t" @@ -114,7 +114,7 @@ src -= 1; rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */ - asm volatile( + __asm__ volatile( LOAD_ROUNDER_MMX("%4") "movq "MANGLE(ff_pw_128)", %%mm6\n\t" "movq "MANGLE(ff_pw_9)", %%mm5 \n\t" @@ -155,7 +155,7 @@ x86_reg stride, int rnd, x86_reg offset) { rnd = 8-rnd; - asm volatile( + __asm__ volatile( "mov $8, %%"REG_c" \n\t" LOAD_ROUNDER_MMX("%5") "movq "MANGLE(ff_pw_9)", %%mm6\n\t" @@ -264,7 +264,7 @@ { \ int h = 8; \ src -= src_stride; \ - asm volatile( \ + __asm__ volatile( \ LOAD_ROUNDER_MMX("%5") \ 
"movq "MANGLE(ff_pw_53)", %%mm5\n\t" \ "movq "MANGLE(ff_pw_18)", %%mm6\n\t" \ @@ -320,7 +320,7 @@ int h = 8; \ src -= 1; \ rnd -= (-4+58+13-3)*256; /* Add -256 bias */ \ - asm volatile( \ + __asm__ volatile( \ LOAD_ROUNDER_MMX("%4") \ "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \ "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \ @@ -358,7 +358,7 @@ int h = 8; \ src -= offset; \ rnd = 32-rnd; \ - asm volatile ( \ + __asm__ volatile ( \ LOAD_ROUNDER_MMX("%6") \ "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \ "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \ @@ -412,7 +412,7 @@ static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] = { NULL, vc1_put_shift1_mmx, vc1_put_shift2_mmx, vc1_put_shift3_mmx }; - asm volatile( + __asm__ volatile( "pxor %%mm0, %%mm0 \n\t" ::: "memory" ); diff -r a512ac8fa540 -r eebc7209c47f i386/vp3dsp_mmx.c --- a/i386/vp3dsp_mmx.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/vp3dsp_mmx.c Thu Oct 16 13:34:09 2008 +0000 @@ -250,7 +250,7 @@ #define I(x) AV_STRINGIFY(16* x )"(%0)" #define J(x) AV_STRINGIFY(16*(x-4) + 8)"(%0)" - asm volatile ( + __asm__ volatile ( RowIDCT() Transpose() diff -r a512ac8fa540 -r eebc7209c47f i386/vp3dsp_sse2.c --- a/i386/vp3dsp_sse2.c Wed Oct 15 08:01:54 2008 +0000 +++ b/i386/vp3dsp_sse2.c Thu Oct 16 13:34:09 2008 +0000 @@ -161,7 +161,7 @@ #define O(x) I(x) #define C(x) AV_STRINGIFY(16*(x-1))"(%1)" - asm volatile ( + __asm__ volatile ( VP3_1D_IDCT_SSE2(NOP, NOP) TRANSPOSE8(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7, (%0)) diff -r a512ac8fa540 -r eebc7209c47f msmpeg4.c --- a/msmpeg4.c Wed Oct 15 08:01:54 2008 +0000 +++ b/msmpeg4.c Thu Oct 16 13:34:09 2008 +0000 @@ -654,7 +654,7 @@ fact they decided to store the quantized DC (which would lead to problems if Q could vary !) */ #if (defined(ARCH_X86)) && !defined PIC - asm volatile( + __asm__ volatile( "movl %3, %%eax \n\t" "shrl $1, %%eax \n\t" "addl %%eax, %2 \n\t" diff -r a512ac8fa540 -r eebc7209c47f ppc/check_altivec.c --- a/ppc/check_altivec.c Wed Oct 15 08:01:54 2008 +0000 +++ b/ppc/check_altivec.c Thu Oct 16 13:34:09 2008 +0000 @@ -66,7 +66,7 @@ #elif defined(RUNTIME_CPUDETECT) int proc_ver; // Support of mfspr PVR emulation added in Linux 2.6.17. - asm volatile("mfspr %0, 287" : "=r" (proc_ver)); + __asm__ volatile("mfspr %0, 287" : "=r" (proc_ver)); proc_ver >>= 16; if (proc_ver & 0x8000 || proc_ver == 0x000c || diff -r a512ac8fa540 -r eebc7209c47f ppc/dsputil_ppc.c --- a/ppc/dsputil_ppc.c Wed Oct 15 08:01:54 2008 +0000 +++ b/ppc/dsputil_ppc.c Thu Oct 16 13:34:09 2008 +0000 @@ -148,7 +148,7 @@ i += 16; } for ( ; i < sizeof(DCTELEM)*6*64-31 ; i += 32) { - asm volatile("dcbz %0,%1" : : "b" (blocks), "r" (i) : "memory"); + __asm__ volatile("dcbz %0,%1" : : "b" (blocks), "r" (i) : "memory"); } if (misal) { ((unsigned long*)blocks)[188] = 0L; @@ -181,7 +181,7 @@ } else for ( ; i < sizeof(DCTELEM)*6*64 ; i += 128) { - asm volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory"); + __asm__ volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory"); } #else memset(blocks, 0, sizeof(DCTELEM)*6*64); @@ -219,7 +219,7 @@ /* below the constraint "b" seems to mean "Address base register" in gcc-3.3 / RS/6000 speaks. seems to avoid using r0, so.... 
*/ - asm volatile("dcbzl %0, %1" : : "b" (fakedata_middle), "r" (zero)); + __asm__ volatile("dcbzl %0, %1" : : "b" (fakedata_middle), "r" (zero)); for (i = 0; i < 1024 ; i ++) { if (fakedata[i] == (char)0) @@ -241,7 +241,7 @@ { register const uint8_t *p = mem; do { - asm volatile ("dcbt 0,%0" : : "r" (p)); + __asm__ volatile ("dcbt 0,%0" : : "r" (p)); p+= stride; } while(--h); } diff -r a512ac8fa540 -r eebc7209c47f ppc/dsputil_ppc.h --- a/ppc/dsputil_ppc.h Wed Oct 15 08:01:54 2008 +0000 +++ b/ppc/dsputil_ppc.h Thu Oct 16 13:34:09 2008 +0000 @@ -70,36 +70,36 @@ #ifndef HAVE_PPC64 #define POWERP_PMC_DATATYPE unsigned long -#define POWERPC_GET_PMC1(a) asm volatile("mfspr %0, 937" : "=r" (a)) -#define POWERPC_GET_PMC2(a) asm volatile("mfspr %0, 938" : "=r" (a)) +#define POWERPC_GET_PMC1(a) __asm__ volatile("mfspr %0, 937" : "=r" (a)) +#define POWERPC_GET_PMC2(a) __asm__ volatile("mfspr %0, 938" : "=r" (a)) #if (POWERPC_NUM_PMC_ENABLED > 2) -#define POWERPC_GET_PMC3(a) asm volatile("mfspr %0, 941" : "=r" (a)) -#define POWERPC_GET_PMC4(a) asm volatile("mfspr %0, 942" : "=r" (a)) +#define POWERPC_GET_PMC3(a) __asm__ volatile("mfspr %0, 941" : "=r" (a)) +#define POWERPC_GET_PMC4(a) __asm__ volatile("mfspr %0, 942" : "=r" (a)) #else #define POWERPC_GET_PMC3(a) do {} while (0) #define POWERPC_GET_PMC4(a) do {} while (0) #endif #if (POWERPC_NUM_PMC_ENABLED > 4) -#define POWERPC_GET_PMC5(a) asm volatile("mfspr %0, 929" : "=r" (a)) -#define POWERPC_GET_PMC6(a) asm volatile("mfspr %0, 930" : "=r" (a)) +#define POWERPC_GET_PMC5(a) __asm__ volatile("mfspr %0, 929" : "=r" (a)) +#define POWERPC_GET_PMC6(a) __asm__ volatile("mfspr %0, 930" : "=r" (a)) #else #define POWERPC_GET_PMC5(a) do {} while (0) #define POWERPC_GET_PMC6(a) do {} while (0) #endif #else /* HAVE_PPC64 */ #define POWERP_PMC_DATATYPE unsigned long long -#define POWERPC_GET_PMC1(a) asm volatile("mfspr %0, 771" : "=r" (a)) -#define POWERPC_GET_PMC2(a) asm volatile("mfspr %0, 772" : "=r" (a)) +#define POWERPC_GET_PMC1(a) __asm__ volatile("mfspr %0, 771" : "=r" (a)) +#define POWERPC_GET_PMC2(a) __asm__ volatile("mfspr %0, 772" : "=r" (a)) #if (POWERPC_NUM_PMC_ENABLED > 2) -#define POWERPC_GET_PMC3(a) asm volatile("mfspr %0, 773" : "=r" (a)) -#define POWERPC_GET_PMC4(a) asm volatile("mfspr %0, 774" : "=r" (a)) +#define POWERPC_GET_PMC3(a) __asm__ volatile("mfspr %0, 773" : "=r" (a)) +#define POWERPC_GET_PMC4(a) __asm__ volatile("mfspr %0, 774" : "=r" (a)) #else #define POWERPC_GET_PMC3(a) do {} while (0) #define POWERPC_GET_PMC4(a) do {} while (0) #endif #if (POWERPC_NUM_PMC_ENABLED > 4) -#define POWERPC_GET_PMC5(a) asm volatile("mfspr %0, 775" : "=r" (a)) -#define POWERPC_GET_PMC6(a) asm volatile("mfspr %0, 776" : "=r" (a)) +#define POWERPC_GET_PMC5(a) __asm__ volatile("mfspr %0, 775" : "=r" (a)) +#define POWERPC_GET_PMC6(a) __asm__ volatile("mfspr %0, 776" : "=r" (a)) #else #define POWERPC_GET_PMC5(a) do {} while (0) #define POWERPC_GET_PMC6(a) do {} while (0) diff -r a512ac8fa540 -r eebc7209c47f ppc/gcc_fixes.h --- a/ppc/gcc_fixes.h Wed Oct 15 08:01:54 2008 +0000 +++ b/ppc/gcc_fixes.h Thu Oct 16 13:34:09 2008 +0000 @@ -32,7 +32,7 @@ #if (__GNUC__ < 4) # define REG_v(a) #else -# define REG_v(a) asm ( #a ) +# define REG_v(a) __asm__ ( #a ) #endif #if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) diff -r a512ac8fa540 -r eebc7209c47f ppc/mathops.h --- a/ppc/mathops.h Wed Oct 15 08:01:54 2008 +0000 +++ b/ppc/mathops.h Thu Oct 16 13:34:09 2008 +0000 @@ -26,12 +26,12 @@ #if defined(ARCH_POWERPC_405) /* signed 16x16 -> 32 multiply add accumulate */ #define 
MAC16(rt, ra, rb) \ - asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb)); + __asm__ ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb)); /* signed 16x16 -> 32 multiply */ #define MUL16(ra, rb) \ ({ int __rt; \ - asm ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \ + __asm__ ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \ __rt; }) #endif diff -r a512ac8fa540 -r eebc7209c47f ps2/dsputil_mmi.c --- a/ps2/dsputil_mmi.c Wed Oct 15 08:01:54 2008 +0000 +++ b/ps2/dsputil_mmi.c Thu Oct 16 13:34:09 2008 +0000 @@ -31,7 +31,7 @@ static void clear_blocks_mmi(DCTELEM * blocks) { - asm volatile( + __asm__ volatile( ".set noreorder \n" "addiu $9, %0, 768 \n" "nop \n" @@ -51,7 +51,7 @@ static void get_pixels_mmi(DCTELEM *block, const uint8_t *pixels, int line_size) { - asm volatile( + __asm__ volatile( ".set push \n\t" ".set mips3 \n\t" "ld $8, 0(%0) \n\t" @@ -92,7 +92,7 @@ static void put_pixels8_mmi(uint8_t *block, const uint8_t *pixels, int line_size, int h) { - asm volatile( + __asm__ volatile( ".set push \n\t" ".set mips3 \n\t" "1: \n\t" @@ -111,7 +111,7 @@ static void put_pixels16_mmi(uint8_t *block, const uint8_t *pixels, int line_size, int h) { - asm volatile ( + __asm__ volatile ( ".set push \n\t" ".set mips3 \n\t" "1: \n\t" diff -r a512ac8fa540 -r eebc7209c47f ps2/idct_mmi.c --- a/ps2/idct_mmi.c Wed Oct 15 08:01:54 2008 +0000 +++ b/ps2/idct_mmi.c Thu Oct 16 13:34:09 2008 +0000 @@ -257,7 +257,7 @@ pmaxh($2, $0, $2); \ ppacb($0, $2, $2); \ sd3(2, 0, 4); \ - asm volatile ("add $4, $5, $4"); + __asm__ volatile ("add $4, $5, $4"); #define DCT_8_INV_COL8_PUT() \ PUT($16); \ @@ -277,7 +277,7 @@ pmaxh($2, $0, $2); \ ppacb($0, $2, $2); \ sd3(2, 0, 4); \ - asm volatile ("add $4, $5, $4"); + __asm__ volatile ("add $4, $5, $4"); /*fixme: schedule*/ #define DCT_8_INV_COL8_ADD() \ @@ -294,7 +294,7 @@ void ff_mmi_idct(int16_t * block) { /* $4 = block */ - asm volatile("la $24, %0"::"m"(consttable[0])); + __asm__ volatile("la $24, %0"::"m"(consttable[0])); lq($24, ROUNDER_0, $8); lq($24, ROUNDER_1, $7); DCT_8_INV_ROW1($4, 0, TAB_i_04, $8, $8); @@ -309,14 +309,14 @@ DCT_8_INV_COL8_STORE($4); //let savedtemp regs be saved - asm volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23"); + __asm__ volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23"); } void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block) { /* $4 = dest, $5 = line_size, $6 = block */ - asm volatile("la $24, %0"::"m"(consttable[0])); + __asm__ volatile("la $24, %0"::"m"(consttable[0])); lq($24, ROUNDER_0, $8); lq($24, ROUNDER_1, $7); DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8); @@ -333,14 +333,14 @@ DCT_8_INV_COL8_PUT(); //let savedtemp regs be saved - asm volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23"); + __asm__ volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23"); } void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block) { /* $4 = dest, $5 = line_size, $6 = block */ - asm volatile("la $24, %0"::"m"(consttable[0])); + __asm__ volatile("la $24, %0"::"m"(consttable[0])); lq($24, ROUNDER_0, $8); lq($24, ROUNDER_1, $7); DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8); @@ -357,6 +357,6 @@ DCT_8_INV_COL8_ADD(); //let savedtemp regs be saved - asm volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23"); + __asm__ volatile(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23"); } diff -r a512ac8fa540 -r eebc7209c47f ps2/mmi.h --- a/ps2/mmi.h Wed Oct 15 08:01:54 2008 +0000 +++ b/ps2/mmi.h Thu Oct 16 
13:34:09 2008 +0000 @@ -60,112 +60,112 @@ #define lq(base, off, reg) \ - asm volatile ("lq " #reg ", %0("#base ")" : : "i" (off) ) + __asm__ volatile ("lq " #reg ", %0("#base ")" : : "i" (off) ) #define lq2(mem, reg) \ - asm volatile ("lq " #reg ", %0" : : "r" (mem)) + __asm__ volatile ("lq " #reg ", %0" : : "r" (mem)) #define sq(reg, off, base) \ - asm volatile ("sq " #reg ", %0("#base ")" : : "i" (off) ) + __asm__ volatile ("sq " #reg ", %0("#base ")" : : "i" (off) ) /* #define ld(base, off, reg) \ - asm volatile ("ld " #reg ", " #off "("#base ")") + __asm__ volatile ("ld " #reg ", " #off "("#base ")") */ #define ld3(base, off, reg) \ - asm volatile (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off))) + __asm__ volatile (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off))) #define ldr3(base, off, reg) \ - asm volatile (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off))) + __asm__ volatile (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off))) #define ldl3(base, off, reg) \ - asm volatile (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off))) + __asm__ volatile (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off))) /* #define sd(reg, off, base) \ - asm volatile ("sd " #reg ", " #off "("#base ")") + __asm__ volatile ("sd " #reg ", " #off "("#base ")") */ //seems assembler has bug encoding mnemonic 'sd', so DIY #define sd3(reg, off, base) \ - asm volatile (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off))) + __asm__ volatile (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off))) #define sw(reg, off, base) \ - asm volatile ("sw " #reg ", " #off "("#base ")") + __asm__ volatile ("sw " #reg ", " #off "("#base ")") #define sq2(reg, mem) \ - asm volatile ("sq " #reg ", %0" : : "m" (*(mem))) + __asm__ volatile ("sq " #reg ", %0" : : "m" (*(mem))) #define pinth(rs, rt, rd) \ - asm volatile ("pinth " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pinth " #rd ", " #rs ", " #rt ) #define phmadh(rs, rt, rd) \ - asm volatile ("phmadh " #rd ", " #rs ", " #rt ) + __asm__ volatile ("phmadh " #rd ", " #rs ", " #rt ) #define pcpyud(rs, rt, rd) \ - asm volatile ("pcpyud " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pcpyud " #rd ", " #rs ", " #rt ) #define pcpyld(rs, rt, rd) \ - asm volatile ("pcpyld " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pcpyld " #rd ", " #rs ", " #rt ) #define pcpyh(rt, rd) \ - asm volatile ("pcpyh " #rd ", " #rt ) + __asm__ volatile ("pcpyh " #rd ", " #rt ) #define paddw(rs, rt, rd) \ - asm volatile ("paddw " #rd ", " #rs ", " #rt ) + __asm__ volatile ("paddw " #rd ", " #rs ", " #rt ) #define pextlw(rs, rt, rd) \ - asm volatile ("pextlw " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pextlw " #rd ", " #rs ", " #rt ) #define pextuw(rs, rt, rd) \ - asm volatile ("pextuw " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pextuw " #rd ", " #rs ", " #rt ) #define pextlh(rs, rt, rd) \ - asm volatile ("pextlh " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pextlh " #rd ", " #rs ", " #rt ) #define pextuh(rs, rt, rd) \ - asm volatile ("pextuh " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pextuh " #rd ", " #rs ", " #rt ) #define psubw(rs, rt, rd) \ - asm volatile ("psubw " #rd ", " #rs ", " #rt ) + __asm__ volatile ("psubw " #rd ", " #rs ", " #rt ) #define psraw(rt, sa, rd) \ - asm volatile ("psraw " #rd ", " #rt ", %0" : : "i"(sa) ) + __asm__ volatile ("psraw " #rd ", " #rt ", %0" : : "i"(sa) ) #define ppach(rs, rt, rd) \ - asm volatile ("ppach " #rd ", " #rs ", " #rt ) + __asm__ 
volatile ("ppach " #rd ", " #rs ", " #rt ) #define ppacb(rs, rt, rd) \ - asm volatile ("ppacb " #rd ", " #rs ", " #rt ) + __asm__ volatile ("ppacb " #rd ", " #rs ", " #rt ) #define prevh(rt, rd) \ - asm volatile ("prevh " #rd ", " #rt ) + __asm__ volatile ("prevh " #rd ", " #rt ) #define pmulth(rs, rt, rd) \ - asm volatile ("pmulth " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pmulth " #rd ", " #rs ", " #rt ) #define pmaxh(rs, rt, rd) \ - asm volatile ("pmaxh " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pmaxh " #rd ", " #rs ", " #rt ) #define pminh(rs, rt, rd) \ - asm volatile ("pminh " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pminh " #rd ", " #rs ", " #rt ) #define pinteh(rs, rt, rd) \ - asm volatile ("pinteh " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pinteh " #rd ", " #rs ", " #rt ) #define paddh(rs, rt, rd) \ - asm volatile ("paddh " #rd ", " #rs ", " #rt ) + __asm__ volatile ("paddh " #rd ", " #rs ", " #rt ) #define psubh(rs, rt, rd) \ - asm volatile ("psubh " #rd ", " #rs ", " #rt ) + __asm__ volatile ("psubh " #rd ", " #rs ", " #rt ) #define psrah(rt, sa, rd) \ - asm volatile ("psrah " #rd ", " #rt ", %0" : : "i"(sa) ) + __asm__ volatile ("psrah " #rd ", " #rt ", %0" : : "i"(sa) ) #define pmfhl_uw(rd) \ - asm volatile ("pmfhl.uw " #rd) + __asm__ volatile ("pmfhl.uw " #rd) #define pextlb(rs, rt, rd) \ - asm volatile ("pextlb " #rd ", " #rs ", " #rt ) + __asm__ volatile ("pextlb " #rd ", " #rs ", " #rt ) #endif /* AVCODEC_PS2_MMI_H */ diff -r a512ac8fa540 -r eebc7209c47f ps2/mpegvideo_mmi.c --- a/ps2/mpegvideo_mmi.c Wed Oct 15 08:01:54 2008 +0000 +++ b/ps2/mpegvideo_mmi.c Thu Oct 16 13:34:09 2008 +0000 @@ -50,7 +50,7 @@ nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; } - asm volatile( + __asm__ volatile( "add $14, $0, %3 \n\t" "pcpyld $8, %0, %0 \n\t" "pcpyh $8, $8 \n\t" //r8 = qmul diff -r a512ac8fa540 -r eebc7209c47f sh4/dsputil_sh4.c --- a/sh4/dsputil_sh4.c Wed Oct 15 08:01:54 2008 +0000 +++ b/sh4/dsputil_sh4.c Thu Oct 16 13:34:09 2008 +0000 @@ -28,7 +28,7 @@ #if defined(__SH4__) || defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__) (char*)dst+=size; size/=8*4; - asm( + __asm__( #if defined(__SH4__) " fschg\n" //single float mode #endif diff -r a512ac8fa540 -r eebc7209c47f sh4/idct_sh4.c --- a/sh4/idct_sh4.c Wed Oct 15 08:01:54 2008 +0000 +++ b/sh4/idct_sh4.c Thu Oct 16 13:34:09 2008 +0000 @@ -54,7 +54,7 @@ #if defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__) #define load_matrix(table) \ - asm volatile( \ + __asm__ volatile( \ " fschg\n" \ " fmov @%0+,xd0\n" \ " fmov @%0+,xd2\n" \ @@ -71,15 +71,15 @@ ) #define ftrv() \ - asm volatile("ftrv xmtrx,fv0" \ + __asm__ volatile("ftrv xmtrx,fv0" \ : "=f"(fr0),"=f"(fr1),"=f"(fr2),"=f"(fr3) \ : "0"(fr0), "1"(fr1), "2"(fr2), "3"(fr3) ); #define DEFREG \ - register float fr0 asm("fr0"); \ - register float fr1 asm("fr1"); \ - register float fr2 asm("fr2"); \ - register float fr3 asm("fr3") + register float fr0 __asm__("fr0"); \ + register float fr1 __asm__("fr1"); \ + register float fr2 __asm__("fr2"); \ + register float fr3 __asm__("fr3") #else diff -r a512ac8fa540 -r eebc7209c47f simple_idct.c --- a/simple_idct.c Wed Oct 15 08:01:54 2008 +0000 +++ b/simple_idct.c Thu Oct 16 13:34:09 2008 +0000 @@ -59,11 +59,11 @@ /* signed 16x16 -> 32 multiply add accumulate */ #define MAC16(rt, ra, rb) \ - asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb)); + __asm__ ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb)); /* signed 16x16 -> 32 multiply */ #define MUL16(rt, ra, rb) \ - asm 
("mullhw %0, %1, %2" : "=r" (rt) : "r" (ra), "r" (rb)); + __asm__ ("mullhw %0, %1, %2" : "=r" (rt) : "r" (ra), "r" (rb)); #else diff -r a512ac8fa540 -r eebc7209c47f sparc/simple_idct_vis.c --- a/sparc/simple_idct_vis.c Wed Oct 15 08:01:54 2008 +0000 +++ b/sparc/simple_idct_vis.c Thu Oct 16 13:34:09 2008 +0000 @@ -388,7 +388,7 @@ int out1, out2, out3, out4; DECLARE_ALIGNED_8(int16_t, temp[8*8]); - asm volatile( + __asm__ volatile( INIT_IDCT #define ADDROUNDER @@ -428,7 +428,7 @@ int out1, out2, out3, out4, out5; int r1, r2, r3, r4, r5, r6, r7; - asm volatile( + __asm__ volatile( "wr %%g0, 0x8, %%gsr \n\t" INIT_IDCT @@ -478,7 +478,7 @@ int out1, out2, out3, out4, out5, out6; int r1, r2, r3, r4, r5, r6, r7; - asm volatile( + __asm__ volatile( "wr %%g0, 0x8, %%gsr \n\t" INIT_IDCT diff -r a512ac8fa540 -r eebc7209c47f sparc/vis.h --- a/sparc/vis.h Wed Oct 15 08:01:54 2008 +0000 +++ b/sparc/vis.h Thu Oct 16 13:34:09 2008 +0000 @@ -55,97 +55,97 @@ #define vis_rd_d(X) (vis_dreg(X) << 25) #define vis_ss2s(opf,rs1,rs2,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs1_s(rs1) | \ vis_rs2_s(rs2) | \ vis_rd_s(rd))) #define vis_dd2d(opf,rs1,rs2,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs1_d(rs1) | \ vis_rs2_d(rs2) | \ vis_rd_d(rd))) #define vis_ss2d(opf,rs1,rs2,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs1_s(rs1) | \ vis_rs2_s(rs2) | \ vis_rd_d(rd))) #define vis_sd2d(opf,rs1,rs2,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs1_s(rs1) | \ vis_rs2_d(rs2) | \ vis_rd_d(rd))) #define vis_d2s(opf,rs2,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs2_d(rs2) | \ vis_rd_s(rd))) #define vis_s2d(opf,rs2,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs2_s(rs2) | \ vis_rd_d(rd))) #define vis_d12d(opf,rs1,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs1_d(rs1) | \ vis_rd_d(rd))) #define vis_d22d(opf,rs2,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs2_d(rs2) | \ vis_rd_d(rd))) #define vis_s12s(opf,rs1,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs1_s(rs1) | \ vis_rd_s(rd))) #define vis_s22s(opf,rs2,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rs2_s(rs2) | \ vis_rd_s(rd))) #define vis_s(opf,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rd_s(rd))) #define vis_d(opf,rd) \ - asm volatile (".word %0" \ + __asm__ volatile (".word %0" \ : : "i" (vis_opc_base | vis_opf(opf) | \ vis_rd_d(rd))) #define vis_r2m(op,rd,mem) \ - asm volatile (#op "\t%%f" #rd ", [%0]" : : "r" (&(mem)) ) + __asm__ volatile (#op "\t%%f" #rd ", [%0]" : : "r" (&(mem)) ) #define vis_r2m_2(op,rd,mem1,mem2) \ - asm volatile (#op "\t%%f" #rd ", [%0 + %1]" : : "r" (mem1), "r" (mem2) ) + __asm__ volatile (#op "\t%%f" #rd ", [%0 + %1]" : : "r" (mem1), "r" (mem2) ) #define vis_m2r(op,mem,rd) \ - asm volatile (#op "\t[%0], %%f" #rd : : "r" (&(mem)) ) + __asm__ volatile (#op "\t[%0], %%f" #rd : : "r" (&(mem)) ) #define 
vis_m2r_2(op,mem1,mem2,rd) \ - asm volatile (#op "\t[%0 + %1], %%f" #rd : : "r" (mem1), "r" (mem2) ) + __asm__ volatile (#op "\t[%0 + %1], %%f" #rd : : "r" (mem1), "r" (mem2) ) static inline void vis_set_gsr(unsigned int _val) { - register unsigned int val asm("g1"); + register unsigned int val __asm__("g1"); val = _val; - asm volatile(".word 0xa7804000" + __asm__ volatile(".word 0xa7804000" : : "r" (val)); } @@ -164,9 +164,9 @@ #define vis_st64_2(rs1,mem1,mem2) vis_r2m_2(std, rs1, mem1, mem2) #define vis_ldblk(mem, rd) \ -do { register void *__mem asm("g1"); \ +do { register void *__mem __asm__("g1"); \ __mem = &(mem); \ - asm volatile(".word 0xc1985e00 | %1" \ + __asm__ volatile(".word 0xc1985e00 | %1" \ : \ : "r" (__mem), \ "i" (vis_rd_d(rd)) \ @@ -174,9 +174,9 @@ } while (0) #define vis_stblk(rd, mem) \ -do { register void *__mem asm("g1"); \ +do { register void *__mem __asm__("g1"); \ __mem = &(mem); \ - asm volatile(".word 0xc1b85e00 | %1" \ + __asm__ volatile(".word 0xc1b85e00 | %1" \ : \ : "r" (__mem), \ "i" (vis_rd_d(rd)) \ @@ -184,10 +184,10 @@ } while (0) #define vis_membar_storestore() \ - asm volatile(".word 0x8143e008" : : : "memory") + __asm__ volatile(".word 0x8143e008" : : : "memory") #define vis_membar_sync() \ - asm volatile(".word 0x8143e040" : : : "memory") + __asm__ volatile(".word 0x8143e040" : : : "memory") /* 16 and 32 bit partitioned addition and subtraction. The normal * versions perform 4 16-bit or 2 32-bit additions or subtractions. @@ -226,11 +226,11 @@ static inline void *vis_alignaddr(void *_ptr) { - register void *ptr asm("g1"); + register void *ptr __asm__("g1"); ptr = _ptr; - asm volatile(".word %2" + __asm__ volatile(".word %2" : "=&r" (ptr) : "0" (ptr), "i" (vis_opc_base | vis_opf(0x18) | @@ -243,11 +243,11 @@ static inline void vis_alignaddr_g0(void *_ptr) { - register void *ptr asm("g1"); + register void *ptr __asm__("g1"); ptr = _ptr; - asm volatile(".word %2" + __asm__ volatile(".word %2" : "=&r" (ptr) : "0" (ptr), "i" (vis_opc_base | vis_opf(0x18) | @@ -258,11 +258,11 @@ static inline void *vis_alignaddrl(void *_ptr) { - register void *ptr asm("g1"); + register void *ptr __asm__("g1"); ptr = _ptr; - asm volatile(".word %2" + __asm__ volatile(".word %2" : "=&r" (ptr) : "0" (ptr), "i" (vis_opc_base | vis_opf(0x19) | @@ -275,11 +275,11 @@ static inline void vis_alignaddrl_g0(void *_ptr) { - register void *ptr asm("g1"); + register void *ptr __asm__("g1"); ptr = _ptr; - asm volatile(".word %2" + __asm__ volatile(".word %2" : "=&r" (ptr) : "0" (ptr), "i" (vis_opc_base | vis_opf(0x19) |