mplayer.hg changeset 27744:7b83cbade239
Convert asm keyword into __asm__.
Neither the asm() nor the __asm__() keyword is part of the C99
standard, but while GCC accepts the former in C89 mode, it rejects it
in C99 mode unless GNU extensions are enabled (with -fasm). The latter
form is accepted in either mode as an extension, without requiring any
further command-line options.
The Sun Studio C99 compiler likewise rejects asm() while accepting
__asm__(), albeit with warnings that it is not valid C99 syntax.
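To make the difference concrete, here is a minimal sketch of the same statement in both spellings. It is not taken from the patch: the bswap32 helper, its constraint, and the x86 target are assumptions for illustration only. Compiling the first spelling with gcc -std=c99 fails because asm is then treated as an ordinary identifier; -std=gnu99 or -fasm restores it, while the __asm__ spelling needs neither.

/* Minimal sketch, assuming x86 and GCC-style extended asm; bswap32 is a
 * hypothetical helper, not part of this changeset. */
static inline unsigned bswap32(unsigned x)
{
    /* old spelling, rejected by "gcc -std=c99" (and by Sun Studio in C99 mode):
     *     asm volatile ("bswap %0" : "+r"(x));
     */
    __asm__ volatile ("bswap %0" : "+r"(x));   /* new spelling, accepted in every mode */
    return x;
}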
author      flameeyes
date        Thu, 16 Oct 2008 13:34:30 +0000
parents     d74f44a9a192
children    a5ed200519dc
files       libswscale/rgb2rgb_template.c libswscale/swscale-example.c libswscale/swscale.c libswscale/swscale_template.c libswscale/yuv2rgb_template.c libswscale/yuv2rgb_vis.c
diffstat    6 files changed, 180 insertions(+), 180 deletions(-)
--- a/libswscale/rgb2rgb_template.c Thu Oct 16 13:33:29 2008 +0000 +++ b/libswscale/rgb2rgb_template.c Thu Oct 16 13:34:30 2008 +0000 @@ -81,12 +81,12 @@ #endif end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); mm_end = end - 23; - asm volatile("movq %0, %%mm7"::"m"(mask32):"memory"); + __asm__ volatile("movq %0, %%mm7"::"m"(mask32):"memory"); while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "punpckldq 3%1, %%mm0 \n\t" @@ -110,8 +110,8 @@ dest += 32; s += 24; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -141,11 +141,11 @@ #endif end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); mm_end = end - 31; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq 8%1, %%mm1 \n\t" @@ -196,8 +196,8 @@ dest += 24; s += 32; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -231,12 +231,12 @@ const uint8_t *mm_end; end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*s)); - asm volatile("movq %0, %%mm4"::"m"(mask15s)); + __asm__ volatile(PREFETCH" %0"::"m"(*s)); + __asm__ volatile("movq %0, %%mm4"::"m"(mask15s)); mm_end = end - 15; while (s<mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq 8%1, %%mm2 \n\t" @@ -254,8 +254,8 @@ d+=16; s+=16; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif mm_end = end - 3; while (s < mm_end) @@ -280,13 +280,13 @@ const uint8_t *mm_end; end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*s)); - asm volatile("movq %0, %%mm7"::"m"(mask15rg)); - asm volatile("movq %0, %%mm6"::"m"(mask15b)); + __asm__ volatile(PREFETCH" %0"::"m"(*s)); + __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg)); + __asm__ volatile("movq %0, %%mm6"::"m"(mask15b)); mm_end = end - 15; while (s<mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq 8%1, %%mm2 \n\t" @@ -308,8 +308,8 @@ d+=16; s+=16; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif mm_end = end - 3; while (s < mm_end) @@ -340,7 +340,7 @@ #ifdef HAVE_MMX mm_end = end - 15; #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) - asm volatile( + __asm__ volatile( "movq %3, %%mm5 \n\t" "movq %4, %%mm6 \n\t" "movq %5, %%mm7 \n\t" @@ -375,14 +375,14 @@ : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216) ); #else - asm volatile(PREFETCH" %0"::"m"(*src):"memory"); - asm volatile( + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_16mask),"m"(green_16mask)); while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 4%1, %%mm3 \n\t" @@ -416,8 +416,8 @@ s += 16; } #endif - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ 
volatile(EMMS:::"memory"); #endif while (s < end) { @@ -436,15 +436,15 @@ uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*src):"memory"); - asm volatile( + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_16mask),"m"(green_16mask)); mm_end = end - 15; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 4%1, %%mm3 \n\t" @@ -477,8 +477,8 @@ d += 4; s += 16; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -499,7 +499,7 @@ #ifdef HAVE_MMX mm_end = end - 15; #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) - asm volatile( + __asm__ volatile( "movq %3, %%mm5 \n\t" "movq %4, %%mm6 \n\t" "movq %5, %%mm7 \n\t" @@ -534,14 +534,14 @@ : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215) ); #else - asm volatile(PREFETCH" %0"::"m"(*src):"memory"); - asm volatile( + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_15mask),"m"(green_15mask)); while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 4%1, %%mm3 \n\t" @@ -575,8 +575,8 @@ s += 16; } #endif - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -595,15 +595,15 @@ uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*src):"memory"); - asm volatile( + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_15mask),"m"(green_15mask)); mm_end = end - 15; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 4%1, %%mm3 \n\t" @@ -636,8 +636,8 @@ d += 4; s += 16; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -656,15 +656,15 @@ uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*src):"memory"); - asm volatile( + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_16mask),"m"(green_16mask)); mm_end = end - 11; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 3%1, %%mm3 \n\t" @@ -697,8 +697,8 @@ d += 4; s += 12; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -719,15 +719,15 @@ uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*src):"memory"); - asm volatile( + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_16mask),"m"(green_16mask)); mm_end = end - 15; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 3%1, %%mm3 \n\t" @@ -760,8 +760,8 @@ d += 4; s += 12; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + 
__asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -782,15 +782,15 @@ uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*src):"memory"); - asm volatile( + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_15mask),"m"(green_15mask)); mm_end = end - 11; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 3%1, %%mm3 \n\t" @@ -823,8 +823,8 @@ d += 4; s += 12; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -845,15 +845,15 @@ uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*src):"memory"); - asm volatile( + __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_15mask),"m"(green_15mask)); mm_end = end - 15; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 3%1, %%mm3 \n\t" @@ -886,8 +886,8 @@ d += 4; s += 12; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -929,11 +929,11 @@ const uint16_t *s = (const uint16_t*)src; end = s + src_size/2; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); mm_end = end - 7; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq %1, %%mm1 \n\t" @@ -996,7 +996,7 @@ :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null) :"memory"); /* borrowed 32 to 24 */ - asm volatile( + __asm__ volatile( "movq %%mm0, %%mm4 \n\t" "movq %%mm3, %%mm5 \n\t" "movq %%mm6, %%mm0 \n\t" @@ -1048,8 +1048,8 @@ d += 24; s += 8; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -1071,11 +1071,11 @@ const uint16_t *s = (const uint16_t *)src; end = s + src_size/2; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); mm_end = end - 7; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq %1, %%mm1 \n\t" @@ -1137,7 +1137,7 @@ :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null) :"memory"); /* borrowed 32 to 24 */ - asm volatile( + __asm__ volatile( "movq %%mm0, %%mm4 \n\t" "movq %%mm3, %%mm5 \n\t" "movq %%mm6, %%mm0 \n\t" @@ -1189,8 +1189,8 @@ d += 24; s += 8; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -1212,12 +1212,12 @@ const uint16_t *s = (const uint16_t *)src; end = s + src_size/2; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*s):"memory"); - asm volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); mm_end = end - 3; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq %1, %%mm1 \n\t" @@ -1253,8 +1253,8 @@ d += 16; s += 4; } - asm volatile(SFENCE:::"memory"); - asm 
volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -1290,12 +1290,12 @@ const uint16_t *s = (const uint16_t*)src; end = s + src_size/2; #ifdef HAVE_MMX - asm volatile(PREFETCH" %0"::"m"(*s):"memory"); - asm volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); + __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); mm_end = end - 3; while (s < mm_end) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq %1, %%mm1 \n\t" @@ -1331,8 +1331,8 @@ d += 16; s += 4; } - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { @@ -1358,7 +1358,7 @@ const uint8_t *s = src-idx; uint8_t *d = dst-idx; #ifdef HAVE_MMX - asm volatile( + __asm__ volatile( "test %0, %0 \n\t" "jns 2f \n\t" PREFETCH" (%1, %0) \n\t" @@ -1421,7 +1421,7 @@ unsigned i; #ifdef HAVE_MMX long mmx_size= 23 - src_size; - asm volatile ( + __asm__ volatile ( "test %%"REG_a", %%"REG_a" \n\t" "jns 2f \n\t" "movq "MANGLE(mask24r)", %%mm5 \n\t" @@ -1465,8 +1465,8 @@ : "r" (src-mmx_size), "r"(dst-mmx_size) ); - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); if (mmx_size==23) return; //finished, was multiple of 8 @@ -1496,7 +1496,7 @@ { #ifdef HAVE_MMX //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway) - asm volatile( + __asm__ volatile( "xor %%"REG_a", %%"REG_a" \n\t" ASMALIGN(4) "1: \n\t" @@ -1537,10 +1537,10 @@ y2 = yc2[n]; \ u = uc[n]; \ v = vc[n]; \ - asm("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \ - asm("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \ - asm("unpkbl %1, %0" : "=r"(u) : "r"(u)); \ - asm("unpkbl %1, %0" : "=r"(v) : "r"(v)); \ + __asm__("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \ + __asm__("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \ + __asm__("unpkbl %1, %0" : "=r"(u) : "r"(u)); \ + __asm__("unpkbl %1, %0" : "=r"(v) : "r"(v)); \ yuv1 = (u << 8) + (v << 24); \ yuv2 = yuv1 + y2; \ yuv1 += y1; \ @@ -1557,10 +1557,10 @@ uint64_t y1, y2, yuv1, yuv2; uint64_t u, v; /* Prefetch */ - asm("ldq $31,64(%0)" :: "r"(yc)); - asm("ldq $31,64(%0)" :: "r"(yc2)); - asm("ldq $31,64(%0)" :: "r"(uc)); - asm("ldq $31,64(%0)" :: "r"(vc)); + __asm__("ldq $31,64(%0)" :: "r"(yc)); + __asm__("ldq $31,64(%0)" :: "r"(yc2)); + __asm__("ldq $31,64(%0)" :: "r"(uc)); + __asm__("ldq $31,64(%0)" :: "r"(vc)); pl2yuy2(0); pl2yuy2(1); @@ -1620,7 +1620,7 @@ dst += dstStride; } #ifdef HAVE_MMX -asm( EMMS" \n\t" +__asm__( EMMS" \n\t" SFENCE" \n\t" :::"memory"); #endif @@ -1648,7 +1648,7 @@ { #ifdef HAVE_MMX //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway) - asm volatile( + __asm__ volatile( "xor %%"REG_a", %%"REG_a" \n\t" ASMALIGN(4) "1: \n\t" @@ -1726,7 +1726,7 @@ dst += dstStride; } #ifdef HAVE_MMX -asm( EMMS" \n\t" +__asm__( EMMS" \n\t" SFENCE" \n\t" :::"memory"); #endif @@ -1777,7 +1777,7 @@ for (y=0; y<height; y+=2) { #ifdef HAVE_MMX - asm volatile( + __asm__ volatile( "xor %%"REG_a", %%"REG_a" \n\t" "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $8, %%mm7 \n\t" // FF,00,FF,00... 
@@ -1832,7 +1832,7 @@ ydst += lumStride; src += srcStride; - asm volatile( + __asm__ volatile( "xor %%"REG_a", %%"REG_a" \n\t" ASMALIGN(4) "1: \n\t" @@ -1882,7 +1882,7 @@ src += srcStride; } #ifdef HAVE_MMX -asm volatile( EMMS" \n\t" +__asm__ volatile( EMMS" \n\t" SFENCE" \n\t" :::"memory"); #endif @@ -1916,7 +1916,7 @@ for (y=1; y<srcHeight; y++){ #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) const long mmxSize= srcWidth&~15; - asm volatile( + __asm__ volatile( "mov %4, %%"REG_a" \n\t" "1: \n\t" "movq (%0, %%"REG_a"), %%mm0 \n\t" @@ -1994,7 +1994,7 @@ #endif #ifdef HAVE_MMX -asm volatile( EMMS" \n\t" +__asm__ volatile( EMMS" \n\t" SFENCE" \n\t" :::"memory"); #endif @@ -2015,7 +2015,7 @@ for (y=0; y<height; y+=2) { #ifdef HAVE_MMX - asm volatile( + __asm__ volatile( "xorl %%eax, %%eax \n\t" "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $8, %%mm7 \n\t" // FF,00,FF,00... @@ -2070,7 +2070,7 @@ ydst += lumStride; src += srcStride; - asm volatile( + __asm__ volatile( "xorl %%eax, %%eax \n\t" ASMALIGN(4) "1: \n\t" @@ -2120,7 +2120,7 @@ src += srcStride; } #ifdef HAVE_MMX -asm volatile( EMMS" \n\t" +__asm__ volatile( EMMS" \n\t" SFENCE" \n\t" :::"memory"); #endif @@ -2145,7 +2145,7 @@ long i; for (i=0; i<2; i++) { - asm volatile( + __asm__ volatile( "mov %2, %%"REG_a" \n\t" "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t" "movq "MANGLE(ff_w1111)", %%mm5 \n\t" @@ -2218,7 +2218,7 @@ src += srcStride; } src -= srcStride*2; - asm volatile( + __asm__ volatile( "mov %4, %%"REG_a" \n\t" "movq "MANGLE(ff_w1111)", %%mm5 \n\t" "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t" @@ -2372,7 +2372,7 @@ src += srcStride*2; } - asm volatile( EMMS" \n\t" + __asm__ volatile( EMMS" \n\t" SFENCE" \n\t" :::"memory"); #else @@ -2440,7 +2440,7 @@ #ifdef HAVE_MMX #ifdef HAVE_SSE2 - asm( + __asm__( "xor %%"REG_a", %%"REG_a" \n\t" "1: \n\t" PREFETCH" 64(%1, %%"REG_a") \n\t" @@ -2459,7 +2459,7 @@ : "memory", "%"REG_a"" ); #else - asm( + __asm__( "xor %%"REG_a", %%"REG_a" \n\t" "1: \n\t" PREFETCH" 64(%1, %%"REG_a") \n\t" @@ -2502,7 +2502,7 @@ src2 += src2Stride; } #ifdef HAVE_MMX - asm( + __asm__( EMMS" \n\t" SFENCE" \n\t" ::: "memory" @@ -2519,7 +2519,7 @@ long y,x,w,h; w=width/2; h=height/2; #ifdef HAVE_MMX - asm volatile( + __asm__ volatile( PREFETCH" %0 \n\t" PREFETCH" %1 \n\t" ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory"); @@ -2531,7 +2531,7 @@ #ifdef HAVE_MMX for (;x<w-31;x+=32) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq 8%1, %%mm2 \n\t" @@ -2571,7 +2571,7 @@ #ifdef HAVE_MMX for (;x<w-31;x+=32) { - asm volatile( + __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq 8%1, %%mm2 \n\t" @@ -2605,7 +2605,7 @@ for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x]; } #ifdef HAVE_MMX - asm( + __asm__( EMMS" \n\t" SFENCE" \n\t" ::: "memory" @@ -2630,7 +2630,7 @@ #ifdef HAVE_MMX for (;x<w-7;x+=8) { - asm volatile( + __asm__ volatile( PREFETCH" 32(%1, %0) \n\t" PREFETCH" 32(%2, %0) \n\t" PREFETCH" 32(%3, %0) \n\t" @@ -2696,7 +2696,7 @@ } } #ifdef HAVE_MMX - asm( + __asm__( EMMS" \n\t" SFENCE" \n\t" ::: "memory"
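The rgb2rgb_template.c hunks above keep rewriting the same prologue/loop/epilogue shape: a prefetch before the loop, packed moves inside it, and standalone SFENCE and EMMS statements after it, each with a "memory" clobber so the compiler does not reorder them across the surrounding C code. A hedged sketch of that shape follows; copy_block_mmx is a hypothetical helper, not part of the patch, and it assumes an SSE-capable x86 CPU for prefetchnta, movntq, and sfence.

#include <stdint.h>

/* Sketch only; like the original code it does not declare the MMX register it
 * touches, relying on the trailing emms instead. */
static void copy_block_mmx(uint8_t *dst, const uint8_t *src, long n)
{
    long i = 0;
    __asm__ volatile ("prefetchnta (%0)" :: "r"(src) : "memory");   /* prologue */
    for (; i + 8 <= n; i += 8)
        __asm__ volatile (
            "movq   (%1), %%mm0 \n\t"   /* load 8 bytes into an MMX register */
            "movntq %%mm0, (%0) \n\t"   /* non-temporal store to the destination */
            :: "r"(dst + i), "r"(src + i)
            : "memory");
    __asm__ volatile ("sfence" ::: "memory");   /* drain the write-combining buffers */
    __asm__ volatile ("emms"   ::: "memory");   /* return the FPU to x87 state */
    for (; i < n; i++)                          /* scalar tail, as in the C fallbacks */
        dst[i] = src[i];
}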
--- a/libswscale/swscale-example.c Thu Oct 16 13:33:29 2008 +0000
+++ b/libswscale/swscale-example.c Thu Oct 16 13:34:30 2008 +0000
@@ -120,7 +120,7 @@
     sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
 
 #if defined(ARCH_X86)
-    asm volatile ("emms\n\t");
+    __asm__ volatile ("emms\n\t");
 #endif
 
     ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
@@ -215,7 +215,7 @@
     sws_scale(sws, rgb_src, rgb_stride, 0, H, src, stride);
 
 #if defined(ARCH_X86)
-    asm volatile ("emms\n\t");
+    __asm__ volatile ("emms\n\t");
 #endif
 
     selfTest(src, stride, W, H);
--- a/libswscale/swscale.c Thu Oct 16 13:33:29 2008 +0000
+++ b/libswscale/swscale.c Thu Oct 16 13:34:30 2008 +0000
@@ -1061,7 +1061,7 @@
     int ret= -1;
 #if defined(ARCH_X86)
     if (flags & SWS_CPU_CAPS_MMX)
-        asm volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
+        __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
 #endif
 
     // Note the +1 is for the MMXscaler which reads over the end
@@ -1450,7 +1450,7 @@
 
         //code fragment
 
-        asm volatile(
+        __asm__ volatile(
            "jmp 9f \n\t"
            // Begin
            "0: \n\t"
@@ -1490,7 +1490,7 @@
            "=r" (fragmentLengthA)
        );
 
-        asm volatile(
+        __asm__ volatile(
            "jmp 9f \n\t"
            // Begin
            "0: \n\t"
@@ -2167,7 +2167,7 @@
     SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
 #if defined(ARCH_X86)
     if (flags & SWS_CPU_CAPS_MMX)
-        asm volatile("emms\n\t"::: "memory");
+        __asm__ volatile("emms\n\t"::: "memory");
 #endif
 
 #if !defined(RUNTIME_CPUDETECT) || !defined (CONFIG_GPL) //ensure that the flags match the compiled variant if cpudetect is off
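The swscale.c hunk above also carries the FIXME about emms being required: MMX registers alias the x87 floating-point stack, so any code that may execute x87 instructions afterwards needs the tag word cleared first. A small sketch of that guard; emms_if_mmx is a hypothetical name mirroring the SWS_CPU_CAPS_MMX check in the hunk, not an API from the library.

/* Hypothetical helper, not part of the patch. */
static inline void emms_if_mmx(int have_mmx)
{
    if (have_mmx)
        __asm__ volatile ("emms" ::: "memory");   /* reset the x87 tag word after MMX use */
}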
--- a/libswscale/swscale_template.c Thu Oct 16 13:33:29 2008 +0000 +++ b/libswscale/swscale_template.c Thu Oct 16 13:34:30 2008 +0000 @@ -71,7 +71,7 @@ #endif #define YSCALEYUV2YV12X(x, offset, dest, width) \ - asm volatile(\ + __asm__ volatile(\ "xor %%"REG_a", %%"REG_a" \n\t"\ "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\ "movq %%mm3, %%mm4 \n\t"\ @@ -107,7 +107,7 @@ ); #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \ - asm volatile(\ + __asm__ volatile(\ "lea " offset "(%0), %%"REG_d" \n\t"\ "xor %%"REG_a", %%"REG_a" \n\t"\ "pxor %%mm4, %%mm4 \n\t"\ @@ -207,7 +207,7 @@ : "%eax", "%ebx", "%ecx", "%edx", "%esi" */ #define YSCALEYUV2PACKEDX \ - asm volatile(\ + __asm__ volatile(\ "xor %%"REG_a", %%"REG_a" \n\t"\ ASMALIGN(4)\ "nop \n\t"\ @@ -256,7 +256,7 @@ ); #define YSCALEYUV2PACKEDX_ACCURATE \ - asm volatile(\ + __asm__ volatile(\ "xor %%"REG_a", %%"REG_a" \n\t"\ ASMALIGN(4)\ "nop \n\t"\ @@ -1002,7 +1002,7 @@ if (c->flags & SWS_ACCURATE_RND){ while(p--){ - asm volatile( + __asm__ volatile( YSCALEYUV2YV121_ACCURATE :: "r" (src[p]), "r" (dst[p] + counter[p]), "g" (-counter[p]) @@ -1011,7 +1011,7 @@ } }else{ while(p--){ - asm volatile( + __asm__ volatile( YSCALEYUV2YV121 :: "r" (src[p]), "r" (dst[p] + counter[p]), "g" (-counter[p]) @@ -1220,7 +1220,7 @@ { #ifdef HAVE_MMX case PIX_FMT_RGB32: - asm volatile( + __asm__ volatile( FULL_YSCALEYUV2RGB @@ -1244,7 +1244,7 @@ ); break; case PIX_FMT_BGR24: - asm volatile( + __asm__ volatile( FULL_YSCALEYUV2RGB @@ -1293,7 +1293,7 @@ ); break; case PIX_FMT_BGR555: - asm volatile( + __asm__ volatile( FULL_YSCALEYUV2RGB #ifdef DITHER1XBPP @@ -1326,7 +1326,7 @@ ); break; case PIX_FMT_BGR565: - asm volatile( + __asm__ volatile( FULL_YSCALEYUV2RGB #ifdef DITHER1XBPP @@ -1434,7 +1434,7 @@ { //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( case PIX_FMT_RGB32: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1448,7 +1448,7 @@ ); return; case PIX_FMT_BGR24: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1461,7 +1461,7 @@ ); return; case PIX_FMT_RGB555: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1482,7 +1482,7 @@ ); return; case PIX_FMT_RGB565: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1502,7 +1502,7 @@ ); return; case PIX_FMT_YUYV422: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1546,7 +1546,7 @@ switch(dstFormat) { case PIX_FMT_RGB32: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1560,7 +1560,7 @@ ); return; case PIX_FMT_BGR24: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1574,7 +1574,7 @@ ); return; case PIX_FMT_RGB555: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1594,7 +1594,7 @@ ); return; case PIX_FMT_RGB565: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1615,7 +1615,7 @@ ); return; case PIX_FMT_YUYV422: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" 
"push %%"REG_BP" \n\t" @@ -1635,7 +1635,7 @@ switch(dstFormat) { case PIX_FMT_RGB32: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1649,7 +1649,7 @@ ); return; case PIX_FMT_BGR24: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1663,7 +1663,7 @@ ); return; case PIX_FMT_RGB555: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1683,7 +1683,7 @@ ); return; case PIX_FMT_RGB565: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1704,7 +1704,7 @@ ); return; case PIX_FMT_YUYV422: - asm volatile( + __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" @@ -1734,7 +1734,7 @@ static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) { #ifdef HAVE_MMX - asm volatile( + __asm__ volatile( "movq "MANGLE(bm01010101)", %%mm2 \n\t" "mov %0, %%"REG_a" \n\t" "1: \n\t" @@ -1759,7 +1759,7 @@ static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) { #ifdef HAVE_MMX - asm volatile( + __asm__ volatile( "movq "MANGLE(bm01010101)", %%mm4 \n\t" "mov %0, %%"REG_a" \n\t" "1: \n\t" @@ -1796,7 +1796,7 @@ static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused) { #ifdef HAVE_MMX - asm volatile( + __asm__ volatile( "mov %0, %%"REG_a" \n\t" "1: \n\t" "movq (%1, %%"REG_a",2), %%mm0 \n\t" @@ -1820,7 +1820,7 @@ static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused) { #ifdef HAVE_MMX - asm volatile( + __asm__ volatile( "movq "MANGLE(bm01010101)", %%mm4 \n\t" "mov %0, %%"REG_a" \n\t" "1: \n\t" @@ -1917,20 +1917,20 @@ { if(srcFormat == PIX_FMT_BGR24){ - asm volatile( + __asm__ volatile( "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t" "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t" : ); }else{ - asm volatile( + __asm__ volatile( "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t" "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t" : ); } - asm volatile( + __asm__ volatile( "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t" "mov %2, %%"REG_a" \n\t" "pxor %%mm7, %%mm7 \n\t" @@ -1968,7 +1968,7 @@ static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, long width, int srcFormat) { - asm volatile( + __asm__ volatile( "movq 24+%4, %%mm6 \n\t" "mov %3, %%"REG_a" \n\t" "pxor %%mm7, %%mm7 \n\t" @@ -2184,7 +2184,7 @@ filter-= counter*2; filterPos-= counter/2; dst-= counter/2; - asm volatile( + __asm__ volatile( #if defined(PIC) "push %%"REG_b" \n\t" #endif @@ -2230,7 +2230,7 @@ filter-= counter*4; filterPos-= counter/2; dst-= counter/2; - asm volatile( + __asm__ volatile( #if defined(PIC) "push %%"REG_b" \n\t" #endif @@ -2288,7 +2288,7 @@ //filter-= counter*filterSize/2; filterPos-= counter/2; dst-= counter/2; - asm volatile( + __asm__ volatile( "pxor %%mm7, %%mm7 \n\t" ASMALIGN(4) "1: \n\t" @@ -2456,7 +2456,7 @@ #endif if (canMMX2BeUsed) { - asm volatile( + __asm__ volatile( #if defined(PIC) "mov %%"REG_b", %5 \n\t" #endif @@ -2521,7 +2521,7 @@ long xInc_shr16 = xInc >> 16; uint16_t xInc_mask = xInc & 0xffff; //NO MMX just normal asm ... 
- asm volatile( + __asm__ volatile( "xor %%"REG_a", %%"REG_a" \n\t" // i "xor %%"REG_d", %%"REG_d" \n\t" // xx "xorl %%ecx, %%ecx \n\t" // 2*xalpha @@ -2729,7 +2729,7 @@ #endif if (canMMX2BeUsed) { - asm volatile( + __asm__ volatile( #if defined(PIC) "mov %%"REG_b", %6 \n\t" #endif @@ -2806,7 +2806,7 @@ #endif /* HAVE_MMX2 */ long xInc_shr16 = (long) (xInc >> 16); uint16_t xInc_mask = xInc & 0xffff; - asm volatile( + __asm__ volatile( "xor %%"REG_a", %%"REG_a" \n\t" // i "xor %%"REG_d", %%"REG_d" \n\t" // xx "xorl %%ecx, %%ecx \n\t" // 2*xalpha @@ -3256,8 +3256,8 @@ } #ifdef HAVE_MMX - asm volatile(SFENCE:::"memory"); - asm volatile(EMMS:::"memory"); + __asm__ volatile(SFENCE:::"memory"); + __asm__ volatile(EMMS:::"memory"); #endif /* store changed local vars back in the context */ c->dstY= dstY;
--- a/libswscale/yuv2rgb_template.c Thu Oct 16 13:33:29 2008 +0000 +++ b/libswscale/yuv2rgb_template.c Thu Oct 16 13:34:30 2008 +0000 @@ -133,7 +133,7 @@ h_size= (c->dstW+7)&~7; if(h_size*2 > FFABS(dstStride[0])) h_size-=8; - asm volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); + __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); //printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&c->blueDither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0], //srcStride[0],srcStride[1],srcStride[2],dstStride[0]); for (y= 0; y<srcSliceH; y++ ) { @@ -148,7 +148,7 @@ c->redDither= ff_dither8[(y+1)&1]; /* This MMX assembly code deals with a SINGLE scan line at a time, * it converts 8 pixels in each iteration. */ - asm volatile ( + __asm__ volatile ( /* load data for start of next scan line */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ @@ -210,7 +210,7 @@ ); } - asm volatile (EMMS); + __asm__ volatile (EMMS); return srcSliceH; } @@ -227,7 +227,7 @@ h_size= (c->dstW+7)&~7; if(h_size*2 > FFABS(dstStride[0])) h_size-=8; - asm volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); + __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); //printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&c->blueDither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0], //srcStride[0],srcStride[1],srcStride[2],dstStride[0]); for (y= 0; y<srcSliceH; y++ ) { @@ -242,7 +242,7 @@ c->redDither= ff_dither8[(y+1)&1]; /* This MMX assembly code deals with a SINGLE scan line at a time, * it converts 8 pixels in each iteration. */ - asm volatile ( + __asm__ volatile ( /* load data for start of next scan line */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ @@ -299,7 +299,7 @@ ); } - asm volatile (EMMS); + __asm__ volatile (EMMS); return srcSliceH; } @@ -315,7 +315,7 @@ h_size= (c->dstW+7)&~7; if(h_size*3 > FFABS(dstStride[0])) h_size-=8; - asm volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); + __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); for (y= 0; y<srcSliceH; y++ ) { uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0]; @@ -326,7 +326,7 @@ /* This MMX assembly code deals with a SINGLE scan line at a time, * it converts 8 pixels in each iteration. */ - asm volatile ( + __asm__ volatile ( /* load data for start of next scan line */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ @@ -445,7 +445,7 @@ ); } - asm volatile (EMMS); + __asm__ volatile (EMMS); return srcSliceH; } @@ -461,7 +461,7 @@ h_size= (c->dstW+7)&~7; if(h_size*4 > FFABS(dstStride[0])) h_size-=8; - asm volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); + __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ ); for (y= 0; y<srcSliceH; y++ ) { uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0]; @@ -472,7 +472,7 @@ /* This MMX assembly code deals with a SINGLE scan line at a time, * it converts 8 pixels in each iteration. */ - asm volatile ( + __asm__ volatile ( /* load data for start of next scan line */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ @@ -531,6 +531,6 @@ ); } - asm volatile (EMMS); + __asm__ volatile (EMMS); return srcSliceH; }
--- a/libswscale/yuv2rgb_vis.c Thu Oct 16 13:33:29 2008 +0000
+++ b/libswscale/yuv2rgb_vis.c Thu Oct 16 13:34:30 2008 +0000
@@ -85,7 +85,7 @@
     int y, out1, out2, out3, out4, out5, out6;
 
     for(y=0;y < srcSliceH;++y) {
-        asm volatile (
+        __asm__ volatile (
             YUV2RGB_INIT
             "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */
             "1: \n\t"
@@ -136,7 +136,7 @@
     int y, out1, out2, out3, out4, out5, out6;
 
     for(y=0;y < srcSliceH;++y) {
-        asm volatile (
+        __asm__ volatile (
             YUV2RGB_INIT
             "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */
             "1: \n\t"
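A possible follow-up, not part of this changeset, is to hide the keyword behind a single macro so the choice is made in one place; PORTABLE_ASM is a hypothetical name, and the compiler test relies only on what the commit message states (both GCC and Sun Studio accept __asm__ in C99 mode).

/* Hypothetical portability shim, not from the patch. */
#if defined(__GNUC__) || defined(__SUNPRO_C)
#    define PORTABLE_ASM __asm__    /* accepted by both even in strict C99 mode */
#else
#    define PORTABLE_ASM asm        /* fall back to the plain keyword elsewhere */
#endif

/* usage sketch: PORTABLE_ASM volatile("emms" ::: "memory"); */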