comparison libswscale/swscale.c @ 27744:7b83cbade239

Convert the asm keyword to __asm__. Neither the asm() nor the __asm__() keyword is part of the C99 standard, but while GCC accepts the former in C89 syntax, it is not accepted in C99 unless GNU extensions are enabled (with -fasm). The latter form is accepted in any syntax as an extension (without requiring further command-line options). The Sun Studio C99 compiler likewise rejects asm() while accepting __asm__(), albeit with warnings that it is not valid C99 syntax.
author flameeyes
date Thu, 16 Oct 2008 13:34:30 +0000
parents ad1d056da559
children 3a18c8bdb555
comparison
equal deleted inserted replaced
27743:d74f44a9a192 27744:7b83cbade239
1059 int64_t *filter2=NULL; 1059 int64_t *filter2=NULL;
1060 const int64_t fone= 1LL<<54; 1060 const int64_t fone= 1LL<<54;
1061 int ret= -1; 1061 int ret= -1;
1062 #if defined(ARCH_X86) 1062 #if defined(ARCH_X86)
1063 if (flags & SWS_CPU_CAPS_MMX) 1063 if (flags & SWS_CPU_CAPS_MMX)
1064 asm volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions) 1064 __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
1065 #endif 1065 #endif
1066 1066
1067 // Note the +1 is for the MMXscaler which reads over the end 1067 // Note the +1 is for the MMXscaler which reads over the end
1068 *filterPos = av_malloc((dstW+1)*sizeof(int16_t)); 1068 *filterPos = av_malloc((dstW+1)*sizeof(int16_t));
1069 1069
1448 1448
1449 // create an optimized horizontal scaling routine 1449 // create an optimized horizontal scaling routine
1450 1450
1451 //code fragment 1451 //code fragment
1452 1452
1453 asm volatile( 1453 __asm__ volatile(
1454 "jmp 9f \n\t" 1454 "jmp 9f \n\t"
1455 // Begin 1455 // Begin
1456 "0: \n\t" 1456 "0: \n\t"
1457 "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t" 1457 "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t"
1458 "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t" 1458 "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t"
1488 1488
1489 :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A), 1489 :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
1490 "=r" (fragmentLengthA) 1490 "=r" (fragmentLengthA)
1491 ); 1491 );
1492 1492
1493 asm volatile( 1493 __asm__ volatile(
1494 "jmp 9f \n\t" 1494 "jmp 9f \n\t"
1495 // Begin 1495 // Begin
1496 "0: \n\t" 1496 "0: \n\t"
1497 "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t" 1497 "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t"
1498 "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t" 1498 "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t"
2165 int unscaled, needsDither; 2165 int unscaled, needsDither;
2166 int srcRange, dstRange; 2166 int srcRange, dstRange;
2167 SwsFilter dummyFilter= {NULL, NULL, NULL, NULL}; 2167 SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
2168 #if defined(ARCH_X86) 2168 #if defined(ARCH_X86)
2169 if (flags & SWS_CPU_CAPS_MMX) 2169 if (flags & SWS_CPU_CAPS_MMX)
2170 asm volatile("emms\n\t"::: "memory"); 2170 __asm__ volatile("emms\n\t"::: "memory");
2171 #endif 2171 #endif
2172 2172
2173 #if !defined(RUNTIME_CPUDETECT) || !defined (CONFIG_GPL) //ensure that the flags match the compiled variant if cpudetect is off 2173 #if !defined(RUNTIME_CPUDETECT) || !defined (CONFIG_GPL) //ensure that the flags match the compiled variant if cpudetect is off
2174 flags &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC|SWS_CPU_CAPS_BFIN); 2174 flags &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC|SWS_CPU_CAPS_BFIN);
2175 #ifdef HAVE_MMX2 2175 #ifdef HAVE_MMX2