comparison i386/vc1dsp_mmx.c @ 8031:eebc7209c47f libavcodec

Convert asm keyword into __asm__. Neither the asm() nor the __asm__() keyword is part of the C99 standard, but while GCC accepts the former in C89 syntax, it is not accepted in C99 unless GNU extensions are turned on (with -fasm). The latter form is accepted in any syntax as an extension (without requiring further command-line options). Sun Studio C99 compiler also does not accept asm() while accepting __asm__(), albeit reporting warnings that it's not valid C99 syntax.
author flameeyes
date Thu, 16 Oct 2008 13:34:09 +0000
parents d38acad6d4bb
children
comparison
equal deleted inserted replaced
8030:a512ac8fa540 8031:eebc7209c47f
72 /** Sacrificing mm6 allows to pipeline loads from src */ 72 /** Sacrificing mm6 allows to pipeline loads from src */
73 static void vc1_put_ver_16b_shift2_mmx(int16_t *dst, 73 static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
74 const uint8_t *src, x86_reg stride, 74 const uint8_t *src, x86_reg stride,
75 int rnd, int64_t shift) 75 int rnd, int64_t shift)
76 { 76 {
77 asm volatile( 77 __asm__ volatile(
78 "mov $3, %%"REG_c" \n\t" 78 "mov $3, %%"REG_c" \n\t"
79 LOAD_ROUNDER_MMX("%5") 79 LOAD_ROUNDER_MMX("%5")
80 "movq "MANGLE(ff_pw_9)", %%mm6 \n\t" 80 "movq "MANGLE(ff_pw_9)", %%mm6 \n\t"
81 "1: \n\t" 81 "1: \n\t"
82 "movd (%0), %%mm2 \n\t" 82 "movd (%0), %%mm2 \n\t"
112 { 112 {
113 int h = 8; 113 int h = 8;
114 114
115 src -= 1; 115 src -= 1;
116 rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */ 116 rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */
117 asm volatile( 117 __asm__ volatile(
118 LOAD_ROUNDER_MMX("%4") 118 LOAD_ROUNDER_MMX("%4")
119 "movq "MANGLE(ff_pw_128)", %%mm6\n\t" 119 "movq "MANGLE(ff_pw_128)", %%mm6\n\t"
120 "movq "MANGLE(ff_pw_9)", %%mm5 \n\t" 120 "movq "MANGLE(ff_pw_9)", %%mm5 \n\t"
121 "1: \n\t" 121 "1: \n\t"
122 "movq 2*0+0(%1), %%mm1 \n\t" 122 "movq 2*0+0(%1), %%mm1 \n\t"
153 */ 153 */
154 static void vc1_put_shift2_mmx(uint8_t *dst, const uint8_t *src, 154 static void vc1_put_shift2_mmx(uint8_t *dst, const uint8_t *src,
155 x86_reg stride, int rnd, x86_reg offset) 155 x86_reg stride, int rnd, x86_reg offset)
156 { 156 {
157 rnd = 8-rnd; 157 rnd = 8-rnd;
158 asm volatile( 158 __asm__ volatile(
159 "mov $8, %%"REG_c" \n\t" 159 "mov $8, %%"REG_c" \n\t"
160 LOAD_ROUNDER_MMX("%5") 160 LOAD_ROUNDER_MMX("%5")
161 "movq "MANGLE(ff_pw_9)", %%mm6\n\t" 161 "movq "MANGLE(ff_pw_9)", %%mm6\n\t"
162 "1: \n\t" 162 "1: \n\t"
163 "movd 0(%0 ), %%mm3 \n\t" 163 "movd 0(%0 ), %%mm3 \n\t"
262 x86_reg src_stride, \ 262 x86_reg src_stride, \
263 int rnd, int64_t shift) \ 263 int rnd, int64_t shift) \
264 { \ 264 { \
265 int h = 8; \ 265 int h = 8; \
266 src -= src_stride; \ 266 src -= src_stride; \
267 asm volatile( \ 267 __asm__ volatile( \
268 LOAD_ROUNDER_MMX("%5") \ 268 LOAD_ROUNDER_MMX("%5") \
269 "movq "MANGLE(ff_pw_53)", %%mm5\n\t" \ 269 "movq "MANGLE(ff_pw_53)", %%mm5\n\t" \
270 "movq "MANGLE(ff_pw_18)", %%mm6\n\t" \ 270 "movq "MANGLE(ff_pw_18)", %%mm6\n\t" \
271 ASMALIGN(3) \ 271 ASMALIGN(3) \
272 "1: \n\t" \ 272 "1: \n\t" \
318 const int16_t *src, int rnd) \ 318 const int16_t *src, int rnd) \
319 { \ 319 { \
320 int h = 8; \ 320 int h = 8; \
321 src -= 1; \ 321 src -= 1; \
322 rnd -= (-4+58+13-3)*256; /* Add -256 bias */ \ 322 rnd -= (-4+58+13-3)*256; /* Add -256 bias */ \
323 asm volatile( \ 323 __asm__ volatile( \
324 LOAD_ROUNDER_MMX("%4") \ 324 LOAD_ROUNDER_MMX("%4") \
325 "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \ 325 "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \
326 "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \ 326 "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \
327 ASMALIGN(3) \ 327 ASMALIGN(3) \
328 "1: \n\t" \ 328 "1: \n\t" \
356 x86_reg stride, int rnd, x86_reg offset) \ 356 x86_reg stride, int rnd, x86_reg offset) \
357 { \ 357 { \
358 int h = 8; \ 358 int h = 8; \
359 src -= offset; \ 359 src -= offset; \
360 rnd = 32-rnd; \ 360 rnd = 32-rnd; \
361 asm volatile ( \ 361 __asm__ volatile ( \
362 LOAD_ROUNDER_MMX("%6") \ 362 LOAD_ROUNDER_MMX("%6") \
363 "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \ 363 "movq "MANGLE(ff_pw_53)", %%mm5 \n\t" \
364 "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \ 364 "movq "MANGLE(ff_pw_18)", %%mm6 \n\t" \
365 ASMALIGN(3) \ 365 ASMALIGN(3) \
366 "1: \n\t" \ 366 "1: \n\t" \
410 static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] = 410 static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =
411 { NULL, vc1_put_hor_16b_shift1_mmx, vc1_put_hor_16b_shift2_mmx, vc1_put_hor_16b_shift3_mmx }; 411 { NULL, vc1_put_hor_16b_shift1_mmx, vc1_put_hor_16b_shift2_mmx, vc1_put_hor_16b_shift3_mmx };
412 static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] = 412 static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =
413 { NULL, vc1_put_shift1_mmx, vc1_put_shift2_mmx, vc1_put_shift3_mmx }; 413 { NULL, vc1_put_shift1_mmx, vc1_put_shift2_mmx, vc1_put_shift3_mmx };
414 414
415 asm volatile( 415 __asm__ volatile(
416 "pxor %%mm0, %%mm0 \n\t" 416 "pxor %%mm0, %%mm0 \n\t"
417 ::: "memory" 417 ::: "memory"
418 ); 418 );
419 419
420 if (vmode) { /* Vertical filter to apply */ 420 if (vmode) { /* Vertical filter to apply */