comparison: i386/cavsdsp_mmx.c @ 8031:eebc7209c47f (libavcodec)
Convert asm keyword into __asm__.
Neither the asm() nor the __asm__() keyword is part of the C99
standard, but while GCC accepts the former in C89 syntax, it is not
accepted in C99 unless GNU extensions are turned on (with -fasm). The
latter form is accepted in any syntax as an extension (without
requiring further command-line options).
The Sun Studio C99 compiler likewise does not accept asm() while it does
accept __asm__(), albeit with a warning that this is not valid C99 syntax.
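To illustrate the difference outside the patch, here is a minimal sketch; the helper name bswap32 and the macro USE_PLAIN_ASM are hypothetical, and it assumes an x86 target with GCC-style extended asm:

    /* Hypothetical example only -- not part of this changeset. */
    #include <stdint.h>

    static inline uint32_t bswap32(uint32_t x)
    {
    #ifdef USE_PLAIN_ASM
        asm volatile("bswap %0" : "+r"(x));     /* plain asm: rejected by "gcc -std=c99" without GNU extensions */
    #else
        __asm__ volatile("bswap %0" : "+r"(x)); /* __asm__: accepted in both -std=gnu89 and -std=c99 */
    #endif
        return x;
    }

Compiling this with "gcc -std=c99 -c" should succeed as written, while defining USE_PLAIN_ASM should reproduce the failure this change avoids; Sun Studio's c99 driver should behave analogously, accepting __asm__() with a warning.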
author   | flameeyes
date     | Thu, 16 Oct 2008 13:34:09 +0000
parents  | f7cbb7733146
children | 899a12113af5
comparing 8030:a512ac8fa540 (old) with 8031:eebc7209c47f (new)
33    *
34    ****************************************************************************/
35
36    static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
37    {
38  - asm volatile(
38  + __asm__ volatile(
39    "movq 112(%0), %%mm4 \n\t" /* mm4 = src7 */
40    "movq 16(%0), %%mm5 \n\t" /* mm5 = src1 */
41    "movq 80(%0), %%mm2 \n\t" /* mm2 = src5 */
42    "movq 48(%0), %%mm7 \n\t" /* mm7 = src3 */
43    "movq %%mm4, %%mm0 \n\t"
118   for(i=0; i<2; i++){
119   DECLARE_ALIGNED_8(uint64_t, tmp);
120
121   cavs_idct8_1d(block+4*i, ff_pw_4);
122
123 - asm volatile(
123 + __asm__ volatile(
124   "psraw $3, %%mm7 \n\t"
125   "psraw $3, %%mm6 \n\t"
126   "psraw $3, %%mm5 \n\t"
127   "psraw $3, %%mm4 \n\t"
128   "psraw $3, %%mm3 \n\t"
148   }
149
150   for(i=0; i<2; i++){
151   cavs_idct8_1d(b2+4*i, ff_pw_64);
152
153 - asm volatile(
153 + __asm__ volatile(
154   "psraw $7, %%mm7 \n\t"
155   "psraw $7, %%mm6 \n\t"
156   "psraw $7, %%mm5 \n\t"
157   "psraw $7, %%mm4 \n\t"
158   "psraw $7, %%mm3 \n\t"
173   }
174
175   add_pixels_clamped_mmx(b2, dst, stride);
176
177   /* clear block */
178 - asm volatile(
178 + __asm__ volatile(
179   "pxor %%mm7, %%mm7 \n\t"
180   "movq %%mm7, (%0) \n\t"
181   "movq %%mm7, 8(%0) \n\t"
182   "movq %%mm7, 16(%0) \n\t"
183   "movq %%mm7, 24(%0) \n\t"
273   #define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
274   int w= 2;\
275   src -= 2*srcStride;\
276   \
277   while(w--){\
278 - asm volatile(\
278 + __asm__ volatile(\
279   "pxor %%mm7, %%mm7 \n\t"\
280   "movd (%0), %%mm0 \n\t"\
281   "add %2, %0 \n\t"\
282   "movd (%0), %%mm1 \n\t"\
283   "add %2, %0 \n\t"\
304   : "+a"(src), "+c"(dst)\
305   : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ADD), "m"(MUL1), "m"(MUL2)\
306   : "memory"\
307   );\
308   if(h==16){\
309 - asm volatile(\
309 + __asm__ volatile(\
310   VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
311   VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
312   VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
313   VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
314   VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
326   }
327
328   #define QPEL_CAVS(OPNAME, OP, MMX)\
329   static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
330   int h=8;\
331 - asm volatile(\
331 + __asm__ volatile(\
332   "pxor %%mm7, %%mm7 \n\t"\
333   "movq %5, %%mm6 \n\t"\
334   "1: \n\t"\
335   "movq (%0), %%mm0 \n\t"\
336   "movq 1(%0), %%mm2 \n\t"\