Mercurial > libavcodec.hg
comparison: i386/dsputil_h264_template_ssse3.c @ 8031:eebc7209c47f
Convert the asm keyword into __asm__.

Neither the asm() nor the __asm__() keyword is part of the C99
standard. GCC accepts the former in C89 mode, but not in C99 mode
unless GNU extensions are enabled (with -fasm). The latter form is
accepted in any mode as an extension, without requiring further
command-line options.

The Sun Studio C99 compiler likewise rejects asm() while accepting
__asm__(), albeit with warnings that it is not valid C99 syntax.
author   | flameeyes
date     | Thu, 16 Oct 2008 13:34:09 +0000
parents  | 33896780c612
children |
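For illustration, a minimal snippet (not part of this changeset) showing the difference the commit message describes; the file and function names are hypothetical:

/* t.c -- hypothetical test file.
 * gcc -std=gnu99 -c t.c : both functions compile.
 * gcc -std=c99  -c t.c : keyword_asm() is rejected unless -fasm is added;
 *                        keyword_asm2() compiles in either mode. */
void keyword_asm(void)  { asm     volatile("" ::: "memory"); }
void keyword_asm2(void) { __asm__ volatile("" ::: "memory"); }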
comparing 8030:a512ac8fa540 with 8031:eebc7209c47f
--- a/i386/dsputil_h264_template_ssse3.c
+++ b/i386/dsputil_h264_template_ssse3.c
@@ -35,21 +35,21 @@
     assert(x<8 && y<8 && x>=0 && y>=0);
 
     if(y==0 || x==0)
     {
         /* 1 dimensional filter only */
-        asm volatile(
+        __asm__ volatile(
             "movd %0, %%xmm7 \n\t"
             "movq %1, %%xmm6 \n\t"
             "pshuflw $0, %%xmm7, %%xmm7 \n\t"
             "movlhps %%xmm6, %%xmm6 \n\t"
             "movlhps %%xmm7, %%xmm7 \n\t"
             :: "r"(255*(x+y)+8), "m"(*(rnd?&ff_pw_4:&ff_pw_3))
         );
 
         if(x) {
-            asm volatile(
+            __asm__ volatile(
                 "1: \n\t"
                 "movq (%1), %%xmm0 \n\t"
                 "movq 1(%1), %%xmm1 \n\t"
                 "movq (%1,%3), %%xmm2 \n\t"
                 "movq 1(%1,%3), %%xmm3 \n\t"
73 "jg 1b \n\t" | 73 "jg 1b \n\t" |
74 :"+r"(dst), "+r"(src), "+r"(h) | 74 :"+r"(dst), "+r"(src), "+r"(h) |
75 :"r"((x86_reg)stride) | 75 :"r"((x86_reg)stride) |
76 ); | 76 ); |
77 } else { | 77 } else { |
78 asm volatile( | 78 __asm__ volatile( |
79 "1: \n\t" | 79 "1: \n\t" |
80 "movq (%1), %%xmm0 \n\t" | 80 "movq (%1), %%xmm0 \n\t" |
81 "movq (%1,%3), %%xmm1 \n\t" | 81 "movq (%1,%3), %%xmm1 \n\t" |
82 "movdqa %%xmm1, %%xmm2 \n\t" | 82 "movdqa %%xmm1, %%xmm2 \n\t" |
83 "movq (%1,%3,2), %%xmm3 \n\t" | 83 "movq (%1,%3,2), %%xmm3 \n\t" |
@@ -105,22 +105,22 @@
         }
         return;
     }
 
     /* general case, bilinear */
-    asm volatile(
+    __asm__ volatile(
         "movd %0, %%xmm7 \n\t"
         "movd %1, %%xmm6 \n\t"
         "movdqa %2, %%xmm5 \n\t"
         "pshuflw $0, %%xmm7, %%xmm7 \n\t"
         "pshuflw $0, %%xmm6, %%xmm6 \n\t"
         "movlhps %%xmm7, %%xmm7 \n\t"
         "movlhps %%xmm6, %%xmm6 \n\t"
         :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(*(rnd?&ff_pw_32:&ff_pw_28))
     );
 
-    asm volatile(
+    __asm__ volatile(
         "movq (%1), %%xmm0 \n\t"
         "movq 1(%1), %%xmm1 \n\t"
         "punpcklbw %%xmm1, %%xmm0 \n\t"
         "add %3, %1 \n\t"
         "1: \n\t"
@@ -158,20 +158,20 @@
     );
 }
 
 static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
 {
-    asm volatile(
+    __asm__ volatile(
         "movd %0, %%mm7 \n\t"
         "movd %1, %%mm6 \n\t"
         "movq %2, %%mm5 \n\t"
         "pshufw $0, %%mm7, %%mm7 \n\t"
         "pshufw $0, %%mm6, %%mm6 \n\t"
         :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(ff_pw_32)
     );
 
-    asm volatile(
+    __asm__ volatile(
         "movd (%1), %%mm0 \n\t"
         "punpcklbw 1(%1), %%mm0 \n\t"
         "add %3, %1 \n\t"
         "1: \n\t"
         "movd (%1), %%mm1 \n\t"