/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX-optimized DSP functions, based on H.264 optimizations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/cavsdsp.h"
#include "dsputil_mmx.h"

/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/

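/* For reference, a scalar sketch of the 1-D pass implemented in assembly
 * below, reconstructed from the register comments (not compiled here).
 * "bias" is the rounding constant folded into the even half (4 for the
 * first pass, 64 for the second); the caller does the matching right
 * shift afterwards:
 *
 *     a0 = 3*src1 - 2*src7;          a1 = 3*src3 + 2*src5;
 *     a2 = 2*src3 - 3*src5;          a3 = 2*src1 + 3*src7;
 *
 *     b4 = 2*(a0 + a1 + a3) + a1;    b5 = 2*(a0 - a1 + a2) + a0;
 *     b6 = 2*(a3 - a2 - a1) + a3;    b7 = 2*(a0 - a2 - a3) - a2;
 *
 *     a7 = 4*src2 - 10*src6;         a6 = 4*src6 + 10*src2;
 *     a4 = 8*(src0 + src4) + bias;   a5 = 8*(src0 - src4) + bias;
 *
 *     dst0 = (a4 + a6) + b4;         dst7 = (a4 + a6) - b4;
 *     dst1 = (a5 + a7) + b5;         dst6 = (a5 + a7) - b5;
 *     dst2 = (a5 - a7) + b6;         dst5 = (a5 - a7) - b6;
 *     dst3 = (a4 - a6) + b7;         dst4 = (a4 - a6) - b7;
 */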
static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
    __asm__ volatile(
        "movq 112(%0), %%mm4  \n\t" /* mm4 = src7 */
        "movq  16(%0), %%mm5  \n\t" /* mm5 = src1 */
        "movq  80(%0), %%mm2  \n\t" /* mm2 = src5 */
        "movq  48(%0), %%mm7  \n\t" /* mm7 = src3 */
        "movq %%mm4, %%mm0    \n\t"
        "movq %%mm5, %%mm3    \n\t"
        "movq %%mm2, %%mm6    \n\t"
        "movq %%mm7, %%mm1    \n\t"

        "paddw %%mm4, %%mm4   \n\t" /* mm4 = 2*src7 */
        "paddw %%mm3, %%mm3   \n\t" /* mm3 = 2*src1 */
        "paddw %%mm6, %%mm6   \n\t" /* mm6 = 2*src5 */
        "paddw %%mm1, %%mm1   \n\t" /* mm1 = 2*src3 */
        "paddw %%mm4, %%mm0   \n\t" /* mm0 = 3*src7 */
        "paddw %%mm3, %%mm5   \n\t" /* mm5 = 3*src1 */
        "paddw %%mm6, %%mm2   \n\t" /* mm2 = 3*src5 */
        "paddw %%mm1, %%mm7   \n\t" /* mm7 = 3*src3 */
        "psubw %%mm4, %%mm5   \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
        "paddw %%mm6, %%mm7   \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
        "psubw %%mm2, %%mm1   \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
        "paddw %%mm0, %%mm3   \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */

        "movq %%mm5, %%mm4    \n\t"
        "movq %%mm7, %%mm6    \n\t"
        "movq %%mm3, %%mm0    \n\t"
        "movq %%mm1, %%mm2    \n\t"
        SUMSUB_BA( %%mm7, %%mm5 )   /* mm7 = a0 + a1  mm5 = a0 - a1 */
        "paddw %%mm3, %%mm7   \n\t" /* mm7 = a0 + a1 + a3 */
        "paddw %%mm1, %%mm5   \n\t" /* mm5 = a0 - a1 + a2 */
        "paddw %%mm7, %%mm7   \n\t"
        "paddw %%mm5, %%mm5   \n\t"
        "paddw %%mm6, %%mm7   \n\t" /* mm7 = b4 */
        "paddw %%mm4, %%mm5   \n\t" /* mm5 = b5 */

        SUMSUB_BA( %%mm1, %%mm3 )   /* mm1 = a3 + a2  mm3 = a3 - a2 */
        "psubw %%mm1, %%mm4   \n\t" /* mm4 = a0 - a2 - a3 */
        "movq %%mm4, %%mm1    \n\t" /* mm1 = a0 - a2 - a3 */
        "psubw %%mm6, %%mm3   \n\t" /* mm3 = a3 - a2 - a1 */
        "paddw %%mm1, %%mm1   \n\t"
        "paddw %%mm3, %%mm3   \n\t"
        "psubw %%mm2, %%mm1   \n\t" /* mm1 = b7 */
        "paddw %%mm0, %%mm3   \n\t" /* mm3 = b6 */

        "movq 32(%0), %%mm2   \n\t" /* mm2 = src2 */
        "movq 96(%0), %%mm6   \n\t" /* mm6 = src6 */
        "movq %%mm2, %%mm4    \n\t"
        "movq %%mm6, %%mm0    \n\t"
        "psllw $2, %%mm4      \n\t" /* mm4 = 4*src2 */
        "psllw $2, %%mm6      \n\t" /* mm6 = 4*src6 */
        "paddw %%mm4, %%mm2   \n\t" /* mm2 = 5*src2 */
        "paddw %%mm6, %%mm0   \n\t" /* mm0 = 5*src6 */
        "paddw %%mm2, %%mm2   \n\t"
        "paddw %%mm0, %%mm0   \n\t"
        "psubw %%mm0, %%mm4   \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
        "paddw %%mm2, %%mm6   \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */

        "movq   (%0), %%mm2   \n\t" /* mm2 = src0 */
        "movq 64(%0), %%mm0   \n\t" /* mm0 = src4 */
        SUMSUB_BA( %%mm0, %%mm2 )   /* mm0 = src0 + src4  mm2 = src0 - src4 */
        "psllw $3, %%mm0      \n\t"
        "psllw $3, %%mm2      \n\t"
        "paddw %1, %%mm0      \n\t" /* add rounding bias */
        "paddw %1, %%mm2      \n\t" /* add rounding bias */

        SUMSUB_BA( %%mm6, %%mm0 )   /* mm6 = a4 + a6  mm0 = a4 - a6 */
        SUMSUB_BA( %%mm4, %%mm2 )   /* mm4 = a5 + a7  mm2 = a5 - a7 */
        SUMSUB_BA( %%mm7, %%mm6 )   /* mm7 = dst0  mm6 = dst7 */
        SUMSUB_BA( %%mm5, %%mm4 )   /* mm5 = dst1  mm4 = dst6 */
        SUMSUB_BA( %%mm3, %%mm2 )   /* mm3 = dst2  mm2 = dst5 */
        SUMSUB_BA( %%mm1, %%mm0 )   /* mm1 = dst3  mm0 = dst4 */
        :: "r"(block), "m"(bias)
    );
}

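/* Full 8x8 inverse transform: a first 1-D pass over the input with bias 4
 * and a >>3 scale, a transpose (done as 4x4 sub-blocks through the aligned
 * temporary b2), a second 1-D pass with bias 64 and a >>7 scale, and
 * finally a clamped add of the residual into dst. */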
static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    DECLARE_ALIGNED(8, int16_t, b2)[64];

    for(i=0; i<2; i++){
        DECLARE_ALIGNED(8, uint64_t, tmp);

        cavs_idct8_1d(block+4*i, ff_pw_4.a);

        __asm__ volatile(
            "psraw $3, %%mm7    \n\t"
            "psraw $3, %%mm6    \n\t"
            "psraw $3, %%mm5    \n\t"
            "psraw $3, %%mm4    \n\t"
            "psraw $3, %%mm3    \n\t"
            "psraw $3, %%mm2    \n\t"
            "psraw $3, %%mm1    \n\t"
            "psraw $3, %%mm0    \n\t"
            "movq %%mm7, %0     \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0,  8(%1) \n\t"
            "movq %%mm6, 24(%1) \n\t"
            "movq %%mm7, 40(%1) \n\t"
            "movq %%mm4, 56(%1) \n\t"
            "movq %0, %%mm7     \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7,   (%1) \n\t"
            "movq %%mm1, 16(%1) \n\t"
            "movq %%mm0, 32(%1) \n\t"
            "movq %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        cavs_idct8_1d(b2+4*i, ff_pw_64.a);

        __asm__ volatile(
            "psraw $7, %%mm7     \n\t"
            "psraw $7, %%mm6     \n\t"
            "psraw $7, %%mm5     \n\t"
            "psraw $7, %%mm4     \n\t"
            "psraw $7, %%mm3     \n\t"
            "psraw $7, %%mm2     \n\t"
            "psraw $7, %%mm1     \n\t"
            "psraw $7, %%mm0     \n\t"
            "movq %%mm7,    (%0) \n\t"
            "movq %%mm5,  16(%0) \n\t"
            "movq %%mm3,  32(%0) \n\t"
            "movq %%mm1,  48(%0) \n\t"
            "movq %%mm0,  64(%0) \n\t"
            "movq %%mm2,  80(%0) \n\t"
            "movq %%mm4,  96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    ff_add_pixels_clamped_mmx(b2, dst, stride);
}

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/

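/* CAVS quarter-pel motion compensation. The horizontal case uses a single
 * 4-tap half-pel filter (see cavs_qpel8_h below); the vertical cases use
 * one of three 6-tap filters, one per quarter-pel phase. Scalar
 * equivalents of the vertical filters are noted after each macro. */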
/* vertical filter [-1 -2 96 42 -7  0] */
#define QPEL_CAVSV1(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"             \n\t"\
    "movq "#C", %%mm6            \n\t"\
    "pmullw %5, %%mm6            \n\t"\
    "movq "#D", %%mm7            \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm7\n\t"\
    "psllw $3, "#E"              \n\t"\
    "psubw "#E", %%mm6           \n\t"\
    "psraw $3, "#E"              \n\t"\
    "paddw %%mm7, %%mm6          \n\t"\
    "paddw "#E", %%mm6           \n\t"\
    "paddw "#B", "#B"            \n\t"\
    "pxor %%mm7, %%mm7           \n\t"\
    "add %2, %0                  \n\t"\
    "punpcklbw %%mm7, "#F"       \n\t"\
    "psubw "#B", %%mm6           \n\t"\
    "psraw $1, "#B"              \n\t"\
    "psubw "#A", %%mm6           \n\t"\
    "paddw %4, %%mm6             \n\t"\
    "psraw $7, %%mm6             \n\t"\
    "packuswb %%mm6, %%mm6       \n\t"\
    OP(%%mm6, (%1), A, d)             \
    "add %3, %1                  \n\t"

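/* QPEL_CAVSV1 above computes, per output pixel (A..F are six consecutive
 * rows; a sketch of the arithmetic, matching the [-1 -2 96 42 -7 0] taps):
 *
 *     out = clip_uint8((-A - 2*B + 96*C + 42*D - 7*E + 64) >> 7)
 *
 * E and B are scaled up and restored in place so no extra register is
 * needed; F gets a 0 tap here but is loaded and unpacked for the next
 * invocation, with mm7 doubling as the zero register. */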
/* vertical filter [ 0 -1  5  5 -1  0] */
#define QPEL_CAVSV2(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"             \n\t"\
    "movq "#C", %%mm6            \n\t"\
    "paddw "#D", %%mm6           \n\t"\
    "pmullw %5, %%mm6            \n\t"\
    "add %2, %0                  \n\t"\
    "punpcklbw %%mm7, "#F"       \n\t"\
    "psubw "#B", %%mm6           \n\t"\
    "psubw "#E", %%mm6           \n\t"\
    "paddw %4, %%mm6             \n\t"\
    "psraw $3, %%mm6             \n\t"\
    "packuswb %%mm6, %%mm6       \n\t"\
    OP(%%mm6, (%1), A, d)             \
    "add %3, %1                  \n\t"

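/* QPEL_CAVSV2 above is the half-pel case; in scalar terms
 * (taps [0 -1 5 5 -1 0]):
 *
 *     out = clip_uint8((5*(C + D) - B - E + 4) >> 3)
 */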
/* vertical filter [ 0 -7 42 96 -2 -1] */
#define QPEL_CAVSV3(A,B,C,D,E,F,OP,MUL2) \
    "movd (%0), "#F"             \n\t"\
    "movq "#C", %%mm6            \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm6\n\t"\
    "movq "#D", %%mm7            \n\t"\
    "pmullw %5, %%mm7            \n\t"\
    "psllw $3, "#B"              \n\t"\
    "psubw "#B", %%mm6           \n\t"\
    "psraw $3, "#B"              \n\t"\
    "paddw %%mm7, %%mm6          \n\t"\
    "paddw "#B", %%mm6           \n\t"\
    "paddw "#E", "#E"            \n\t"\
    "pxor %%mm7, %%mm7           \n\t"\
    "add %2, %0                  \n\t"\
    "punpcklbw %%mm7, "#F"       \n\t"\
    "psubw "#E", %%mm6           \n\t"\
    "psraw $1, "#E"              \n\t"\
    "psubw "#F", %%mm6           \n\t"\
    "paddw %4, %%mm6             \n\t"\
    "psraw $7, %%mm6             \n\t"\
    "packuswb %%mm6, %%mm6       \n\t"\
    OP(%%mm6, (%1), A, d)             \
    "add %3, %1                  \n\t"

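/* QPEL_CAVSV3 above mirrors QPEL_CAVSV1 (taps [0 -7 42 96 -2 -1]):
 *
 *     out = clip_uint8((-7*B + 42*C + 96*D - 2*E - F + 64) >> 7)
 */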
#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
    int w= 2;\
    src -= 2*srcStride;\
    \
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7       \n\t"\
            "movd (%0), %%mm0        \n\t"\
            "add %2, %0              \n\t"\
            "movd (%0), %%mm1        \n\t"\
            "add %2, %0              \n\t"\
            "movd (%0), %%mm2        \n\t"\
            "add %2, %0              \n\t"\
            "movd (%0), %%mm3        \n\t"\
            "add %2, %0              \n\t"\
            "movd (%0), %%mm4        \n\t"\
            "add %2, %0              \n\t"\
            "punpcklbw %%mm7, %%mm0  \n\t"\
            "punpcklbw %%mm7, %%mm1  \n\t"\
            "punpcklbw %%mm7, %%mm2  \n\t"\
            "punpcklbw %%mm7, %%mm3  \n\t"\
            "punpcklbw %%mm7, %%mm4  \n\t"\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
            VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\
            VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\
            : "memory"\
        );\
        if(h==16){\
            __asm__ volatile(\
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
                VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, MUL2)\
                VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, MUL2)\
                VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, MUL2)\
                VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, MUL2)\
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, MUL2)\
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, MUL2)\
                \
                : "+a"(src), "+c"(dst)\
                : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride), "m"(ADD), "m"(MUL1)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }

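/* QPEL_CAVSVNUM runs the vertical filter over a 4-pixel-wide column strip
 * (w = 2 strips for an 8-wide block): five rows are preloaded into
 * mm0..mm4, then each VOP invocation fetches one new row, emits one output
 * row, and rotates the register roles, so every source row is loaded only
 * once. The first asm block covers h = 8; the second adds 8 more rows for
 * h = 16. */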
#define QPEL_CAVS(OPNAME, OP, MMX)\
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7       \n\t"\
        "movq %5, %%mm6          \n\t"\
        "1:                      \n\t"\
        "movq    (%0), %%mm0     \n\t"\
        "movq   1(%0), %%mm2     \n\t"\
        "movq %%mm0, %%mm1       \n\t"\
        "movq %%mm2, %%mm3       \n\t"\
        "punpcklbw %%mm7, %%mm0  \n\t"\
        "punpckhbw %%mm7, %%mm1  \n\t"\
        "punpcklbw %%mm7, %%mm2  \n\t"\
        "punpckhbw %%mm7, %%mm3  \n\t"\
        "paddw %%mm2, %%mm0      \n\t"\
        "paddw %%mm3, %%mm1      \n\t"\
        "pmullw %%mm6, %%mm0     \n\t"\
        "pmullw %%mm6, %%mm1     \n\t"\
        "movq  -1(%0), %%mm2     \n\t"\
        "movq   2(%0), %%mm4     \n\t"\
        "movq %%mm2, %%mm3       \n\t"\
        "movq %%mm4, %%mm5       \n\t"\
        "punpcklbw %%mm7, %%mm2  \n\t"\
        "punpckhbw %%mm7, %%mm3  \n\t"\
        "punpcklbw %%mm7, %%mm4  \n\t"\
        "punpckhbw %%mm7, %%mm5  \n\t"\
        "paddw %%mm4, %%mm2      \n\t"\
        "paddw %%mm3, %%mm5      \n\t"\
        "psubw %%mm2, %%mm0      \n\t"\
        "psubw %%mm5, %%mm1      \n\t"\
        "movq %6, %%mm5          \n\t"\
        "paddw %%mm5, %%mm0      \n\t"\
        "paddw %%mm5, %%mm1      \n\t"\
        "psraw $3, %%mm0         \n\t"\
        "psraw $3, %%mm1         \n\t"\
        "packuswb %%mm1, %%mm0   \n\t"\
        OP(%%mm0, (%1),%%mm5, q)      \
        "add %3, %0              \n\t"\
        "add %4, %1              \n\t"\
        "decl %2                 \n\t"\
        " jnz 1b                 \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_4)\
        : "memory"\
    );\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42) \
}\
\
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_5) \
}\
\
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42) \
}\
\
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\

#define CAVS_MC(OPNAME, SIZE, MMX) \
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
}\
\
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
}\
\
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
}\
\
static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
}\

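/* In the mcXY names, X and Y are the quarter-pel offsets: mc20 is the
 * horizontal half-pel position, mc01/mc02/mc03 the three vertical phases
 * handled by v1/v2/v3. Positions not wired up here presumably keep their
 * C implementations from cavsdsp.c. */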
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a "      \n\t"\
"mov" #size " " #a ", " #b "    \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "    \n\t"

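/* PUT_OP stores the filtered result; the AVG variants first average it
 * with the pixels already in dst (pavgusb on 3DNow!, pavgb on MMX2, both
 * rounding unsigned byte averages). */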
QPEL_CAVS(put_,       PUT_OP, 3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)
QPEL_CAVS(put_,       PUT_OP, mmx2)
QPEL_CAVS(avg_,  AVG_MMX2_OP, mmx2)

CAVS_MC(put_,  8, 3dnow)
CAVS_MC(put_, 16, 3dnow)
CAVS_MC(avg_,  8, 3dnow)
CAVS_MC(avg_, 16, 3dnow)
CAVS_MC(put_,  8, mmx2)
CAVS_MC(put_, 16, mmx2)
CAVS_MC(avg_,  8, mmx2)
CAVS_MC(avg_, 16, mmx2)

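/* Runtime init: the qpel tables follow dsputil's convention of 16 entries
 * indexed by x + 4*y quarter-pel position, so the indices set below are
 * mc00 (0), mc20 (2), mc01 (4), mc02 (8) and mc03 (12); IDX 0 is the
 * 16x16 table, IDX 1 the 8x8 one. The plain-copy mc00 entries are not
 * defined in this file (they come from the MMX2 code in dsputil_mmx,
 * which is also why the 3DNow! init reuses them). */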
static void ff_cavsdsp_init_mmx2(CAVSDSPContext* c, AVCodecContext *avctx) {
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_mmx2; \

    dspfunc(put_cavs_qpel, 0, 16);
    dspfunc(put_cavs_qpel, 1, 8);
    dspfunc(avg_cavs_qpel, 0, 16);
    dspfunc(avg_cavs_qpel, 1, 8);
#undef dspfunc
    c->cavs_idct8_add = cavs_idct8_add_mmx;
}

static void ff_cavsdsp_init_3dnow(CAVSDSPContext* c, AVCodecContext *avctx) {
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_3dnow; \
    c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_3dnow; \

    dspfunc(put_cavs_qpel, 0, 16);
    dspfunc(put_cavs_qpel, 1, 8);
    dspfunc(avg_cavs_qpel, 0, 16);
    dspfunc(avg_cavs_qpel, 1, 8);
#undef dspfunc
    c->cavs_idct8_add = cavs_idct8_add_mmx;
}

void ff_cavsdsp_init_mmx(CAVSDSPContext *c, AVCodecContext *avctx)
{
    int mm_flags = mm_support();

    if (mm_flags & AV_CPU_FLAG_MMX2)  ff_cavsdsp_init_mmx2 (c, avctx);
    if (mm_flags & AV_CPU_FLAG_3DNOW) ff_cavsdsp_init_3dnow(c, avctx);
}
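/* A usage sketch (an assumption about the init path of this era, where
 * ff_cavsdsp_init() in cavsdsp.c sets the C defaults and then calls this
 * function when built for x86):
 *
 *     CAVSDSPContext dsp;
 *     ff_cavsdsp_init(&dsp, avctx);             // C defaults + overrides above
 *     dsp.cavs_idct8_add(dst, block, stride);   // cavs_idct8_add_mmx on MMX CPUs
 *     dsp.put_cavs_qpel_pixels_tab[0][2](dst, src, stride);  // 16x16 mc20
 */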