/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// For a shared library it is better to synthesize constants in
// registers this way instead of loading them from memory:
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif

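/*
 * Why the PIC variants work: pcmpeqd of a register with itself sets all
 * bits, so each 16-bit lane holds -1. psrlw $15 then leaves 0x0001 per
 * word; packuswb packs that to 0x01 in every byte (== ff_bone), while
 * psllw $1 doubles it to 0x0002 per word (== ff_wtwo). Building the
 * constants in registers avoids a GOT-relative memory access in
 * position-independent code.
 */
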
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

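/*
 * Scalar reference for the two averaging tricks above (a sketch for
 * exposition only; the helper names are ours and nothing in the build
 * uses them). Since a + b == 2*(a & b) + (a ^ b) and
 * a | b == (a & b) + (a ^ b), both byte averages can be computed
 * without 8-bit overflow; the & 0xfe masks the bit that the 64-bit
 * psrlq would otherwise shift across byte-lane boundaries.
 */
static inline int avg_no_rnd_ref(int a, int b)
{
    return (a & b) + (((a ^ b) & 0xfe) >> 1); /* == (a + b)     >> 1 */
}

static inline int avg_rnd_ref(int a, int b)
{
    return (a | b) - (((a ^ b) & 0xfe) >> 1); /* == (a + b + 1) >> 1 */
}
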
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;

    // An exact copy of the block above here would make the compiler
    // generate some very strange code, so the block pointer is passed
    // through a "r" constraint instead.
    __asm__ volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}

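/*
 * Scalar sketch of put_pixels_clamped (illustrative only; the helper
 * name is ours, not part of this file): packuswb saturates each 16-bit
 * DCT coefficient to [0,255] before it is stored as one byte of the
 * 8x8 block.
 */
static inline void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[i * 8 + j];
            pixels[i * line_size + j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
    }
}
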
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
    { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

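/*
 * The bias trick above, per sample: packsswb saturates the coefficient
 * to the signed range [-128,127], and adding the 0x80 bytes from
 * vector128 (byte addition wraps) then maps that onto the unsigned
 * range [0,255] -- i.e. dst = clip(v, -128, 127) + 128.
 */
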
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

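/*
 * Note on the SSE2 copies below: the source rows are loaded with movdqu
 * because `pixels` can have any alignment after motion compensation,
 * while the destination is written with movdqa, which faults unless
 * `block` is 16-byte aligned -- so callers must guarantee that.
 */
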
static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "mov %1, %%"REG_a" \n\t"\
        "1: \n\t"\
        "movq %%mm7, (%0, %%"REG_a") \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
        "add $32, %%"REG_a" \n\t"\
        " js 1b \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %4, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"

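/*
 * Scalar sketch of the deblocking math in H263_LOOP_FILTER, modeled on
 * the C loop filter in h263.c (orientation only -- the C version and
 * ff_h263_loop_filter_strength are the authoritative definition).
 * With p0..p3 the four pixels across the block edge:
 *
 *   d  = (p0 - p3 + 4*(p2 - p1)) / 8;
 *   d1 = d folded by a "tent" function of strength: 0 outside
 *        [-2*strength, 2*strength], d itself inside
 *        [-strength, strength], linearly folded back in between;
 *   p1 += d1;  p2 -= d1;                     (saturated to 0..255)
 *   d2 = clip((p0 - p3)/4, -|d1|/2, |d1|/2);
 *   p0 -= d2;  p3 += d2;
 */
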
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg) stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* draw the edges of width 'w' of an image of size width, height
   this mmx version can only handle w==8 || w==16 */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}

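/*
 * What draw_edges does, in scalar terms: the first two loops replicate
 * the leftmost and rightmost pixel of every row w times outward; the
 * final loop then copies the (already extended) top and bottom rows w
 * times up and down, which also fills in the corners.
 */
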
#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

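/*
 * Reference for the PAETH macro above: the PNG Paeth predictor picks,
 * among left (a), up (b) and up-left (c), the neighbour closest to
 * p = a + b - c. Scalar sketch (the helper name is ours; FFABS comes
 * from libavutil via the headers included above):
 */
static inline int paeth_predict_ref(int a, int b, int c)
{
    int p  = a + b - c;
    int pa = FFABS(p - a);
    int pb = FFABS(p - b);
    int pc = FFABS(p - c);
    if (pa <= pb && pa <= pc)
        return a;
    return pb <= pc ? b : c;
}
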
#define ABS3_MMX2\
    "psubw %%mm5, %%mm7 \n"\
    "pmaxsw %%mm7, %%mm5 \n"\
    "pxor %%mm6, %%mm6 \n"\
    "pxor %%mm7, %%mm7 \n"\
    "psubw %%mm3, %%mm6 \n"\
    "psubw %%mm4, %%mm7 \n"\
    "pmaxsw %%mm6, %%mm3 \n"\
    "pmaxsw %%mm7, %%mm4 \n"\
    "pxor %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
    "pabsw %%mm3, %%mm3 \n"\
    "pabsw %%mm4, %%mm4 \n"\
    "pabsw %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

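/*
 * MPEG-4 quarter-pel interpolation. The half-pel intermediates use the
 * 8-tap filter (-1, 3, -6, 20, 20, -6, 3, -1)/32: with a..d denoting
 * the sums of the symmetric tap pairs from the centre outwards, each
 * output is (20*a - 6*b + 3*c - d + rounder) >> 5, exactly as the
 * scalar fallbacks below spell out.
 */
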
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* + rnd */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %6, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        "movq %%mm0, %5 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2 \n\t" /* b */\
        "paddw %%mm5, %%mm3 \n\t" /* c */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm4 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "movq %5, %%mm1 \n\t"\
        "packuswb %%mm3, %%mm1 \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5 \n\t" /* b */\
        "paddw %%mm4, %%mm0 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2 \n\t" /* d */\
        "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6 \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3 \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4 \n\t" /* c */\
        "paddw %%mm2, %%mm5 \n\t" /* d */\
        "paddw %%mm6, %%mm6 \n\t" /* 2b */\
        "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4 \n\t"\
        "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4 \n\t"\
        "packuswb %%mm4, %%mm0 \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0 \n\t"\
            "movq 24(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5 \n\t" /* b */\
        "paddw %%mm2, %%mm6 \n\t" /* c */\
        "paddw %%mm5, %%mm5 \n\t" /* 2b */\
        "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0 \n\t" /* a */\
        "paddw %%mm1, %%mm5 \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
        "paddw %5, %%mm6 \n\t"\
        "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0 \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5 \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1 \n\t" /* a */\
        "paddw %%mm6, %%mm2 \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3 \n\t" /* c */\
        "paddw %%mm5, %%mm4 \n\t" /* d */\
        "paddw %%mm2, %%mm2 \n\t" /* 2b */\
        "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1 \n\t"\
        "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm1 \n\t"\
            "paddw %2, %%mm0 \n\t"\
            "paddw %2, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "movq 8(%0), %%mm2 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 17*8(%1) \n\t"\
        "movq %%mm2, 2*17*8(%1) \n\t"\
        "movq %%mm3, 3*17*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq (%0), %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "movq %%mm0, (%1) \n\t"\
        "movq %%mm1, 9*8(%1) \n\t"\
        "add $8, %1 \n\t"\
        "add %3, %0 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7 \n\t"*/\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm1 \n\t"\
        "movq 16(%0), %%mm2 \n\t"\
        "movq 24(%0), %%mm3 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0 \n\t"\
        "add %6, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1480 uint64_t half[16*2 + 17*2];\ | |
1481 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1482 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1483 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1484 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ | |
1485 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1486 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ | |
1487 }\ | |
1488 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1489 uint64_t half[16*2 + 17*2];\ | |
1490 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1491 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1492 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1493 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ | |
1494 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1495 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ | |
1496 }\ | |
1497 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1498 uint64_t half[16*2 + 17*2];\ | |
1499 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1500 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1501 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1502 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1503 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ | |
1504 }\ | |
1505 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1506 uint64_t half[16*2 + 17*2];\ | |
1507 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1508 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1509 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1510 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1511 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ | |
1512 }\ | |
1513 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1514 uint64_t half[17*2];\ | |
1515 uint8_t * const halfH= ((uint8_t*)half);\ | |
1516 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1517 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ | |
1518 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ | |
1519 }\ | |
1520 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1521 uint64_t half[17*2];\ | |
1522 uint8_t * const halfH= ((uint8_t*)half);\ | |
1523 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1524 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ | |
1525 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ | |
1526 }\ | |
1527 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1528 uint64_t half[17*2];\ | |
1529 uint8_t * const halfH= ((uint8_t*)half);\ | |
1530 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1531 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ | |
1532 } | |
1533 | |
1534 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t" | |
1535 #define AVG_3DNOW_OP(a,b,temp, size) \ | |
1536 "mov" #size " " #b ", " #temp " \n\t"\ | |
1537 "pavgusb " #temp ", " #a " \n\t"\ | |
1538 "mov" #size " " #a ", " #b " \n\t" | |
1539 #define AVG_MMX2_OP(a,b,temp, size) \ | |
1540 "mov" #size " " #b ", " #temp " \n\t"\ | |
1541 "pavgb " #temp ", " #a " \n\t"\ | |
1542 "mov" #size " " #a ", " #b " \n\t" | |
1543 | |
1544 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP) | |
1545 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP) | |
1546 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP) | |
1547 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow) | |
1548 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow) | |
1549 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow) | |
1550 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2) | |
1551 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2) | |
1552 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2) | |
1553 | |
1554 /***********************************/ | |
1555 /* bilinear qpel: not compliant with any spec, only for -lavdopts fast */ |
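/* A hedged scalar sketch of the 2-tap idea (the exact weights live in the
 * pixels*_l3/_x2/_y2 primitives defined earlier, so treat this as an
 * illustration, not the code that runs): each quarter-pel sample between two
 * neighbours is produced by one bilinear blend instead of the 8-tap MPEG-4
 * filter, e.g. roughly
 *
 *     dst[x] = (3*src[x] + src[x+d] + 2) >> 2;   // weights illustrative
 *
 * which is why these functions are fast but not spec-compliant. */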
1556 | |
1557 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\ | |
1558 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1559 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\ | |
1560 } | |
1561 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\ | |
1562 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1563 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\ | |
1564 } | |
1565 | |
1566 #define QPEL_2TAP(OPNAME, SIZE, MMX)\ | |
1567 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\ | |
1568 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\ | |
1569 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\ | |
1570 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\ | |
1571 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\ | |
1572 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\ | |
1573 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\ | |
1574 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\ | |
1575 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\ | |
1576 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1577 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\ | |
1578 }\ | |
1579 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1580 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\ | |
1581 }\ | |
1582 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\ | |
1583 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\ | |
1584 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\ | |
1585 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\ | |
1586 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\ | |
1587 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\ | |
1588 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\ | |
1589 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\ | |
1590 | |
1591 QPEL_2TAP(put_, 16, mmx2) | |
1592 QPEL_2TAP(avg_, 16, mmx2) | |
1593 QPEL_2TAP(put_, 8, mmx2) | |
1594 QPEL_2TAP(avg_, 8, mmx2) | |
1595 QPEL_2TAP(put_, 16, 3dnow) | |
1596 QPEL_2TAP(avg_, 16, 3dnow) | |
1597 QPEL_2TAP(put_, 8, 3dnow) | |
1598 QPEL_2TAP(avg_, 8, 3dnow) | |
1599 | |
1600 | |
1601 #if 0 | |
8527 | 1602 static void just_return(void) { return; } |
8430 | 1603 #endif |
1604 | |
1605 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, | |
1606 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){ | |
1607 const int w = 8; | |
1608 const int ix = ox>>(16+shift); | |
1609 const int iy = oy>>(16+shift); | |
1610 const int oxs = ox>>4; | |
1611 const int oys = oy>>4; | |
1612 const int dxxs = dxx>>4; | |
1613 const int dxys = dxy>>4; | |
1614 const int dyxs = dyx>>4; | |
1615 const int dyys = dyy>>4; | |
1616 const uint16_t r4[4] = {r,r,r,r}; | |
1617 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys}; | |
1618 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys}; | |
1619 const uint64_t shift2 = 2*shift; | |
1620 uint8_t edge_buf[(h+1)*stride]; | |
1621 int x, y; | |
1622 | |
1623 const int dxw = (dxx-(1<<(16+shift)))*(w-1); | |
1624 const int dyh = (dyy-(1<<(16+shift)))*(h-1); | |
1625 const int dxh = dxy*(h-1); | |
1626 const int dyw = dyx*(w-1); | |
1627 if( // non-constant fullpel offset (3% of blocks) | |
1628 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) | | |
1629 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift) | |
1630 // uses more than 16 bits of subpel mv (only at huge resolution) | |
1631 || (dxx|dxy|dyx|dyy)&15 ) | |
1632 { | |
1633 //FIXME could still use mmx for some of the rows | |
1634 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height); | |
1635 return; | |
1636 } | |
1637 | |
1638 src += ix + iy*stride; | |
1639 if( (unsigned)ix >= width-w || | |
1640 (unsigned)iy >= height-h ) | |
1641 { | |
1642 ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height); | |
1643 src = edge_buf; | |
1644 } | |
1645 | |
1646 __asm__ volatile( | |
1647 "movd %0, %%mm6 \n\t" | |
1648 "pxor %%mm7, %%mm7 \n\t" | |
1649 "punpcklwd %%mm6, %%mm6 \n\t" | |
1650 "punpcklwd %%mm6, %%mm6 \n\t" | |
1651 :: "r"(1<<shift) | |
1652 ); | |
1653 | |
1654 for(x=0; x<w; x+=4){ | |
1655 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0), | |
1656 oxs - dxys + dxxs*(x+1), | |
1657 oxs - dxys + dxxs*(x+2), | |
1658 oxs - dxys + dxxs*(x+3) }; | |
1659 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0), | |
1660 oys - dyys + dyxs*(x+1), | |
1661 oys - dyys + dyxs*(x+2), | |
1662 oys - dyys + dyxs*(x+3) }; | |
1663 | |
1664 for(y=0; y<h; y++){ | |
1665 __asm__ volatile( | |
1666 "movq %0, %%mm4 \n\t" | |
1667 "movq %1, %%mm5 \n\t" | |
1668 "paddw %2, %%mm4 \n\t" | |
1669 "paddw %3, %%mm5 \n\t" | |
1670 "movq %%mm4, %0 \n\t" | |
1671 "movq %%mm5, %1 \n\t" | |
1672 "psrlw $12, %%mm4 \n\t" | |
1673 "psrlw $12, %%mm5 \n\t" | |
1674 : "+m"(*dx4), "+m"(*dy4) | |
1675 : "m"(*dxy4), "m"(*dyy4) | |
1676 ); | |
1677 | |
1678 __asm__ volatile( | |
1679 "movq %%mm6, %%mm2 \n\t" | |
1680 "movq %%mm6, %%mm1 \n\t" | |
1681 "psubw %%mm4, %%mm2 \n\t" | |
1682 "psubw %%mm5, %%mm1 \n\t" | |
1683 "movq %%mm2, %%mm0 \n\t" | |
1684 "movq %%mm4, %%mm3 \n\t" | |
1685 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy) | |
1686 "pmullw %%mm5, %%mm3 \n\t" // dx*dy | |
1687 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy | |
1688 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy) | |
1689 | |
1690 "movd %4, %%mm5 \n\t" | |
1691 "movd %3, %%mm4 \n\t" | |
1692 "punpcklbw %%mm7, %%mm5 \n\t" | |
1693 "punpcklbw %%mm7, %%mm4 \n\t" | |
1694 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy | |
1695 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy | |
1696 | |
1697 "movd %2, %%mm5 \n\t" | |
1698 "movd %1, %%mm4 \n\t" | |
1699 "punpcklbw %%mm7, %%mm5 \n\t" | |
1700 "punpcklbw %%mm7, %%mm4 \n\t" | |
1701 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy) | |
1702 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy) | |
1703 "paddw %5, %%mm1 \n\t" | |
1704 "paddw %%mm3, %%mm2 \n\t" | |
1705 "paddw %%mm1, %%mm0 \n\t" | |
1706 "paddw %%mm2, %%mm0 \n\t" | |
1707 | |
1708 "psrlw %6, %%mm0 \n\t" | |
1709 "packuswb %%mm0, %%mm0 \n\t" | |
1710 "movd %%mm0, %0 \n\t" | |
1711 | |
1712 : "=m"(dst[x+y*stride]) | |
1713 : "m"(src[0]), "m"(src[1]), | |
1714 "m"(src[stride]), "m"(src[stride+1]), | |
1715 "m"(*r4), "m"(shift2) | |
1716 ); | |
1717 src += stride; | |
1718 } | |
1719 src += 4-h*stride; | |
1720 } | |
1721 } | |
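/* Scalar sketch of what the inner MMX block above computes per pixel
 * (reconstructed from the inline comments; s = 1<<shift, dx/dy are the
 * per-column subpel fractions advanced by dxy4/dyy4 each row):
 *
 *     dst[x+y*stride] = ( src[0]        *(s-dx)*(s-dy)
 *                       + src[1]        *  dx  *(s-dy)
 *                       + src[stride]   *(s-dx)*  dy
 *                       + src[stride+1] *  dx  *  dy
 *                       + r ) >> (2*shift);
 */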
1722 | |
1723 #define PREFETCH(name, op) \ | |
1724 static void name(void *mem, int stride, int h){\ | |
1725 const uint8_t *p= mem;\ | |
1726 do{\ | |
1727 __asm__ volatile(#op" %0" :: "m"(*p));\ | |
1728 p+= stride;\ | |
1729 }while(--h);\ | |
1730 } | |
1731 PREFETCH(prefetch_mmx2, prefetcht0) | |
1732 PREFETCH(prefetch_3dnow, prefetch) | |
1733 #undef PREFETCH | |
1734 | |
1735 #include "h264dsp_mmx.c" | |
8519 | 1736 #include "rv40dsp_mmx.c" |
8430 | 1737 |
1738 /* CAVS specific */ | |
1739 void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx); | |
1740 void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx); | |
1741 | |
1742 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1743 put_pixels8_mmx(dst, src, stride, 8); | |
1744 } | |
1745 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1746 avg_pixels8_mmx(dst, src, stride, 8); | |
1747 } | |
1748 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1749 put_pixels16_mmx(dst, src, stride, 16); | |
1750 } | |
1751 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1752 avg_pixels16_mmx(dst, src, stride, 16); | |
1753 } | |
1754 | |
1755 /* VC1 specific */ | |
1756 void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx); | |
1757 | |
1758 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { | |
1759 put_pixels8_mmx(dst, src, stride, 8); | |
1760 } | |
1761 | |
1762 /* external functions, from idct_mmx.c */ | |
1763 void ff_mmx_idct(DCTELEM *block); | |
1764 void ff_mmxext_idct(DCTELEM *block); | |
1765 | |
1766 /* XXX: these functions should be removed as soon as all IDCTs are |
1767 converted */ |
8590 | 1768 #if CONFIG_GPL |
8430 | 1769 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block) |
1770 { | |
1771 ff_mmx_idct (block); | |
1772 put_pixels_clamped_mmx(block, dest, line_size); | |
1773 } | |
1774 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1775 { | |
1776 ff_mmx_idct (block); | |
1777 add_pixels_clamped_mmx(block, dest, line_size); | |
1778 } | |
1779 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block) | |
1780 { | |
1781 ff_mmxext_idct (block); | |
1782 put_pixels_clamped_mmx(block, dest, line_size); | |
1783 } | |
1784 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1785 { | |
1786 ff_mmxext_idct (block); | |
1787 add_pixels_clamped_mmx(block, dest, line_size); | |
1788 } | |
1789 #endif | |
1790 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block) | |
1791 { | |
1792 ff_idct_xvid_mmx (block); | |
1793 put_pixels_clamped_mmx(block, dest, line_size); | |
1794 } | |
1795 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1796 { | |
1797 ff_idct_xvid_mmx (block); | |
1798 add_pixels_clamped_mmx(block, dest, line_size); | |
1799 } | |
1800 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block) | |
1801 { | |
1802 ff_idct_xvid_mmx2 (block); | |
1803 put_pixels_clamped_mmx(block, dest, line_size); | |
1804 } | |
1805 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1806 { | |
1807 ff_idct_xvid_mmx2 (block); | |
1808 add_pixels_clamped_mmx(block, dest, line_size); | |
1809 } | |
1810 | |
1811 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize) | |
1812 { | |
1813 int i; | |
1814 __asm__ volatile("pxor %%mm7, %%mm7":); | |
1815 for(i=0; i<blocksize; i+=2) { | |
1816 __asm__ volatile( | |
1817 "movq %0, %%mm0 \n\t" | |
1818 "movq %1, %%mm1 \n\t" | |
1819 "movq %%mm0, %%mm2 \n\t" | |
1820 "movq %%mm1, %%mm3 \n\t" | |
1821 "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0 | |
1822 "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0 | |
1823 "pslld $31, %%mm2 \n\t" // keep only the sign bit | |
1824 "pxor %%mm2, %%mm1 \n\t" | |
1825 "movq %%mm3, %%mm4 \n\t" | |
1826 "pand %%mm1, %%mm3 \n\t" | |
1827 "pandn %%mm1, %%mm4 \n\t" | |
1828 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m))) | |
1829 "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m))) | |
1830 "movq %%mm3, %1 \n\t" | |
1831 "movq %%mm0, %0 \n\t" | |
1832 :"+m"(mag[i]), "+m"(ang[i]) | |
1833 ::"memory" | |
1834 ); | |
1835 } | |
1836 __asm__ volatile("femms"); | |
1837 } | |
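/* Scalar sketch of the branchless coupling used above and in the SSE version
 * below, reconstructed from the inline comments (the compare masks select
 * whole elements, so this shows the intent, not a bit-exact transcription):
 *
 *     float m = mag[i], a = ang[i];
 *     float t = (m <= 0.0f) ? -a : a;        // pxor with sign(m)
 *     ang[i] = m + ((a <= 0.0f) ? t : 0.0f); // pfadd / andps path
 *     mag[i] = m - ((a >  0.0f) ? t : 0.0f); // pfsub / andnps path
 */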
1838 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize) | |
1839 { | |
1840 int i; | |
1841 | |
1842 __asm__ volatile( | |
1843 "movaps %0, %%xmm5 \n\t" | |
1844 ::"m"(ff_pdw_80000000[0]) | |
1845 ); | |
1846 for(i=0; i<blocksize; i+=4) { | |
1847 __asm__ volatile( | |
1848 "movaps %0, %%xmm0 \n\t" | |
1849 "movaps %1, %%xmm1 \n\t" | |
1850 "xorps %%xmm2, %%xmm2 \n\t" | |
1851 "xorps %%xmm3, %%xmm3 \n\t" | |
1852 "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0 | |
1853 "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0 | |
1854 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit | |
1855 "xorps %%xmm2, %%xmm1 \n\t" | |
1856 "movaps %%xmm3, %%xmm4 \n\t" | |
1857 "andps %%xmm1, %%xmm3 \n\t" | |
1858 "andnps %%xmm1, %%xmm4 \n\t" | |
1859 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m))) | |
1860 "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m))) | |
1861 "movaps %%xmm3, %1 \n\t" | |
1862 "movaps %%xmm0, %0 \n\t" | |
1863 :"+m"(mag[i]), "+m"(ang[i]) | |
1864 ::"memory" | |
1865 ); | |
1866 } | |
1867 } | |
1868 | |
1869 #define IF1(x) x | |
1870 #define IF0(x) | |
1871 | |
1872 #define MIX5(mono,stereo)\ | |
1873 __asm__ volatile(\ | |
1874 "movss 0(%2), %%xmm5 \n"\ | |
1875 "movss 8(%2), %%xmm6 \n"\ | |
1876 "movss 24(%2), %%xmm7 \n"\ | |
1877 "shufps $0, %%xmm5, %%xmm5 \n"\ | |
1878 "shufps $0, %%xmm6, %%xmm6 \n"\ | |
1879 "shufps $0, %%xmm7, %%xmm7 \n"\ | |
1880 "1: \n"\ | |
1881 "movaps (%0,%1), %%xmm0 \n"\ | |
1882 "movaps 0x400(%0,%1), %%xmm1 \n"\ | |
1883 "movaps 0x800(%0,%1), %%xmm2 \n"\ | |
1884 "movaps 0xc00(%0,%1), %%xmm3 \n"\ | |
1885 "movaps 0x1000(%0,%1), %%xmm4 \n"\ | |
1886 "mulps %%xmm5, %%xmm0 \n"\ | |
1887 "mulps %%xmm6, %%xmm1 \n"\ | |
1888 "mulps %%xmm5, %%xmm2 \n"\ | |
1889 "mulps %%xmm7, %%xmm3 \n"\ | |
1890 "mulps %%xmm7, %%xmm4 \n"\ | |
1891 stereo("addps %%xmm1, %%xmm0 \n")\ | |
1892 "addps %%xmm1, %%xmm2 \n"\ | |
1893 "addps %%xmm3, %%xmm0 \n"\ | |
1894 "addps %%xmm4, %%xmm2 \n"\ | |
1895 mono("addps %%xmm2, %%xmm0 \n")\ | |
1896 "movaps %%xmm0, (%0,%1) \n"\ | |
1897 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\ | |
1898 "add $16, %0 \n"\ | |
1899 "jl 1b \n"\ | |
1900 :"+&r"(i)\ | |
1901 :"r"(samples[0]+len), "r"(matrix)\ | |
1902 :"memory"\ | |
1903 ); | |
1904 | |
1905 #define MIX_MISC(stereo)\ | |
1906 __asm__ volatile(\ | |
1907 "1: \n"\ | |
1908 "movaps (%3,%0), %%xmm0 \n"\ | |
1909 stereo("movaps %%xmm0, %%xmm1 \n")\ | |
1910 "mulps %%xmm6, %%xmm0 \n"\ | |
1911 stereo("mulps %%xmm7, %%xmm1 \n")\ | |
1912 "lea 1024(%3,%0), %1 \n"\ | |
1913 "mov %5, %2 \n"\ | |
1914 "2: \n"\ | |
1915 "movaps (%1), %%xmm2 \n"\ | |
1916 stereo("movaps %%xmm2, %%xmm3 \n")\ | |
1917 "mulps (%4,%2), %%xmm2 \n"\ | |
1918 stereo("mulps 16(%4,%2), %%xmm3 \n")\ | |
1919 "addps %%xmm2, %%xmm0 \n"\ | |
1920 stereo("addps %%xmm3, %%xmm1 \n")\ | |
1921 "add $1024, %1 \n"\ | |
1922 "add $32, %2 \n"\ | |
1923 "jl 2b \n"\ | |
1924 "movaps %%xmm0, (%3,%0) \n"\ | |
1925 stereo("movaps %%xmm1, 1024(%3,%0) \n")\ | |
1926 "add $16, %0 \n"\ | |
1927 "jl 1b \n"\ | |
1928 :"+&r"(i), "=&r"(j), "=&r"(k)\ | |
1929 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\ | |
1930 :"memory"\ | |
1931 ); | |
1932 | |
1933 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len) | |
1934 { | |
1935 int (*matrix_cmp)[2] = (int(*)[2])matrix; | |
1936 intptr_t i,j,k; | |
1937 | |
1938 i = -len*sizeof(float); | |
1939 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) { | |
1940 MIX5(IF0,IF1); | |
1941 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) { | |
1942 MIX5(IF1,IF0); | |
1943 } else { | |
1944 DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]); | |
1945 j = 2*in_ch*sizeof(float); | |
1946 __asm__ volatile( | |
1947 "1: \n" | |
1948 "sub $8, %0 \n" | |
1949 "movss (%2,%0), %%xmm6 \n" | |
1950 "movss 4(%2,%0), %%xmm7 \n" | |
1951 "shufps $0, %%xmm6, %%xmm6 \n" | |
1952 "shufps $0, %%xmm7, %%xmm7 \n" | |
1953 "movaps %%xmm6, (%1,%0,4) \n" | |
1954 "movaps %%xmm7, 16(%1,%0,4) \n" | |
1955 "jg 1b \n" | |
1956 :"+&r"(j) | |
1957 :"r"(matrix_simd), "r"(matrix) | |
1958 :"memory" | |
1959 ); | |
1960 if(out_ch == 2) { | |
1961 MIX_MISC(IF1); | |
1962 } else { | |
1963 MIX_MISC(IF0); | |
1964 } | |
1965 } | |
1966 } | |
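/* Hedged scalar sketch of the MIX5 fast paths: channel c lives 0x400 bytes
 * (256 floats) apart, and m0/m1/m3 are matrix[0][0]/[1][0]/[3][0] as loaded
 * above. Naming the channels L,C,R,Ls,Rs is an assumption from the usual
 * AC-3 order, not something this file states:
 *
 *     // stereo, MIX5(IF0,IF1):
 *     out_l = L*m0 + C*m1 + Ls*m3;
 *     out_r = R*m0 + C*m1 + Rs*m3;
 *     // mono, MIX5(IF1,IF0): out = out_l + out_r, written to channel 0
 *
 * Any matrix that fails the shape checks falls through to MIX_MISC. */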
1967 | |
1968 static void vector_fmul_3dnow(float *dst, const float *src, int len){ | |
1969 x86_reg i = (len-4)*4; | |
1970 __asm__ volatile( | |
1971 "1: \n\t" | |
1972 "movq (%1,%0), %%mm0 \n\t" | |
1973 "movq 8(%1,%0), %%mm1 \n\t" | |
1974 "pfmul (%2,%0), %%mm0 \n\t" | |
1975 "pfmul 8(%2,%0), %%mm1 \n\t" | |
1976 "movq %%mm0, (%1,%0) \n\t" | |
1977 "movq %%mm1, 8(%1,%0) \n\t" | |
1978 "sub $16, %0 \n\t" | |
1979 "jge 1b \n\t" | |
1980 "femms \n\t" | |
1981 :"+r"(i) | |
1982 :"r"(dst), "r"(src) | |
1983 :"memory" | |
1984 ); | |
1985 } | |
1986 static void vector_fmul_sse(float *dst, const float *src, int len){ | |
1987 x86_reg i = (len-8)*4; | |
1988 __asm__ volatile( | |
1989 "1: \n\t" | |
1990 "movaps (%1,%0), %%xmm0 \n\t" | |
1991 "movaps 16(%1,%0), %%xmm1 \n\t" | |
1992 "mulps (%2,%0), %%xmm0 \n\t" | |
1993 "mulps 16(%2,%0), %%xmm1 \n\t" | |
1994 "movaps %%xmm0, (%1,%0) \n\t" | |
1995 "movaps %%xmm1, 16(%1,%0) \n\t" | |
1996 "sub $32, %0 \n\t" | |
1997 "jge 1b \n\t" | |
1998 :"+r"(i) | |
1999 :"r"(dst), "r"(src) | |
2000 :"memory" | |
2001 ); | |
2002 } | |
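/* Scalar equivalent of both versions above:
 *     for(i=0; i<len; i++) dst[i] *= src[i];
 * len is assumed to be a multiple of 4 (3DNow!) or 8 (SSE), with 16-byte
 * aligned pointers for the movaps loads/stores. */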
2003 | |
2004 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){ | |
2005 x86_reg i = len*4-16; | |
2006 __asm__ volatile( | |
2007 "1: \n\t" | |
2008 "pswapd 8(%1), %%mm0 \n\t" | |
2009 "pswapd (%1), %%mm1 \n\t" | |
2010 "pfmul (%3,%0), %%mm0 \n\t" | |
2011 "pfmul 8(%3,%0), %%mm1 \n\t" | |
2012 "movq %%mm0, (%2,%0) \n\t" | |
2013 "movq %%mm1, 8(%2,%0) \n\t" | |
2014 "add $16, %1 \n\t" | |
2015 "sub $16, %0 \n\t" | |
2016 "jge 1b \n\t" | |
2017 :"+r"(i), "+r"(src1) | |
2018 :"r"(dst), "r"(src0) | |
2019 ); | |
2020 __asm__ volatile("femms"); | |
2021 } | |
2022 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){ | |
2023 x86_reg i = len*4-32; | |
2024 __asm__ volatile( | |
2025 "1: \n\t" | |
2026 "movaps 16(%1), %%xmm0 \n\t" | |
2027 "movaps (%1), %%xmm1 \n\t" | |
2028 "shufps $0x1b, %%xmm0, %%xmm0 \n\t" | |
2029 "shufps $0x1b, %%xmm1, %%xmm1 \n\t" | |
2030 "mulps (%3,%0), %%xmm0 \n\t" | |
2031 "mulps 16(%3,%0), %%xmm1 \n\t" | |
2032 "movaps %%xmm0, (%2,%0) \n\t" | |
2033 "movaps %%xmm1, 16(%2,%0) \n\t" | |
2034 "add $32, %1 \n\t" | |
2035 "sub $32, %0 \n\t" | |
2036 "jge 1b \n\t" | |
2037 :"+r"(i), "+r"(src1) | |
2038 :"r"(dst), "r"(src0) | |
2039 ); | |
2040 } | |
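/* Scalar equivalent (pswapd / shufps $0x1b reverse element order per load):
 *     for(i=0; i<len; i++) dst[i] = src0[i] * src1[len-1-i];
 */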
2041 | |
2042 static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1, | |
2043 const float *src2, int src3, int len, int step){ | |
2044 x86_reg i = (len-4)*4; | |
2045 if(step == 2 && src3 == 0){ | |
2046 dst += (len-4)*2; | |
2047 __asm__ volatile( | |
2048 "1: \n\t" | |
2049 "movq (%2,%0), %%mm0 \n\t" | |
2050 "movq 8(%2,%0), %%mm1 \n\t" | |
2051 "pfmul (%3,%0), %%mm0 \n\t" | |
2052 "pfmul 8(%3,%0), %%mm1 \n\t" | |
2053 "pfadd (%4,%0), %%mm0 \n\t" | |
2054 "pfadd 8(%4,%0), %%mm1 \n\t" | |
2055 "movd %%mm0, (%1) \n\t" | |
2056 "movd %%mm1, 16(%1) \n\t" | |
2057 "psrlq $32, %%mm0 \n\t" | |
2058 "psrlq $32, %%mm1 \n\t" | |
2059 "movd %%mm0, 8(%1) \n\t" | |
2060 "movd %%mm1, 24(%1) \n\t" | |
2061 "sub $32, %1 \n\t" | |
2062 "sub $16, %0 \n\t" | |
2063 "jge 1b \n\t" | |
2064 :"+r"(i), "+r"(dst) | |
2065 :"r"(src0), "r"(src1), "r"(src2) | |
2066 :"memory" | |
2067 ); | |
2068 } | |
2069 else if(step == 1 && src3 == 0){ | |
2070 __asm__ volatile( | |
2071 "1: \n\t" | |
2072 "movq (%2,%0), %%mm0 \n\t" | |
2073 "movq 8(%2,%0), %%mm1 \n\t" | |
2074 "pfmul (%3,%0), %%mm0 \n\t" | |
2075 "pfmul 8(%3,%0), %%mm1 \n\t" | |
2076 "pfadd (%4,%0), %%mm0 \n\t" | |
2077 "pfadd 8(%4,%0), %%mm1 \n\t" | |
2078 "movq %%mm0, (%1,%0) \n\t" | |
2079 "movq %%mm1, 8(%1,%0) \n\t" | |
2080 "sub $16, %0 \n\t" | |
2081 "jge 1b \n\t" | |
2082 :"+r"(i) | |
2083 :"r"(dst), "r"(src0), "r"(src1), "r"(src2) | |
2084 :"memory" | |
2085 ); | |
2086 } | |
2087 else | |
2088 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step); | |
2089 __asm__ volatile("femms"); | |
2090 } | |
2091 static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1, | |
2092 const float *src2, int src3, int len, int step){ | |
2093 x86_reg i = (len-8)*4; | |
2094 if(step == 2 && src3 == 0){ | |
2095 dst += (len-8)*2; | |
2096 __asm__ volatile( | |
2097 "1: \n\t" | |
2098 "movaps (%2,%0), %%xmm0 \n\t" | |
2099 "movaps 16(%2,%0), %%xmm1 \n\t" | |
2100 "mulps (%3,%0), %%xmm0 \n\t" | |
2101 "mulps 16(%3,%0), %%xmm1 \n\t" | |
2102 "addps (%4,%0), %%xmm0 \n\t" | |
2103 "addps 16(%4,%0), %%xmm1 \n\t" | |
2104 "movss %%xmm0, (%1) \n\t" | |
2105 "movss %%xmm1, 32(%1) \n\t" | |
2106 "movhlps %%xmm0, %%xmm2 \n\t" | |
2107 "movhlps %%xmm1, %%xmm3 \n\t" | |
2108 "movss %%xmm2, 16(%1) \n\t" | |
2109 "movss %%xmm3, 48(%1) \n\t" | |
2110 "shufps $0xb1, %%xmm0, %%xmm0 \n\t" | |
2111 "shufps $0xb1, %%xmm1, %%xmm1 \n\t" | |
2112 "movss %%xmm0, 8(%1) \n\t" | |
2113 "movss %%xmm1, 40(%1) \n\t" | |
2114 "movhlps %%xmm0, %%xmm2 \n\t" | |
2115 "movhlps %%xmm1, %%xmm3 \n\t" | |
2116 "movss %%xmm2, 24(%1) \n\t" | |
2117 "movss %%xmm3, 56(%1) \n\t" | |
2118 "sub $64, %1 \n\t" | |
2119 "sub $32, %0 \n\t" | |
2120 "jge 1b \n\t" | |
2121 :"+r"(i), "+r"(dst) | |
2122 :"r"(src0), "r"(src1), "r"(src2) | |
2123 :"memory" | |
2124 ); | |
2125 } | |
2126 else if(step == 1 && src3 == 0){ | |
2127 __asm__ volatile( | |
2128 "1: \n\t" | |
2129 "movaps (%2,%0), %%xmm0 \n\t" | |
2130 "movaps 16(%2,%0), %%xmm1 \n\t" | |
2131 "mulps (%3,%0), %%xmm0 \n\t" | |
2132 "mulps 16(%3,%0), %%xmm1 \n\t" | |
2133 "addps (%4,%0), %%xmm0 \n\t" | |
2134 "addps 16(%4,%0), %%xmm1 \n\t" | |
2135 "movaps %%xmm0, (%1,%0) \n\t" | |
2136 "movaps %%xmm1, 16(%1,%0) \n\t" | |
2137 "sub $32, %0 \n\t" | |
2138 "jge 1b \n\t" | |
2139 :"+r"(i) | |
2140 :"r"(dst), "r"(src0), "r"(src1), "r"(src2) | |
2141 :"memory" | |
2142 ); | |
2143 } | |
2144 else | |
2145 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step); | |
2146 } | |
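/* Only the src3 == 0 cases are vectorized above; as a scalar sketch:
 *     for(i=0; i<len; i++) dst[i*step] = src0[i]*src1[i] + src2[i];
 * everything else falls back to ff_vector_fmul_add_add_c(). */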
2147 | |
2148 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1, | |
2149 const float *win, float add_bias, int len){ | |
8590 | 2150 #if HAVE_6REGS |
8430 | 2151 if(add_bias == 0){ |
2152 x86_reg i = -len*4; | |
2153 x86_reg j = len*4-8; | |
2154 __asm__ volatile( | |
2155 "1: \n" | |
2156 "pswapd (%5,%1), %%mm1 \n" | |
2157 "movq (%5,%0), %%mm0 \n" | |
2158 "pswapd (%4,%1), %%mm5 \n" | |
2159 "movq (%3,%0), %%mm4 \n" | |
2160 "movq %%mm0, %%mm2 \n" | |
2161 "movq %%mm1, %%mm3 \n" | |
2162 "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i] | |
2163 "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j] | |
2164 "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j] | |
2165 "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i] | |
2166 "pfadd %%mm3, %%mm2 \n" | |
2167 "pfsub %%mm0, %%mm1 \n" | |
2168 "pswapd %%mm2, %%mm2 \n" | |
2169 "movq %%mm1, (%2,%0) \n" | |
2170 "movq %%mm2, (%2,%1) \n" | |
2171 "sub $8, %1 \n" | |
2172 "add $8, %0 \n" | |
2173 "jl 1b \n" | |
2174 "femms \n" | |
2175 :"+r"(i), "+r"(j) | |
2176 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) | |
2177 ); | |
2178 }else | |
2179 #endif | |
2180 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len); | |
2181 } | |
2182 | |
2183 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1, | |
2184 const float *win, float add_bias, int len){ | |
8590 | 2185 #if HAVE_6REGS |
8430 | 2186 if(add_bias == 0){ |
2187 x86_reg i = -len*4; | |
2188 x86_reg j = len*4-16; | |
2189 __asm__ volatile( | |
2190 "1: \n" | |
2191 "movaps (%5,%1), %%xmm1 \n" | |
2192 "movaps (%5,%0), %%xmm0 \n" | |
2193 "movaps (%4,%1), %%xmm5 \n" | |
2194 "movaps (%3,%0), %%xmm4 \n" | |
2195 "shufps $0x1b, %%xmm1, %%xmm1 \n" | |
2196 "shufps $0x1b, %%xmm5, %%xmm5 \n" | |
2197 "movaps %%xmm0, %%xmm2 \n" | |
2198 "movaps %%xmm1, %%xmm3 \n" | |
2199 "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i] | |
2200 "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j] | |
2201 "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j] | |
2202 "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i] | |
2203 "addps %%xmm3, %%xmm2 \n" | |
2204 "subps %%xmm0, %%xmm1 \n" | |
2205 "shufps $0x1b, %%xmm2, %%xmm2 \n" | |
2206 "movaps %%xmm1, (%2,%0) \n" | |
2207 "movaps %%xmm2, (%2,%1) \n" | |
2208 "sub $16, %1 \n" | |
2209 "add $16, %0 \n" | |
2210 "jl 1b \n" | |
2211 :"+r"(i), "+r"(j) | |
2212 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) | |
2213 ); | |
2214 }else | |
2215 #endif | |
2216 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len); | |
2217 } | |
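/* Scalar sketch of the add_bias == 0 overlap-add windowing done by both
 * versions (reconstructed from the inline comments):
 *
 *     for(i=-len, j=len-1; i<0; i++, j--){
 *         float s0 = src0[len+i], s1 = src1[j];
 *         float wi = win[len+i],  wj = win[len+j];
 *         dst[len+i] = s0*wj - s1*wi;
 *         dst[len+j] = s0*wi + s1*wj;
 *     }
 */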
2218 | |
2219 static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len) | |
2220 { | |
2221 x86_reg i = -4*len; | |
2222 __asm__ volatile( | |
2223 "movss %3, %%xmm4 \n" | |
2224 "shufps $0, %%xmm4, %%xmm4 \n" | |
2225 "1: \n" | |
2226 "cvtpi2ps (%2,%0), %%xmm0 \n" | |
2227 "cvtpi2ps 8(%2,%0), %%xmm1 \n" | |
2228 "cvtpi2ps 16(%2,%0), %%xmm2 \n" | |
2229 "cvtpi2ps 24(%2,%0), %%xmm3 \n" | |
2230 "movlhps %%xmm1, %%xmm0 \n" | |
2231 "movlhps %%xmm3, %%xmm2 \n" | |
2232 "mulps %%xmm4, %%xmm0 \n" | |
2233 "mulps %%xmm4, %%xmm2 \n" | |
2234 "movaps %%xmm0, (%1,%0) \n" | |
2235 "movaps %%xmm2, 16(%1,%0) \n" | |
2236 "add $32, %0 \n" | |
2237 "jl 1b \n" | |
2238 :"+r"(i) | |
2239 :"r"(dst+len), "r"(src+len), "m"(mul) | |
2240 ); | |
2241 } | |
2242 | |
2243 static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len) | |
2244 { | |
2245 x86_reg i = -4*len; | |
2246 __asm__ volatile( | |
2247 "movss %3, %%xmm4 \n" | |
2248 "shufps $0, %%xmm4, %%xmm4 \n" | |
2249 "1: \n" | |
2250 "cvtdq2ps (%2,%0), %%xmm0 \n" | |
2251 "cvtdq2ps 16(%2,%0), %%xmm1 \n" | |
2252 "mulps %%xmm4, %%xmm0 \n" | |
2253 "mulps %%xmm4, %%xmm1 \n" | |
2254 "movaps %%xmm0, (%1,%0) \n" | |
2255 "movaps %%xmm1, 16(%1,%0) \n" | |
2256 "add $32, %0 \n" | |
2257 "jl 1b \n" | |
2258 :"+r"(i) | |
2259 :"r"(dst+len), "r"(src+len), "m"(mul) | |
2260 ); | |
2261 } | |
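/* Scalar equivalent of both versions:
 *     for(i=0; i<len; i++) dst[i] = src[i] * mul;
 * SSE1 converts via cvtpi2ps on 2-int halves, SSE2 via cvtdq2ps on 4. */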
2262 | |
2263 static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){ | |
2264 x86_reg reglen = len; | |
2265 // not bit-exact: pf2id uses different rounding than C and SSE | |
2266 __asm__ volatile( | |
2267 "add %0 , %0 \n\t" | |
2268 "lea (%2,%0,2) , %2 \n\t" | |
2269 "add %0 , %1 \n\t" | |
2270 "neg %0 \n\t" | |
2271 "1: \n\t" | |
2272 "pf2id (%2,%0,2) , %%mm0 \n\t" | |
2273 "pf2id 8(%2,%0,2) , %%mm1 \n\t" | |
2274 "pf2id 16(%2,%0,2) , %%mm2 \n\t" | |
2275 "pf2id 24(%2,%0,2) , %%mm3 \n\t" | |
2276 "packssdw %%mm1 , %%mm0 \n\t" | |
2277 "packssdw %%mm3 , %%mm2 \n\t" | |
2278 "movq %%mm0 , (%1,%0) \n\t" | |
2279 "movq %%mm2 , 8(%1,%0) \n\t" | |
2280 "add $16 , %0 \n\t" | |
2281 " js 1b \n\t" | |
2282 "femms \n\t" | |
2283 :"+r"(reglen), "+r"(dst), "+r"(src) | |
2284 ); | |
2285 } | |
2286 static void float_to_int16_sse(int16_t *dst, const float *src, long len){ | |
2287 x86_reg reglen = len; | |
2288 __asm__ volatile( | |
2289 "add %0 , %0 \n\t" | |
2290 "lea (%2,%0,2) , %2 \n\t" | |
2291 "add %0 , %1 \n\t" | |
2292 "neg %0 \n\t" | |
2293 "1: \n\t" | |
2294 "cvtps2pi (%2,%0,2) , %%mm0 \n\t" | |
2295 "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t" | |
2296 "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t" | |
2297 "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t" | |
2298 "packssdw %%mm1 , %%mm0 \n\t" | |
2299 "packssdw %%mm3 , %%mm2 \n\t" | |
2300 "movq %%mm0 , (%1,%0) \n\t" | |
2301 "movq %%mm2 , 8(%1,%0) \n\t" | |
2302 "add $16 , %0 \n\t" | |
2303 " js 1b \n\t" | |
2304 "emms \n\t" | |
2305 :"+r"(reglen), "+r"(dst), "+r"(src) | |
2306 ); | |
2307 } | |
2308 | |
2309 static void float_to_int16_sse2(int16_t *dst, const float *src, long len){ | |
2310 x86_reg reglen = len; | |
2311 __asm__ volatile( | |
2312 "add %0 , %0 \n\t" | |
2313 "lea (%2,%0,2) , %2 \n\t" | |
2314 "add %0 , %1 \n\t" | |
2315 "neg %0 \n\t" | |
2316 "1: \n\t" | |
2317 "cvtps2dq (%2,%0,2) , %%xmm0 \n\t" | |
2318 "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t" | |
2319 "packssdw %%xmm1 , %%xmm0 \n\t" | |
2320 "movdqa %%xmm0 , (%1,%0) \n\t" | |
2321 "add $16 , %0 \n\t" | |
2322 " js 1b \n\t" | |
2323 :"+r"(reglen), "+r"(dst), "+r"(src) | |
2324 ); | |
2325 } | |
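/* All three variants convert to signed 16 bit with pack saturation; as a
 * scalar sketch (rounding follows the current FPU/SSE mode, and pf2id
 * differs, as noted above):
 *     for(i=0; i<len; i++) dst[i] = av_clip_int16(lrintf(src[i]));
 */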
2326 | |
8590 | 2327 #if HAVE_YASM |
8430 | 2328 void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len); |
2329 void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len); | |
2330 void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len); | |
2331 void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0); | |
2332 void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0); | |
2333 void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta); | |
2334 void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta); | |
8590 | 2335 #if ARCH_X86_32 |
8430 | 2336 static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta) |
2337 { | |
2338 ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta); | |
2339 ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta); | |
2340 } | |
8463 | 2341 #endif |
8430 | 2342 void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta); |
2343 void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta); | |
2344 #else | |
2345 #define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6) | |
2346 #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) | |
2347 #define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) | |
2348 #endif | |
2349 #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse | |
2350 | |
2351 #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \ | |
2352 /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\ |
2353 static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\ | |
2354 DECLARE_ALIGNED_16(int16_t, tmp[len]);\ | |
2355 int i,j,c;\ | |
2356 for(c=0; c<channels; c++){\ | |
2357 float_to_int16_##cpu(tmp, src[c], len);\ | |
2358 for(i=0, j=c; i<len; i++, j+=channels)\ | |
2359 dst[j] = tmp[i];\ | |
2360 }\ | |
2361 }\ | |
2362 \ | |
2363 static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\ | |
2364 if(channels==1)\ | |
2365 float_to_int16_##cpu(dst, src[0], len);\ | |
2366 else if(channels==2){\ | |
2367 x86_reg reglen = len; \ | |
2368 const float *src0 = src[0];\ | |
2369 const float *src1 = src[1];\ | |
2370 __asm__ volatile(\ | |
2371 "shl $2, %0 \n"\ | |
2372 "add %0, %1 \n"\ | |
2373 "add %0, %2 \n"\ | |
2374 "add %0, %3 \n"\ | |
2375 "neg %0 \n"\ | |
2376 body\ | |
2377 :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\ | |
2378 );\ | |
2379 }else if(channels==6){\ | |
2380 ff_float_to_int16_interleave6_##cpu(dst, src, len);\ | |
2381 }else\ | |
2382 float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\ | |
2383 } | |
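/* The channels==2 asm body converts and interleaves in one pass; a scalar
 * sketch of that case, where convert() stands for whichever conversion the
 * instantiating body uses:
 *     for(i=0; i<len; i++){
 *         dst[2*i  ] = convert(src0[i]);
 *         dst[2*i+1] = convert(src1[i]);
 *     }
 */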
2384 | |
2385 FLOAT_TO_INT16_INTERLEAVE(3dnow, | |
2386 "1: \n" | |
2387 "pf2id (%2,%0), %%mm0 \n" | |
2388 "pf2id 8(%2,%0), %%mm1 \n" | |
2389 "pf2id (%3,%0), %%mm2 \n" | |
2390 "pf2id 8(%3,%0), %%mm3 \n" | |
2391 "packssdw %%mm1, %%mm0 \n" | |
2392 "packssdw %%mm3, %%mm2 \n" | |
2393 "movq %%mm0, %%mm1 \n" | |
2394 "punpcklwd %%mm2, %%mm0 \n" | |
2395 "punpckhwd %%mm2, %%mm1 \n" | |
2396 "movq %%mm0, (%1,%0)\n" | |
2397 "movq %%mm1, 8(%1,%0)\n" | |
2398 "add $16, %0 \n" | |
2399 "js 1b \n" | |
2400 "femms \n" | |
2401 ) | |
2402 | |
2403 FLOAT_TO_INT16_INTERLEAVE(sse, | |
2404 "1: \n" | |
2405 "cvtps2pi (%2,%0), %%mm0 \n" | |
2406 "cvtps2pi 8(%2,%0), %%mm1 \n" | |
2407 "cvtps2pi (%3,%0), %%mm2 \n" | |
2408 "cvtps2pi 8(%3,%0), %%mm3 \n" | |
2409 "packssdw %%mm1, %%mm0 \n" | |
2410 "packssdw %%mm3, %%mm2 \n" | |
2411 "movq %%mm0, %%mm1 \n" | |
2412 "punpcklwd %%mm2, %%mm0 \n" | |
2413 "punpckhwd %%mm2, %%mm1 \n" | |
2414 "movq %%mm0, (%1,%0)\n" | |
2415 "movq %%mm1, 8(%1,%0)\n" | |
2416 "add $16, %0 \n" | |
2417 "js 1b \n" | |
2418 "emms \n" | |
2419 ) | |
2420 | |
2421 FLOAT_TO_INT16_INTERLEAVE(sse2, | |
2422 "1: \n" | |
2423 "cvtps2dq (%2,%0), %%xmm0 \n" | |
2424 "cvtps2dq (%3,%0), %%xmm1 \n" | |
2425 "packssdw %%xmm1, %%xmm0 \n" | |
2426 "movhlps %%xmm0, %%xmm1 \n" | |
2427 "punpcklwd %%xmm1, %%xmm0 \n" | |
2428 "movdqa %%xmm0, (%1,%0) \n" | |
2429 "add $16, %0 \n" | |
2430 "js 1b \n" | |
2431 ) | |
2432 | |
2433 static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){ | |
2434 if(channels==6) | |
2435 ff_float_to_int16_interleave6_3dn2(dst, src, len); | |
2436 else | |
2437 float_to_int16_interleave_3dnow(dst, src, len, channels); | |
2438 } | |
2439 | |
2440 | |
2441 void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width); | |
2442 void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width); | |
2443 void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width); | |
2444 void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width); | |
2445 void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, | |
2446 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8); | |
2447 void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, | |
2448 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8); | |
2449 | |
2450 | |
2451 static void add_int16_sse2(int16_t * v1, int16_t * v2, int order) | |
2452 { | |
2453 x86_reg o = -(order << 1); | |
2454 v1 += order; | |
2455 v2 += order; | |
2456 __asm__ volatile( | |
2457 "1: \n\t" | |
2458 "movdqu (%1,%2), %%xmm0 \n\t" | |
2459 "movdqu 16(%1,%2), %%xmm1 \n\t" | |
2460 "paddw (%0,%2), %%xmm0 \n\t" | |
2461 "paddw 16(%0,%2), %%xmm1 \n\t" | |
2462 "movdqa %%xmm0, (%0,%2) \n\t" | |
2463 "movdqa %%xmm1, 16(%0,%2) \n\t" | |
2464 "add $32, %2 \n\t" | |
2465 "js 1b \n\t" | |
2466 : "+r"(v1), "+r"(v2), "+r"(o) | |
2467 ); | |
2468 } | |
2469 | |
2470 static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order) | |
2471 { | |
2472 x86_reg o = -(order << 1); | |
2473 v1 += order; | |
2474 v2 += order; | |
2475 __asm__ volatile( | |
2476 "1: \n\t" | |
2477 "movdqa (%0,%2), %%xmm0 \n\t" | |
2478 "movdqa 16(%0,%2), %%xmm2 \n\t" | |
2479 "movdqu (%1,%2), %%xmm1 \n\t" | |
2480 "movdqu 16(%1,%2), %%xmm3 \n\t" | |
2481 "psubw %%xmm1, %%xmm0 \n\t" | |
2482 "psubw %%xmm3, %%xmm2 \n\t" | |
2483 "movdqa %%xmm0, (%0,%2) \n\t" | |
2484 "movdqa %%xmm2, 16(%0,%2) \n\t" | |
2485 "add $32, %2 \n\t" | |
2486 "js 1b \n\t" | |
2487 : "+r"(v1), "+r"(v2), "+r"(o) | |
2488 ); | |
2489 } | |
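/* Scalar equivalents (wraparound add/sub, no saturation; order is assumed
 * to be a multiple of 16 and v1 16-byte aligned for the movdqa stores):
 *     add: for(i=0; i<order; i++) v1[i] += v2[i];
 *     sub: for(i=0; i<order; i++) v1[i] -= v2[i];
 */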
2490 | |
2491 static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift) | |
2492 { | |
2493 int res = 0; | |
8668 | 2494 DECLARE_ALIGNED_16(xmm_reg, sh); |
8430 | 2495 x86_reg o = -(order << 1); |
2496 | |
2497 v1 += order; | |
2498 v2 += order; | |
8668 | 2499 sh.a = shift; |
8430 | 2500 __asm__ volatile( |
2501 "pxor %%xmm7, %%xmm7 \n\t" | |
2502 "1: \n\t" | |
2503 "movdqu (%0,%3), %%xmm0 \n\t" | |
2504 "movdqu 16(%0,%3), %%xmm1 \n\t" | |
2505 "pmaddwd (%1,%3), %%xmm0 \n\t" | |
2506 "pmaddwd 16(%1,%3), %%xmm1 \n\t" | |
2507 "paddd %%xmm0, %%xmm7 \n\t" | |
2508 "paddd %%xmm1, %%xmm7 \n\t" | |
2509 "add $32, %3 \n\t" | |
2510 "js 1b \n\t" | |
2511 "movhlps %%xmm7, %%xmm2 \n\t" | |
2512 "paddd %%xmm2, %%xmm7 \n\t" | |
2513 "psrad %4, %%xmm7 \n\t" | |
2514 "pshuflw $0x4E, %%xmm7,%%xmm2 \n\t" | |
2515 "paddd %%xmm2, %%xmm7 \n\t" | |
2516 "movd %%xmm7, %2 \n\t" | |
2517 : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o) | |
2518 : "m"(sh) | |
2519 ); | |
2520 return res; | |
2521 } | |
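/* Scalar sketch: res = (sum of v1[i]*v2[i]) >> shift. Note the asm applies
 * psrad to two partial sums before the last horizontal add, so the result
 * can differ slightly from shifting the full sum when low bits are lost. */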
2522 | |
2523 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) | |
2524 { | |
2525 mm_flags = mm_support(); | |
2526 | |
2527 if (avctx->dsp_mask) { | |
2528 if (avctx->dsp_mask & FF_MM_FORCE) | |
2529 mm_flags |= (avctx->dsp_mask & 0xffff); | |
2530 else | |
2531 mm_flags &= ~(avctx->dsp_mask & 0xffff); | |
2532 } | |
2533 | |
2534 #if 0 | |
2535 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:"); | |
2536 if (mm_flags & FF_MM_MMX) | |
2537 av_log(avctx, AV_LOG_INFO, " mmx"); | |
2538 if (mm_flags & FF_MM_MMXEXT) | |
2539 av_log(avctx, AV_LOG_INFO, " mmxext"); | |
2540 if (mm_flags & FF_MM_3DNOW) | |
2541 av_log(avctx, AV_LOG_INFO, " 3dnow"); | |
2542 if (mm_flags & FF_MM_SSE) | |
2543 av_log(avctx, AV_LOG_INFO, " sse"); | |
2544 if (mm_flags & FF_MM_SSE2) | |
2545 av_log(avctx, AV_LOG_INFO, " sse2"); | |
2546 av_log(avctx, AV_LOG_INFO, "\n"); | |
2547 #endif | |
2548 | |
2549 if (mm_flags & FF_MM_MMX) { | |
2550 const int idct_algo= avctx->idct_algo; | |
2551 | |
2552 if(avctx->lowres==0){ | |
2553 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ | |
2554 c->idct_put= ff_simple_idct_put_mmx; | |
2555 c->idct_add= ff_simple_idct_add_mmx; | |
2556 c->idct = ff_simple_idct_mmx; | |
2557 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM; | |
8590 | 2558 #if CONFIG_GPL |
8430 | 2559 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){ |
2560 if(mm_flags & FF_MM_MMXEXT){ | |
2561 c->idct_put= ff_libmpeg2mmx2_idct_put; | |
2562 c->idct_add= ff_libmpeg2mmx2_idct_add; | |
2563 c->idct = ff_mmxext_idct; | |
2564 }else{ | |
2565 c->idct_put= ff_libmpeg2mmx_idct_put; | |
2566 c->idct_add= ff_libmpeg2mmx_idct_add; | |
2567 c->idct = ff_mmx_idct; | |
2568 } | |
2569 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; | |
2570 #endif | |
8596 | 2571 }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER || CONFIG_THEORA_DECODER) && |
8430 | 2572 idct_algo==FF_IDCT_VP3){ |
2573 if(mm_flags & FF_MM_SSE2){ | |
2574 c->idct_put= ff_vp3_idct_put_sse2; | |
2575 c->idct_add= ff_vp3_idct_add_sse2; | |
2576 c->idct = ff_vp3_idct_sse2; | |
2577 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; | |
2578 }else{ | |
2579 c->idct_put= ff_vp3_idct_put_mmx; | |
2580 c->idct_add= ff_vp3_idct_add_mmx; | |
2581 c->idct = ff_vp3_idct_mmx; | |
2582 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM; | |
2583 } | |
2584 }else if(idct_algo==FF_IDCT_CAVS){ | |
2585 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; | |
2586 }else if(idct_algo==FF_IDCT_XVIDMMX){ | |
2587 if(mm_flags & FF_MM_SSE2){ | |
2588 c->idct_put= ff_idct_xvid_sse2_put; | |
2589 c->idct_add= ff_idct_xvid_sse2_add; | |
2590 c->idct = ff_idct_xvid_sse2; | |
2591 c->idct_permutation_type= FF_SSE2_IDCT_PERM; | |
2592 }else if(mm_flags & FF_MM_MMXEXT){ | |
2593 c->idct_put= ff_idct_xvid_mmx2_put; | |
2594 c->idct_add= ff_idct_xvid_mmx2_add; | |
2595 c->idct = ff_idct_xvid_mmx2; | |
2596 }else{ | |
2597 c->idct_put= ff_idct_xvid_mmx_put; | |
2598 c->idct_add= ff_idct_xvid_mmx_add; | |
2599 c->idct = ff_idct_xvid_mmx; | |
2600 } | |
2601 } | |
2602 } | |
2603 | |
2604 c->put_pixels_clamped = put_pixels_clamped_mmx; | |
2605 c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx; | |
2606 c->add_pixels_clamped = add_pixels_clamped_mmx; | |
2607 c->clear_block = clear_block_mmx; | |
2608 c->clear_blocks = clear_blocks_mmx; | |
2609 if (mm_flags & FF_MM_SSE) | |
2610 c->clear_block = clear_block_sse; | |
2611 | |
2612 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \ | |
2613 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \ | |
2614 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \ | |
2615 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \ | |
2616 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU | |
2617 | |
2618 SET_HPEL_FUNCS(put, 0, 16, mmx); | |
2619 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx); | |
2620 SET_HPEL_FUNCS(avg, 0, 16, mmx); | |
2621 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx); | |
2622 SET_HPEL_FUNCS(put, 1, 8, mmx); | |
2623 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx); | |
2624 SET_HPEL_FUNCS(avg, 1, 8, mmx); | |
2625 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx); | |
2626 | |
2627 c->gmc= gmc_mmx; | |
2628 | |
2629 c->add_bytes= add_bytes_mmx; | |
2630 c->add_bytes_l2= add_bytes_l2_mmx; | |
2631 | |
2632 c->draw_edges = draw_edges_mmx; | |
2633 | |
8596 | 2634 if (CONFIG_ANY_H263) { |
8430 | 2635 c->h263_v_loop_filter= h263_v_loop_filter_mmx; |
2636 c->h263_h_loop_filter= h263_h_loop_filter_mmx; | |
2637 } | |
2638 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd; | |
2639 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx; | |
2640 c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd; | |
2641 | |
8519 | 2642 c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx; |
2643 c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx; |
2644 |
2646 c->h264_idct_add= ff_h264_idct_add_mmx; | |
2647 c->h264_idct8_dc_add= | |
2648 c->h264_idct8_add= ff_h264_idct8_add_mmx; | |
2649 | |
2650 c->h264_idct_add16 = ff_h264_idct_add16_mmx; | |
2651 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx; | |
2652 c->h264_idct_add8 = ff_h264_idct_add8_mmx; | |
2653 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx; | |
2654 | |
2655 if (mm_flags & FF_MM_MMXEXT) { | |
2656 c->prefetch = prefetch_mmx2; | |
2657 | |
2658 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; | |
2659 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2; | |
2660 | |
2661 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2; | |
2662 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2; | |
2663 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2; | |
2664 | |
2665 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2; | |
2666 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2; | |
2667 | |
2668 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2; | |
2669 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2; | |
2670 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2; | |
2671 | |
2672 c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2; | |
2673 c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2; | |
2674 c->h264_idct_add16 = ff_h264_idct_add16_mmx2; | |
2675 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2; | |
2676 c->h264_idct_add8 = ff_h264_idct_add8_mmx2; | |
2677 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2; | |
2678 | |
2679 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
2680 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; | |
2681 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2; | |
2682 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2; | |
2683 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; | |
2684 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; | |
2685 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; | |
2686 | |
8596 | 2687 if (CONFIG_VP3_DECODER || CONFIG_THEORA_DECODER) { |
8430 | 2688 c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2; |
2689 c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2; | |
2690 } | |
2691 } | |
2692 | |
2693 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \ | |
2694 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \ | |
2695 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \ | |
2696 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \ | |
2697 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \ | |
2698 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \ | |
2699 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \ | |
2700 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \ | |
2701 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \ | |
2702 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \ | |
2703 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \ | |
2704 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \ | |
2705 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \ | |
2706 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \ | |
2707 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \ | |
2708 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \ | |
2709 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU | |
2710 | |
2711 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2); | |
2712 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2); | |
2713 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2); | |
2714 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2); | |
2715 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2); | |
2716 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2); | |
2717 | |
2718 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2); | |
2719 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2); | |
2720 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2); | |
2721 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2); | |
2722 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2); | |
2723 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2); | |
2724 | |
2725 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2); | |
2726 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2); | |
2727 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2); | |
2728 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2); | |
2729 | |
8519 | 2730 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2; |
2731 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2; |
2732 |
8430 | 2733 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd; |
2734 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2; | |
2735 c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2; | |
2736 c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2; | |
2737 c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2; | |
2738 c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2; | |
2739 c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2; | |
2740 c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2; | |
2741 c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2; | |
2742 c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2; | |
2743 c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2; | |
2744 | |
2745 c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2; | |
2746 c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2; | |
2747 c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2; | |
2748 c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2; | |
2749 c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2; | |
2750 c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2; | |
2751 c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2; | |
2752 c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2; | |
2753 | |
2754 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2; | |
2755 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2; | |
2756 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2; | |
2757 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2; | |
2758 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2; | |
2759 c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2; | |
2760 c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2; | |
2761 c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2; | |
2762 | |
8596 | 2763 if (CONFIG_CAVS_DECODER) |
8430 | 2764 ff_cavsdsp_init_mmx2(c, avctx); |
2765 | |
8596 | 2766 if (CONFIG_VC1_DECODER || CONFIG_WMV3_DECODER) |
8430 | 2767 ff_vc1dsp_init_mmx(c, avctx); |
2768 | |
2769 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2; | |
2770 } else if (mm_flags & FF_MM_3DNOW) { | |
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
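
            /* These variants trade exact spec rounding for speed, so they
             * must stay disabled when bit-exact output is requested. */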
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }
            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;
            if (CONFIG_CAVS_DECODER)
                ff_cavsdsp_init_3dnow(c, avctx);
        }
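
/* H264_QPEL_FUNCS(x, y, CPU) installs the CPU-specific put/avg functions for
 * one quarter-pel position (x, y each in 0..3, stored at index x+y*4) for the
 * 16x16 and 8x8 block sizes.  For example, H264_QPEL_FUNCS(1, 2, sse2)
 * expands to four assignments filling slot 1+2*4 == 9 of the four tables. */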
#define H264_QPEL_FUNCS(x, y, CPU)\
        c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
        c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
        c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
        c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
        if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
            // the !3DNOW test doubles as an "is not AMD" check: these SSE2
            // functions are slower than MMX on AMD, but faster on Intel
            /* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
            */
            H264_QPEL_FUNCS(0, 0, sse2);
        }
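        /* Only the positions with a vertical filter step (y != 0) get SSE2
         * versions here; the horizontal-only cases keep their MMX2 code
         * (the SSSE3 block below covers x != 0). */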
        if(mm_flags & FF_MM_SSE2){
            c->h264_idct8_add = ff_h264_idct8_add_sse2;
            c->h264_idct8_add4= ff_h264_idct8_add4_sse2;

            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }
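        /* The SSSE3 versions cover every position with a horizontal filter
         * step (x != 0), plus 8x8/4x4 chroma MC and PNG Paeth prediction. */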
#if HAVE_SSSE3
        if(mm_flags & FF_MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
        }
#endif
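
        /* Deblocking and iDCT assembly imported from x264; it is GPL-licensed
         * and built with yasm, hence the extra build-time guards. */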
#if CONFIG_GPL && HAVE_YASM
        if( mm_flags&FF_MM_MMXEXT ){
#if ARCH_X86_32
            c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
#endif
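            /* The compiler check below presumably works around a
             * miscompilation of these functions by 32-bit ICC before 11.1;
             * x86_64 builds are unaffected. */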
            if( mm_flags&FF_MM_SSE2 ){
#if ARCH_X86_64 || !defined(__ICC) || __ICC > 1100
                c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
                c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
                c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
                c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
#endif
                c->h264_idct_add16 = ff_h264_idct_add16_sse2;
                c->h264_idct_add8 = ff_h264_idct_add8_sse2;
                c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
            }
        }
#endif

#if CONFIG_SNOW_DECODER
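        /* Note the trailing '& 0': this condition is always false, so the
         * SSE2 Snow path is effectively disabled and the MMX code in the
         * else branch is what actually runs. */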
        if(mm_flags & FF_MM_SSE2 & 0){
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#if HAVE_7REGS
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
#endif
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            if(mm_flags & FF_MM_MMXEXT){
                c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#if HAVE_7REGS
                c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
#endif
            }
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif
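
        /* Float and integer vector primitives used mainly by the audio
         * codecs.  The branches run in ascending capability order and simply
         * overwrite earlier pointers, so the last applicable version wins
         * (except where an override is explicitly marked, like the 3DNow!
         * vector_fmul_add_add below). */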
        if(mm_flags & FF_MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16 = float_to_int16_3dnow;
                c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
            }
        }
        if(mm_flags & FF_MM_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
            c->vector_fmul_window = vector_fmul_window_3dnow2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
            }
        }
        if(mm_flags & FF_MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
            c->vector_fmul_window = vector_fmul_window_sse;
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
        }
        if(mm_flags & FF_MM_3DNOW)
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
        if(mm_flags & FF_MM_SSE2){
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
            c->float_to_int16 = float_to_int16_sse2;
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
            c->add_int16 = add_int16_sse2;
            c->sub_int16 = sub_int16_sse2;
            c->scalarproduct_int16 = scalarproduct_int16_sse2;
        }
    }
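
    /* Encoder-only DSP functions are set up separately, in
     * dsputilenc_init_mmx(). */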
    if (CONFIG_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing: point everything at a do-nothing stub so the
    // overhead of the surrounding code can be measured in isolation
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}