x86/dsputil_mmx.c @ 8793:d46cde168c69 (libavcodec)
avoid duplicating dsputil's clear_block

author:   stefang
date:     Tue, 10 Feb 2009 16:45:02 +0000
parents:  31138c296ac6
children: a5c8210814d7
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to generate these constants
// in registers than to load them from memory
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
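
/* Note on the PIC variants above: pcmpeqd sets every bit, so each 16-bit
 * lane holds 0xFFFF; "psrlw $15" reduces every lane to 0x0001; packuswb
 * then narrows those words to bytes, yielding 0x0101010101010101 (ff_bone)
 * entirely in registers. MOVQ_WTWO instead doubles the 0x0001 words with
 * "psllw $1" to get 0x0002000200020002 (ff_wtwo). Building the constants
 * this way avoids a GOT-relative memory load in position-independent code. */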

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq  " #rega ", " #regr "  \n\t"\
    "pand  " #regb ", " #regr "  \n\t"\
    "pxor  " #rega ", " #regb "  \n\t"\
    "pand  " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb "         \n\t"\
    "paddb " #regb ", " #regr "  \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq  " #rega ", " #regr "  \n\t"\
    "por   " #regb ", " #regr "  \n\t"\
    "pxor  " #rega ", " #regb "  \n\t"\
    "pand  " #regfe ", " #regb " \n\t"\
    "psrlq $1, " #regb "         \n\t"\
    "psubb " #regb ", " #regr "  \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq  " #rega ", " #regr "  \n\t"\
    "movq  " #regc ", " #regp "  \n\t"\
    "pand  " #regb ", " #regr "  \n\t"\
    "pand  " #regd ", " #regp "  \n\t"\
    "pxor  " #rega ", " #regb "  \n\t"\
    "pxor  " #regc ", " #regd "  \n\t"\
    "pand  %%mm6, " #regb "      \n\t"\
    "pand  %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "         \n\t"\
    "psrlq $1, " #regd "         \n\t"\
    "paddb " #regb ", " #regr "  \n\t"\
    "paddb " #regd ", " #regp "  \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq  " #rega ", " #regr "  \n\t"\
    "movq  " #regc ", " #regp "  \n\t"\
    "por   " #regb ", " #regr "  \n\t"\
    "por   " #regd ", " #regp "  \n\t"\
    "pxor  " #rega ", " #regb "  \n\t"\
    "pxor  " #regc ", " #regd "  \n\t"\
    "pand  %%mm6, " #regb "      \n\t"\
    "pand  %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "         \n\t"\
    "psrlq $1, " #regb "         \n\t"\
    "psubb " #regb ", " #regr "  \n\t"\
    "psubb " #regd ", " #regp "  \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq     %3, %%mm0         \n\t"
        "movq    8%3, %%mm1         \n\t"
        "movq   16%3, %%mm2         \n\t"
        "movq   24%3, %%mm3         \n\t"
        "movq   32%3, %%mm4         \n\t"
        "movq   40%3, %%mm5         \n\t"
        "movq   48%3, %%mm6         \n\t"
        "movq   56%3, %%mm7         \n\t"
        "packuswb %%mm1, %%mm0      \n\t"
        "packuswb %%mm3, %%mm2      \n\t"
        "packuswb %%mm5, %%mm4      \n\t"
        "packuswb %%mm7, %%mm6      \n\t"
        "movq   %%mm0, (%0)         \n\t"
        "movq   %%mm2, (%0, %1)     \n\t"
        "movq   %%mm4, (%0, %1, 2)  \n\t"
        "movq   %%mm6, (%0, %2)     \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p   += 32;

    // if this were an exact copy of the block above, the compiler would
    // generate some very strange code, so the block pointer is passed
    // through an "r" constraint here instead of "m"
    __asm__ volatile(
        "movq    (%3), %%mm0        \n\t"
        "movq   8(%3), %%mm1        \n\t"
        "movq  16(%3), %%mm2        \n\t"
        "movq  24(%3), %%mm3        \n\t"
        "movq  32(%3), %%mm4        \n\t"
        "movq  40(%3), %%mm5        \n\t"
        "movq  48(%3), %%mm6        \n\t"
        "movq  56(%3), %%mm7        \n\t"
        "packuswb %%mm1, %%mm0      \n\t"
        "packuswb %%mm3, %%mm2      \n\t"
        "packuswb %%mm5, %%mm4      \n\t"
        "packuswb %%mm7, %%mm6      \n\t"
        "movq   %%mm0, (%0)         \n\t"
        "movq   %%mm2, (%0, %1)     \n\t"
        "movq   %%mm4, (%0, %1, 2)  \n\t"
        "movq   %%mm6, (%0, %2)     \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
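
/* What the two asm blocks compute, as a plain C sketch (the scalar
 * reference lives in dsputil.c; this version is for illustration only):
 * each 16-bit DCT coefficient is saturated to [0,255] by packuswb and
 * stored as one byte of an 8x8 pixel block.
 *
 *   static void put_pixels_clamped_sketch(const DCTELEM *block,
 *                                         uint8_t *pixels, int line_size)
 *   {
 *       int i, j;
 *       for (i = 0; i < 8; i++) {
 *           for (j = 0; j < 8; j++) {
 *               int v = block[i*8 + j];
 *               pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v; // packuswb clamp
 *           }
 *           pixels += line_size;
 *       }
 *   }
 */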

static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq      (%2), %%mm0   \n\t"
            "movq     8(%2), %%mm1   \n\t"
            "movq    16(%2), %%mm2   \n\t"
            "movq    24(%2), %%mm3   \n\t"
            "movq        %0, %%mm4   \n\t"
            "movq        %1, %%mm6   \n\t"
            "movq     %%mm4, %%mm5   \n\t"
            "punpcklbw %%mm7, %%mm4  \n\t"
            "punpckhbw %%mm7, %%mm5  \n\t"
            "paddsw   %%mm4, %%mm0   \n\t"
            "paddsw   %%mm5, %%mm1   \n\t"
            "movq     %%mm6, %%mm5   \n\t"
            "punpcklbw %%mm7, %%mm6  \n\t"
            "punpckhbw %%mm7, %%mm5  \n\t"
            "paddsw   %%mm6, %%mm2   \n\t"
            "paddsw   %%mm5, %%mm3   \n\t"
            "packuswb %%mm1, %%mm0   \n\t"
            "packuswb %%mm3, %%mm2   \n\t"
            "movq     %%mm0, %0      \n\t"
            "movq     %%mm2, %1      \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p   += 16;
    } while (--i);
}

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        ASMALIGN(3)
        "1:                         \n\t"
        "movd (%1    ), %%mm0       \n\t"
        "movd (%1, %3), %%mm1       \n\t"
        "movd %%mm0, (%2)           \n\t"
        "movd %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movd (%1    ), %%mm0       \n\t"
        "movd (%1, %3), %%mm1       \n\t"
        "movd %%mm0, (%2)           \n\t"
        "movd %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        ASMALIGN(3)
        "1:                         \n\t"
        "movq (%1    ), %%mm0       \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movq (%1    ), %%mm0       \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"    \n\t"
        ASMALIGN(3)
        "1:                         \n\t"
        "movq (%1    ), %%mm0       \n\t"
        "movq 8(%1   ), %%mm4       \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "movq (%1    ), %%mm0       \n\t"
        "movq 8(%1   ), %%mm4       \n\t"
        "movq (%1, %3), %%mm1       \n\t"
        "movq 8(%1, %3), %%mm5      \n\t"
        "movq %%mm0, (%2)           \n\t"
        "movq %%mm4, 8(%2)          \n\t"
        "movq %%mm1, (%2, %3)       \n\t"
        "movq %%mm5, 8(%2, %3)      \n\t"
        "add %%"REG_a", %1          \n\t"
        "add %%"REG_a", %2          \n\t"
        "subl $4, %0                \n\t"
        "jnz 1b                     \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                          \n\t"
        "movdqu (%1), %%xmm0         \n\t"
        "movdqu (%1,%3), %%xmm1      \n\t"
        "movdqu (%1,%3,2), %%xmm2    \n\t"
        "movdqu (%1,%4), %%xmm3      \n\t"
        "movdqa %%xmm0, (%2)         \n\t"
        "movdqa %%xmm1, (%2,%3)      \n\t"
        "movdqa %%xmm2, (%2,%3,2)    \n\t"
        "movdqa %%xmm3, (%2,%4)      \n\t"
        "subl $4, %0                 \n\t"
        "lea (%1,%3,4), %1           \n\t"
        "lea (%2,%3,4), %2           \n\t"
        "jnz 1b                      \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                          \n\t"
        "movdqu (%1), %%xmm0         \n\t"
        "movdqu (%1,%3), %%xmm1      \n\t"
        "movdqu (%1,%3,2), %%xmm2    \n\t"
        "movdqu (%1,%4), %%xmm3      \n\t"
        "pavgb  (%2), %%xmm0         \n\t"
        "pavgb  (%2,%3), %%xmm1      \n\t"
        "pavgb  (%2,%3,2), %%xmm2    \n\t"
        "pavgb  (%2,%4), %%xmm3      \n\t"
        "movdqa %%xmm0, (%2)         \n\t"
        "movdqa %%xmm1, (%2,%3)      \n\t"
        "movdqa %%xmm2, (%2,%3,2)    \n\t"
        "movdqa %%xmm3, (%2,%4)      \n\t"
        "subl $4, %0                 \n\t"
        "lea (%1,%3,4), %1           \n\t"
        "lea (%2,%3,4), %2           \n\t"
        "jnz 1b                      \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
        );
}
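
/* Both SSE2 routines above load the source with movdqu (the pixels may sit
 * at any sub-pel offset, so alignment cannot be assumed) but store with
 * movdqa, relying on the destination blocks being 16-byte aligned.
 * avg_pixels16_sse2 uses pavgb, the hardware rounding average
 * ((a + b + 1) >> 1 per byte), to blend the new pixels with what is
 * already in the destination. */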

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "mov     %1, %%"REG_a"          \n\t"\
        "1:                             \n\t"\
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"\
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
        "add    $32, %%"REG_a"          \n\t"\
        " js 1b                         \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
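
/* This macro is the point of the commit ("avoid duplicating dsputil's
 * clear_block"): one body generates both clear_blocks_mmx (six 8x8 DCT
 * blocks, 6*64 coefficients) and clear_block_mmx (a single block). The
 * pointer is biased by +128*n and REG_a counts a negative offset up to
 * zero, zeroing 32 bytes per iteration. Functionally it is just:
 *
 *   // equivalent C, assuming DCTELEM is a 16-bit type
 *   static void clear_blocks_sketch(DCTELEM *blocks)
 *   {
 *       memset(blocks, 0, sizeof(DCTELEM) * 6 * 64);
 *   }
 */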

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "movaps %%xmm0,    (%0) \n"
        "movaps %%xmm0,  16(%0) \n"
        "movaps %%xmm0,  32(%0) \n"
        "movaps %%xmm0,  48(%0) \n"
        "movaps %%xmm0,  64(%0) \n"
        "movaps %%xmm0,  80(%0) \n"
        "movaps %%xmm0,  96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                     \n\t"
        "1:                         \n\t"
        "movq  (%1, %0), %%mm0      \n\t"
        "movq  (%2, %0), %%mm1      \n\t"
        "paddb %%mm0, %%mm1         \n\t"
        "movq %%mm1, (%2, %0)       \n\t"
        "movq 8(%1, %0), %%mm0      \n\t"
        "movq 8(%2, %0), %%mm1      \n\t"
        "paddb %%mm0, %%mm1         \n\t"
        "movq %%mm1, 8(%2, %0)      \n\t"
        "add $16, %0                \n\t"
        "2:                         \n\t"
        "cmp %3, %0                 \n\t"
        " js 1b                     \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                     \n\t"
        "1:                         \n\t"
        "movq   (%2, %0), %%mm0     \n\t"
        "movq  8(%2, %0), %%mm1     \n\t"
        "paddb  (%3, %0), %%mm0     \n\t"
        "paddb 8(%3, %0), %%mm1     \n\t"
        "movq %%mm0,  (%1, %0)      \n\t"
        "movq %%mm1, 8(%1, %0)      \n\t"
        "add $16, %0                \n\t"
        "2:                         \n\t"
        "cmp %4, %0                 \n\t"
        " js 1b                     \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#if HAVE_7REGS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov         %7, %3         \n"
        "1:                         \n"
        "movzx (%3,%4), %2          \n"
        "mov         %2, %k3        \n"
        "sub        %b1, %b3        \n"
        "add        %b0, %b3        \n"
        "mov         %2, %1         \n"
        "cmp         %0, %2         \n"
        "cmovg       %0, %2         \n"
        "cmovg       %1, %0         \n"
        "cmp        %k3, %0         \n"
        "cmovg      %k3, %0         \n"
        "mov         %7, %3         \n"
        "cmp         %2, %0         \n"
        "cmovl       %2, %0         \n"
        "add   (%6,%4), %b0         \n"
        "mov        %b0, (%5,%4)    \n"
        "inc         %4             \n"
        "jl 1b                      \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
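
/* The cmov routine above computes HuffYUV's median predictor without
 * branches: each output byte is diff[i] plus the median of left, top and
 * (left + top - topleft). A C sketch of the same recurrence (the exact
 * scalar reference lives in dsputil.c):
 *
 *   static int mid_pred3(int a, int b, int c) {
 *       if (a > b) { int t = a; a = b; b = t; }  // sort so a <= b
 *       return c < a ? a : c > b ? b : c;        // clamp c into [a,b]
 *   }
 *
 *   static void add_hfyu_median_prediction_sketch(uint8_t *dst, uint8_t *top,
 *                                                 uint8_t *diff, int w,
 *                                                 int *left, int *left_top)
 *   {
 *       int i, l = *left, tl = *left_top;
 *       for (i = 0; i < w; i++) {
 *           int pred = mid_pred3(l, top[i], (l + top[i] - tl) & 0xff);
 *           l  = (pred + diff[i]) & 0xff;
 *           tl = top[i];
 *           dst[i] = l;
 *       }
 *       *left = l; *left_top = tl;
 *   }
 */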

#define H263_LOOP_FILTER \
    "pxor      %%mm7, %%mm7     \n\t"\
    "movq         %0, %%mm0     \n\t"\
    "movq         %0, %%mm1     \n\t"\
    "movq         %3, %%mm2     \n\t"\
    "movq         %3, %%mm3     \n\t"\
    "punpcklbw %%mm7, %%mm0     \n\t"\
    "punpckhbw %%mm7, %%mm1     \n\t"\
    "punpcklbw %%mm7, %%mm2     \n\t"\
    "punpckhbw %%mm7, %%mm3     \n\t"\
    "psubw     %%mm2, %%mm0     \n\t"\
    "psubw     %%mm3, %%mm1     \n\t"\
    "movq         %1, %%mm2     \n\t"\
    "movq         %1, %%mm3     \n\t"\
    "movq         %2, %%mm4     \n\t"\
    "movq         %2, %%mm5     \n\t"\
    "punpcklbw %%mm7, %%mm2     \n\t"\
    "punpckhbw %%mm7, %%mm3     \n\t"\
    "punpcklbw %%mm7, %%mm4     \n\t"\
    "punpckhbw %%mm7, %%mm5     \n\t"\
    "psubw     %%mm2, %%mm4     \n\t"\
    "psubw     %%mm3, %%mm5     \n\t"\
    "psllw        $2, %%mm4     \n\t"\
    "psllw        $2, %%mm5     \n\t"\
    "paddw     %%mm0, %%mm4     \n\t"\
    "paddw     %%mm1, %%mm5     \n\t"\
    "pxor      %%mm6, %%mm6     \n\t"\
    "pcmpgtw   %%mm4, %%mm6     \n\t"\
    "pcmpgtw   %%mm5, %%mm7     \n\t"\
    "pxor      %%mm6, %%mm4     \n\t"\
    "pxor      %%mm7, %%mm5     \n\t"\
    "psubw     %%mm6, %%mm4     \n\t"\
    "psubw     %%mm7, %%mm5     \n\t"\
    "psrlw        $3, %%mm4     \n\t"\
    "psrlw        $3, %%mm5     \n\t"\
    "packuswb  %%mm5, %%mm4     \n\t"\
    "packsswb  %%mm7, %%mm6     \n\t"\
    "pxor      %%mm7, %%mm7     \n\t"\
    "movd         %4, %%mm2     \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "punpcklbw %%mm2, %%mm2     \n\t"\
    "psubusb   %%mm4, %%mm2     \n\t"\
    "movq      %%mm2, %%mm3     \n\t"\
    "psubusb   %%mm4, %%mm3     \n\t"\
    "psubb     %%mm3, %%mm2     \n\t"\
    "movq         %1, %%mm3     \n\t"\
    "movq         %2, %%mm4     \n\t"\
    "pxor      %%mm6, %%mm3     \n\t"\
    "pxor      %%mm6, %%mm4     \n\t"\
    "paddusb   %%mm2, %%mm3     \n\t"\
    "psubusb   %%mm2, %%mm4     \n\t"\
    "pxor      %%mm6, %%mm3     \n\t"\
    "pxor      %%mm6, %%mm4     \n\t"\
    "paddusb   %%mm2, %%mm2     \n\t"\
    "packsswb  %%mm1, %%mm0     \n\t"\
    "pcmpgtb   %%mm0, %%mm7     \n\t"\
    "pxor      %%mm7, %%mm0     \n\t"\
    "psubb     %%mm7, %%mm0     \n\t"\
    "movq      %%mm0, %%mm1     \n\t"\
    "psubusb   %%mm2, %%mm0     \n\t"\
    "psubb     %%mm0, %%mm1     \n\t"\
    "pand         %5, %%mm1     \n\t"\
    "psrlw        $2, %%mm1     \n\t"\
    "pxor      %%mm7, %%mm1     \n\t"\
    "psubb     %%mm7, %%mm1     \n\t"\
    "movq         %0, %%mm5     \n\t"\
    "movq         %3, %%mm6     \n\t"\
    "psubb     %%mm1, %%mm5     \n\t"\
    "paddb     %%mm1, %%mm6     \n\t"

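/* Operand roles in H263_LOOP_FILTER: %0..%3 are the four pixel rows (or
 * transposed columns) p0..p3 straddling the block edge, %4 is 2*strength
 * and %5 is the ff_pb_FC mask used when halving signed bytes. Mirroring
 * the scalar h263 loop filter in dsputil.c, it computes
 *     d = (p0 - p3 + 4*(p2 - p1)) / 8,
 * soft-limits d against the filter strength, moves p1/p2 toward each other
 * by the clipped amount (results in mm3/mm4) and applies a smaller
 * correction to the outer pixels p0/p3 (results in mm5/mm6), using
 * saturated byte arithmetic throughout. */
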
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1             \n\t"
        "movq %%mm4, %2             \n\t"
        "movq %%mm5, %0             \n\t"
        "movq %%mm6, %3             \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0            \n\t"
        "movd  %5, %%mm1            \n\t"
        "movd  %6, %%mm2            \n\t"
        "movd  %7, %%mm3            \n\t"
        "punpcklbw %%mm1, %%mm0     \n\t"
        "punpcklbw %%mm3, %%mm2     \n\t"
        "movq %%mm0, %%mm1          \n\t"
        "punpcklwd %%mm2, %%mm0     \n\t"
        "punpckhwd %%mm2, %%mm1     \n\t"
        "movd  %%mm0, %0            \n\t"
        "punpckhdq %%mm0, %%mm0     \n\t"
        "movd  %%mm0, %1            \n\t"
        "movd  %%mm1, %2            \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd  %%mm1, %3            \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
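
/* transpose4x4 flips a 4x4 byte tile so the horizontal loop filter can
 * reuse the row-oriented H263_LOOP_FILTER code. Equivalent C, for
 * reference only:
 *
 *   static void transpose4x4_sketch(uint8_t *dst, uint8_t *src,
 *                                   int dst_stride, int src_stride)
 *   {
 *       int i, j;
 *       for (i = 0; i < 4; i++)
 *           for (j = 0; j < 4; j++)
 *               dst[i*dst_stride + j] = src[j*src_stride + i];
 *   }
 */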

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5  3  4  6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1          \n\t"
        "movq %%mm4, %%mm0          \n\t"
        "punpcklbw %%mm3, %%mm5     \n\t"
        "punpcklbw %%mm6, %%mm4     \n\t"
        "punpckhbw %%mm3, %%mm1     \n\t"
        "punpckhbw %%mm6, %%mm0     \n\t"
        "movq %%mm5, %%mm3          \n\t"
        "movq %%mm1, %%mm6          \n\t"
        "punpcklwd %%mm4, %%mm5     \n\t"
        "punpcklwd %%mm0, %%mm1     \n\t"
        "punpckhwd %%mm4, %%mm3     \n\t"
        "punpckhwd %%mm0, %%mm6     \n\t"
        "movd %%mm5, (%0)           \n\t"
        "punpckhdq %%mm5, %%mm5     \n\t"
        "movd %%mm5, (%0,%2)        \n\t"
        "movd %%mm3, (%0,%2,2)      \n\t"
        "punpckhdq %%mm3, %%mm3     \n\t"
        "movd %%mm3, (%0,%3)        \n\t"
        "movd %%mm1, (%1)           \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd %%mm1, (%1,%2)        \n\t"
        "movd %%mm6, (%1,%2,2)      \n\t"
        "punpckhdq %%mm6, %%mm6     \n\t"
        "movd %%mm6, (%1,%3)        \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* draw the edges of width 'w' of an image of size width, height
   this mmx version can only handle w==8 || w==16 */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1:                         \n\t"
            "movd (%0), %%mm0           \n\t"
            "punpcklbw %%mm0, %%mm0     \n\t"
            "punpcklwd %%mm0, %%mm0     \n\t"
            "punpckldq %%mm0, %%mm0     \n\t"
            "movq %%mm0, -8(%0)         \n\t"
            "movq -8(%0, %2), %%mm1     \n\t"
            "punpckhbw %%mm1, %%mm1     \n\t"
            "punpckhwd %%mm1, %%mm1     \n\t"
            "punpckhdq %%mm1, %%mm1     \n\t"
            "movq %%mm1, (%0, %2)       \n\t"
            "add %1, %0                 \n\t"
            "cmp %3, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1:                         \n\t"
            "movd (%0), %%mm0           \n\t"
            "punpcklbw %%mm0, %%mm0     \n\t"
            "punpcklwd %%mm0, %%mm0     \n\t"
            "punpckldq %%mm0, %%mm0     \n\t"
            "movq %%mm0, -8(%0)         \n\t"
            "movq %%mm0, -16(%0)        \n\t"
            "movq -8(%0, %2), %%mm1     \n\t"
            "punpckhbw %%mm1, %%mm1     \n\t"
            "punpckhwd %%mm1, %%mm1     \n\t"
            "punpckhdq %%mm1, %%mm1     \n\t"
            "movq %%mm1, (%0, %2)       \n\t"
            "movq %%mm1, 8(%0, %2)      \n\t"
            "add %1, %0                 \n\t"
            "cmp %3, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                         \n\t"
            "movq (%1, %0), %%mm0       \n\t"
            "movq %%mm0, (%0)           \n\t"
            "movq %%mm0, (%0, %2)       \n\t"
            "movq %%mm0, (%0, %2, 2)    \n\t"
            "movq %%mm0, (%0, %3)       \n\t"
            "add $8, %0                 \n\t"
            "cmp %4, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                         \n\t"
            "movq (%1, %0), %%mm0       \n\t"
            "movq %%mm0, (%0)           \n\t"
            "movq %%mm0, (%0, %2)       \n\t"
            "movq %%mm0, (%0, %2, 2)    \n\t"
            "movq %%mm0, (%0, %3)       \n\t"
            "add $8, %0                 \n\t"
            "cmp %4, %0                 \n\t"
            " jb 1b                     \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
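
/* draw_edges pads the picture with a w-pixel border for unrestricted
 * motion vectors by replicating every edge pixel outward: the left/right
 * loops splat the first and last byte of each row with punpck*, the
 * top/bottom loops copy whole rows. In plain C the idea is simply:
 *
 *   // simplified sketch; the real loops above also cover the corners
 *   for (y = 0; y < height; y++) {
 *       memset(buf + y*wrap - w,     buf[y*wrap],           w); // left
 *       memset(buf + y*wrap + width, buf[y*wrap + width-1], w); // right
 *   }
 *   for (i = 1; i <= w; i++) {
 *       memcpy(buf - i*wrap, buf, width);                       // top
 *       memcpy(last_line + i*wrap, last_line, width);           // bottom
 *   }
 */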

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7     \n"\
        "movd    (%1,%0), %%mm0     \n"\
        "movd    (%2,%0), %%mm1     \n"\
        "punpcklbw %%mm7, %%mm0     \n"\
        "punpcklbw %%mm7, %%mm1     \n"\
        "add          %4, %0        \n"\
        "1:                         \n"\
        "movq      %%mm1, %%mm2     \n"\
        "movd    (%2,%0), %%mm1     \n"\
        "movq      %%mm2, %%mm3     \n"\
        "punpcklbw %%mm7, %%mm1     \n"\
        "movq      %%mm2, %%mm4     \n"\
        "psubw     %%mm1, %%mm3     \n"\
        "psubw     %%mm0, %%mm4     \n"\
        "movq      %%mm3, %%mm5     \n"\
        "paddw     %%mm4, %%mm5     \n"\
        abs3\
        "movq      %%mm4, %%mm6     \n"\
        "pminsw    %%mm5, %%mm6     \n"\
        "pcmpgtw   %%mm6, %%mm3     \n"\
        "pcmpgtw   %%mm5, %%mm4     \n"\
        "movq      %%mm4, %%mm6     \n"\
        "pand      %%mm3, %%mm4     \n"\
        "pandn     %%mm3, %%mm6     \n"\
        "pandn     %%mm0, %%mm3     \n"\
        "movd    (%3,%0), %%mm0     \n"\
        "pand      %%mm1, %%mm6     \n"\
        "pand      %%mm4, %%mm2     \n"\
        "punpcklbw %%mm7, %%mm0     \n"\
        "movq         %6, %%mm5     \n"\
        "paddw     %%mm6, %%mm0     \n"\
        "paddw     %%mm2, %%mm3     \n"\
        "paddw     %%mm3, %%mm0     \n"\
        "pand      %%mm5, %%mm0     \n"\
        "movq      %%mm0, %%mm3     \n"\
        "packuswb  %%mm3, %%mm3     \n"\
        "movd      %%mm3, (%1,%0)   \n"\
        "add          %4, %0        \n"\
        "cmp          %5, %0        \n"\
        "jle 1b                     \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
    "psubw     %%mm5, %%mm7     \n"\
    "pmaxsw    %%mm7, %%mm5     \n"\
    "pxor      %%mm6, %%mm6     \n"\
    "pxor      %%mm7, %%mm7     \n"\
    "psubw     %%mm3, %%mm6     \n"\
    "psubw     %%mm4, %%mm7     \n"\
    "pmaxsw    %%mm6, %%mm3     \n"\
    "pmaxsw    %%mm7, %%mm4     \n"\
    "pxor      %%mm7, %%mm7     \n"

#define ABS3_SSSE3\
    "pabsw     %%mm3, %%mm3     \n"\
    "pabsw     %%mm4, %%mm4     \n"\
    "pabsw     %%mm5, %%mm5     \n"

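/* PNG's Paeth predictor picks, per byte, whichever of left (a), above (b)
 * and upper-left (c) is closest to a + b - c; the abs3 argument supplies
 * the three absolute differences (MMX2 via psubw/pmaxsw, SSSE3 via pabsw).
 * A C sketch of one predicted byte, following the bitstream definition
 * rather than this file's vectorized register layout:
 *
 *   static uint8_t paeth(uint8_t a, uint8_t b, uint8_t c) {
 *       int p  = a + b - c;       // initial estimate
 *       int pa = abs(p - a);      // distance to left
 *       int pb = abs(p - b);      // distance to above
 *       int pc = abs(p - c);      // distance to upper-left
 *       if (pa <= pb && pa <= pc) return a;
 *       if (pb <= pc)             return b;
 *       return c;
 *   }
 *
 * add_png_paeth_prediction_* then adds the decoded residual in src to the
 * prediction and masks with ff_pw_255 to stay within a byte. */
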
PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 "             \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4     \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4              \n\t" /* 20x1 */\
    "movq "#in7", " #m3 "               \n\t" /* d */\
    "movq "#in0", %%mm5                 \n\t" /* D */\
    "paddw " #m3 ", %%mm5               \n\t" /* x4 */\
    "psubw %%mm5, %%mm4                 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5                 \n\t" /* C */\
    "movq "#in2", %%mm6                 \n\t" /* B */\
    "paddw " #m6 ", %%mm5               \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6               \n\t" /* x2 */\
    "paddw %%mm6, %%mm6                 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5                 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5    \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4              \n\t" /* 20x1 - x4 + rnd */\
    "paddw %%mm4, %%mm5                 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5                    \n\t"\
    "packuswb %%mm5, %%mm5              \n\t"\
    OP(%%mm5, out, %%mm7, d)

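/* The QPEL code below implements the MPEG-4 half-sample 8-tap filter with
 * taps (-1, 3, -6, 20, 20, -6, 3, -1)/32, folded into symmetric pair sums:
 * with x1 = in[0]+in[1], x2 = in[-1]+in[2], x3 = in[-2]+in[3] and
 * x4 = in[-3]+in[4], each output byte is
 *     clip8((20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5).
 * QPEL_V_LOW evaluates one such output of a vertical pass; the rounder
 * comes in through ROUNDER (presumably ff_pw_16 for the rounding and
 * ff_pw_15 for the no-rounding variants, matching the constants declared
 * near the top of this file). */
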
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0         \n\t"\
            "movq 24(%0), %%mm1         \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %5, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
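
/* Naming scheme for the motion-compensation entry points generated below:
 * OPNAME ## qpel8_mcXY (and qpel16_mcXY) handles the quarter-pel position
 * (X/4, Y/4), with X horizontal and Y vertical. mc00 is a plain copy;
 * positions with one quarter-pel coordinate run the lowpass once and
 * average with the source via pixels_l2; the mixed positions filter
 * horizontally into a temporary (halfH), then vertically (halfHV), and
 * blend the two. ROUNDER selects between the rounding and no-rounding
 * variants of the >>5 normalization. */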

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq (%0), %%mm1           \n\t"\
        "movq 8(%0), %%mm2          \n\t"\
        "movq 8(%0), %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "movq %%mm0, (%1)           \n\t"\
        "movq %%mm1, 17*8(%1)       \n\t"\
        "movq %%mm2, 2*17*8(%1)     \n\t"\
        "movq %%mm3, 3*17*8(%1)     \n\t"\
        "add $8, %1                 \n\t"\
        "add %3, %0                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7          \n\t"*/\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq 8(%0), %%mm1          \n\t"\
        "movq 16(%0), %%mm2         \n\t"\
        "movq 24(%0), %%mm3         \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0               \n\t"\
        "add %6, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq (%0), %%mm1           \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "movq %%mm0, (%1)           \n\t"\
        "movq %%mm1, 9*8(%1)        \n\t"\
        "add $8, %1                 \n\t"\
        "add %3, %0                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7          \n\t"*/\
        "1:                         \n\t"\
        "movq (%0), %%mm0           \n\t"\
        "movq 8(%0), %%mm1          \n\t"\
        "movq 16(%0), %%mm2         \n\t"\
        "movq 24(%0), %%mm3         \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                 \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0                \n\t"\
        "add %6, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1501 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ | |
1502 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1503 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ | |
1504 }\ | |
1505 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1506 uint64_t half[16*2 + 17*2];\ | |
1507 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1508 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1509 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1510 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ | |
1511 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1512 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ | |
1513 }\ | |
1514 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1515 uint64_t half[16*2 + 17*2];\ | |
1516 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1517 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1518 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1519 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ | |
1520 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1521 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ | |
1522 }\ | |
1523 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1524 uint64_t half[16*2 + 17*2];\ | |
1525 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1526 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1527 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1528 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ | |
1529 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1530 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ | |
1531 }\ | |
1532 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1533 uint64_t half[16*2 + 17*2];\ | |
1534 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1535 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1536 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1537 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1538 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ | |
1539 }\ | |
1540 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1541 uint64_t half[16*2 + 17*2];\ | |
1542 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1543 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1544 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1545 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1546 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ | |
1547 }\ | |
1548 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1549 uint64_t half[17*2];\ | |
1550 uint8_t * const halfH= ((uint8_t*)half);\ | |
1551 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1552 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ | |
1553 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ | |
1554 }\ | |
1555 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1556 uint64_t half[17*2];\ | |
1557 uint8_t * const halfH= ((uint8_t*)half);\ | |
1558 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1559 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ | |
1560 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ | |
1561 }\ | |
1562 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1563 uint64_t half[17*2];\ | |
1564 uint8_t * const halfH= ((uint8_t*)half);\ | |
1565 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1566 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ | |
1567 } | |
1568 | |
1569 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t" | |
1570 #define AVG_3DNOW_OP(a,b,temp, size) \ | |
1571 "mov" #size " " #b ", " #temp " \n\t"\ | |
1572 "pavgusb " #temp ", " #a " \n\t"\ | |
1573 "mov" #size " " #a ", " #b " \n\t" | |
1574 #define AVG_MMX2_OP(a,b,temp, size) \ | |
1575 "mov" #size " " #b ", " #temp " \n\t"\ | |
1576 "pavgb " #temp ", " #a " \n\t"\ | |
1577 "mov" #size " " #a ", " #b " \n\t" | |
1578 | |
1579 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP) | |
1580 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP) | |
1581 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP) | |
1582 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow) | |
1583 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow) | |
1584 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow) | |
1585 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2) | |
1586 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2) | |
1587 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2) | |
1588 | |
1589 /***********************************/ | |
1590 /* bilinear qpel: not compliant with any spec, only for -lavdopts fast */ |
1591 | |
1592 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\ | |
1593 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1594 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\ | |
1595 } | |
1596 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\ | |
1597 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1598 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\ | |
1599 } | |
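/* For reference: the "2tap" variants approximate qpel positions with halfpel
 * averages instead of the 8-tap MPEG-4 filter. A scalar sketch of the _x2_
 * building block used by QPEL_2TAP_XY, assuming the usual rounding average;
 * hypothetical helper, illustrative only. */
#if 0
static void put_pixels8_x2_sketch(uint8_t *dst, const uint8_t *src,
                                  int stride, int h)
{
    int x, y;
    for(y=0; y<h; y++){
        for(x=0; x<8; x++)
            dst[x] = (src[x] + src[x+1] + 1) >> 1; /* horizontal halfpel */
        dst += stride;
        src += stride;
    }
}
#endif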
1600 | |
1601 #define QPEL_2TAP(OPNAME, SIZE, MMX)\ | |
1602 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\ | |
1603 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\ | |
1604 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\ | |
1605 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\ | |
1606 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\ | |
1607 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\ | |
1608 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\ | |
1609 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\ | |
1610 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\ | |
1611 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1612 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\ | |
1613 }\ | |
1614 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1615 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\ | |
1616 }\ | |
1617 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\ | |
1618 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\ | |
1619 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\ | |
1620 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\ | |
1621 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\ | |
1622 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\ | |
1623 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\ | |
1624 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\ | |
1625 | |
1626 QPEL_2TAP(put_, 16, mmx2) | |
1627 QPEL_2TAP(avg_, 16, mmx2) | |
1628 QPEL_2TAP(put_, 8, mmx2) | |
1629 QPEL_2TAP(avg_, 8, mmx2) | |
1630 QPEL_2TAP(put_, 16, 3dnow) | |
1631 QPEL_2TAP(avg_, 16, 3dnow) | |
1632 QPEL_2TAP(put_, 8, 3dnow) | |
1633 QPEL_2TAP(avg_, 8, 3dnow) | |
1634 | |
1635 | |
1636 #if 0 | |
8527 | 1637 static void just_return(void) { return; } |
8430 | 1638 #endif |
1639 | |
1640 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, | |
1641 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){ | |
1642 const int w = 8; | |
1643 const int ix = ox>>(16+shift); | |
1644 const int iy = oy>>(16+shift); | |
1645 const int oxs = ox>>4; | |
1646 const int oys = oy>>4; | |
1647 const int dxxs = dxx>>4; | |
1648 const int dxys = dxy>>4; | |
1649 const int dyxs = dyx>>4; | |
1650 const int dyys = dyy>>4; | |
1651 const uint16_t r4[4] = {r,r,r,r}; | |
1652 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys}; | |
1653 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys}; | |
1654 const uint64_t shift2 = 2*shift; | |
1655 uint8_t edge_buf[(h+1)*stride]; | |
1656 int x, y; | |
1657 | |
1658 const int dxw = (dxx-(1<<(16+shift)))*(w-1); | |
1659 const int dyh = (dyy-(1<<(16+shift)))*(h-1); | |
1660 const int dxh = dxy*(h-1); | |
1661 const int dyw = dyx*(w-1); | |
1662 if( // non-constant fullpel offset (3% of blocks) | |
1663 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) | | |
1664 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift) | |
1665 // uses more than 16 bits of subpel mv (only at huge resolution) | |
1666 || (dxx|dxy|dyx|dyy)&15 ) | |
1667 { | |
1668 //FIXME could still use mmx for some of the rows | |
1669 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height); | |
1670 return; | |
1671 } | |
1672 | |
1673 src += ix + iy*stride; | |
1674 if( (unsigned)ix >= width-w || | |
1675 (unsigned)iy >= height-h ) | |
1676 { | |
1677 ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height); | |
1678 src = edge_buf; | |
1679 } | |
1680 | |
1681 __asm__ volatile( | |
1682 "movd %0, %%mm6 \n\t" | |
1683 "pxor %%mm7, %%mm7 \n\t" | |
1684 "punpcklwd %%mm6, %%mm6 \n\t" | |
1685 "punpcklwd %%mm6, %%mm6 \n\t" | |
1686 :: "r"(1<<shift) | |
1687 ); | |
1688 | |
1689 for(x=0; x<w; x+=4){ | |
1690 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0), | |
1691 oxs - dxys + dxxs*(x+1), | |
1692 oxs - dxys + dxxs*(x+2), | |
1693 oxs - dxys + dxxs*(x+3) }; | |
1694 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0), | |
1695 oys - dyys + dyxs*(x+1), | |
1696 oys - dyys + dyxs*(x+2), | |
1697 oys - dyys + dyxs*(x+3) }; | |
1698 | |
1699 for(y=0; y<h; y++){ | |
1700 __asm__ volatile( | |
1701 "movq %0, %%mm4 \n\t" | |
1702 "movq %1, %%mm5 \n\t" | |
1703 "paddw %2, %%mm4 \n\t" | |
1704 "paddw %3, %%mm5 \n\t" | |
1705 "movq %%mm4, %0 \n\t" | |
1706 "movq %%mm5, %1 \n\t" | |
1707 "psrlw $12, %%mm4 \n\t" | |
1708 "psrlw $12, %%mm5 \n\t" | |
1709 : "+m"(*dx4), "+m"(*dy4) | |
1710 : "m"(*dxy4), "m"(*dyy4) | |
1711 ); | |
1712 | |
1713 __asm__ volatile( | |
1714 "movq %%mm6, %%mm2 \n\t" | |
1715 "movq %%mm6, %%mm1 \n\t" | |
1716 "psubw %%mm4, %%mm2 \n\t" | |
1717 "psubw %%mm5, %%mm1 \n\t" | |
1718 "movq %%mm2, %%mm0 \n\t" | |
1719 "movq %%mm4, %%mm3 \n\t" | |
1720 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy) | |
1721 "pmullw %%mm5, %%mm3 \n\t" // dx*dy | |
1722 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy | |
1723 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy) | |
1724 | |
1725 "movd %4, %%mm5 \n\t" | |
1726 "movd %3, %%mm4 \n\t" | |
1727 "punpcklbw %%mm7, %%mm5 \n\t" | |
1728 "punpcklbw %%mm7, %%mm4 \n\t" | |
1729 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy | |
1730 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy | |
1731 | |
1732 "movd %2, %%mm5 \n\t" | |
1733 "movd %1, %%mm4 \n\t" | |
1734 "punpcklbw %%mm7, %%mm5 \n\t" | |
1735 "punpcklbw %%mm7, %%mm4 \n\t" | |
1736 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy) | |
1737 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy) | |
1738 "paddw %5, %%mm1 \n\t" | |
1739 "paddw %%mm3, %%mm2 \n\t" | |
1740 "paddw %%mm1, %%mm0 \n\t" | |
1741 "paddw %%mm2, %%mm0 \n\t" | |
1742 | |
1743 "psrlw %6, %%mm0 \n\t" | |
1744 "packuswb %%mm0, %%mm0 \n\t" | |
1745 "movd %%mm0, %0 \n\t" | |
1746 | |
1747 : "=m"(dst[x+y*stride]) | |
1748 : "m"(src[0]), "m"(src[1]), | |
1749 "m"(src[stride]), "m"(src[stride+1]), | |
1750 "m"(*r4), "m"(shift2) | |
1751 ); | |
1752 src += stride; | |
1753 } | |
1754 src += 4-h*stride; | |
1755 } | |
1756 } | |
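/* For reference, per output pixel the inner loop above is plain bilinear
 * interpolation; a scalar sketch with s = 1<<shift and dx,dy the subpel
 * fractions held in mm4/mm5 (hypothetical helper, illustrative only): */
#if 0
static inline uint8_t gmc_pixel_sketch(const uint8_t *src, int stride,
                                       int dx, int dy, int s, int r, int shift)
{
    int v = src[0]        * (s-dx) * (s-dy)   /* src[0,0]*(s-dx)*(s-dy) */
          + src[1]        *    dx  * (s-dy)   /* src[1,0]*dx*(s-dy)     */
          + src[stride]   * (s-dx) *    dy    /* src[0,1]*(s-dx)*dy     */
          + src[stride+1] *    dx  *    dy    /* src[1,1]*dx*dy         */
          + r;                                /* rounding constant      */
    v >>= 2*shift;
    return v > 255 ? 255 : v;                 /* packuswb saturates     */
}
#endif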
1757 | |
1758 #define PREFETCH(name, op) \ | |
1759 static void name(void *mem, int stride, int h){\ | |
1760 const uint8_t *p= mem;\ | |
1761 do{\ | |
1762 __asm__ volatile(#op" %0" :: "m"(*p));\ | |
1763 p+= stride;\ | |
1764 }while(--h);\ | |
1765 } | |
1766 PREFETCH(prefetch_mmx2, prefetcht0) | |
1767 PREFETCH(prefetch_3dnow, prefetch) | |
1768 #undef PREFETCH | |
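/* For reference, with GCC the same cache hinting can be expressed portably
 * through __builtin_prefetch; a sketch, illustrative only: */
#if 0
static void prefetch_sketch(void *mem, int stride, int h)
{
    const uint8_t *p = mem;
    do{
        __builtin_prefetch(p); /* hint the hardware to load this line */
        p += stride;
    }while(--h);
}
#endif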
1769 | |
1770 #include "h264dsp_mmx.c" | |
8519 | 1771 #include "rv40dsp_mmx.c" |
8430 | 1772 |
1773 /* CAVS specific */ | |
1774 void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx); | |
1775 void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx); | |
1776 | |
1777 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1778 put_pixels8_mmx(dst, src, stride, 8); | |
1779 } | |
1780 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1781 avg_pixels8_mmx(dst, src, stride, 8); | |
1782 } | |
1783 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1784 put_pixels16_mmx(dst, src, stride, 16); | |
1785 } | |
1786 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1787 avg_pixels16_mmx(dst, src, stride, 16); | |
1788 } | |
1789 | |
1790 /* VC1 specific */ | |
1791 void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx); | |
1792 | |
1793 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { | |
1794 put_pixels8_mmx(dst, src, stride, 8); | |
1795 } | |
1796 | |
1797 /* external functions, from idct_mmx.c */ | |
1798 void ff_mmx_idct(DCTELEM *block); | |
1799 void ff_mmxext_idct(DCTELEM *block); | |
1800 | |
1801 /* XXX: these functions should be removed as soon as all the IDCTs have |
1802 been converted */ |
8590 | 1803 #if CONFIG_GPL |
8430 | 1804 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block) |
1805 { | |
1806 ff_mmx_idct (block); | |
1807 put_pixels_clamped_mmx(block, dest, line_size); | |
1808 } | |
1809 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1810 { | |
1811 ff_mmx_idct (block); | |
1812 add_pixels_clamped_mmx(block, dest, line_size); | |
1813 } | |
1814 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block) | |
1815 { | |
1816 ff_mmxext_idct (block); | |
1817 put_pixels_clamped_mmx(block, dest, line_size); | |
1818 } | |
1819 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1820 { | |
1821 ff_mmxext_idct (block); | |
1822 add_pixels_clamped_mmx(block, dest, line_size); | |
1823 } | |
1824 #endif | |
1825 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block) | |
1826 { | |
1827 ff_idct_xvid_mmx (block); | |
1828 put_pixels_clamped_mmx(block, dest, line_size); | |
1829 } | |
1830 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1831 { | |
1832 ff_idct_xvid_mmx (block); | |
1833 add_pixels_clamped_mmx(block, dest, line_size); | |
1834 } | |
1835 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block) | |
1836 { | |
1837 ff_idct_xvid_mmx2 (block); | |
1838 put_pixels_clamped_mmx(block, dest, line_size); | |
1839 } | |
1840 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1841 { | |
1842 ff_idct_xvid_mmx2 (block); | |
1843 add_pixels_clamped_mmx(block, dest, line_size); | |
1844 } | |
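/* For reference, every wrapper above follows the same pattern: run the IDCT
 * in place on the coefficient block, then store or accumulate the clamped
 * result. A scalar sketch of the "add" half, assuming a 0..255 clamp as in
 * add_pixels_clamped (hypothetical helper, illustrative only): */
#if 0
static void idct_add_sketch(uint8_t *dest, int line_size, DCTELEM *block)
{
    int i, j;
    for(i=0; i<8; i++){
        for(j=0; j<8; j++){
            int v = dest[j] + block[i*8 + j];
            dest[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        dest += line_size;
    }
}
#endif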
1845 | |
1846 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize) | |
1847 { | |
1848 int i; | |
1849 __asm__ volatile("pxor %%mm7, %%mm7":); | |
1850 for(i=0; i<blocksize; i+=2) { | |
1851 __asm__ volatile( | |
1852 "movq %0, %%mm0 \n\t" | |
1853 "movq %1, %%mm1 \n\t" | |
1854 "movq %%mm0, %%mm2 \n\t" | |
1855 "movq %%mm1, %%mm3 \n\t" | |
1856 "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0 | |
1857 "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0 | |
1858 "pslld $31, %%mm2 \n\t" // keep only the sign bit | |
1859 "pxor %%mm2, %%mm1 \n\t" | |
1860 "movq %%mm3, %%mm4 \n\t" | |
1861 "pand %%mm1, %%mm3 \n\t" | |
1862 "pandn %%mm1, %%mm4 \n\t" | |
1863 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m))) | |
1864 "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m))) | |
1865 "movq %%mm3, %1 \n\t" | |
1866 "movq %%mm0, %0 \n\t" | |
1867 :"+m"(mag[i]), "+m"(ang[i]) | |
1868 ::"memory" | |
1869 ); | |
1870 } | |
1871 __asm__ volatile("femms"); | |
1872 } | |
1873 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize) | |
1874 { | |
1875 int i; | |
1876 | |
1877 __asm__ volatile( | |
1878 "movaps %0, %%xmm5 \n\t" | |
1879 ::"m"(ff_pdw_80000000[0]) | |
1880 ); | |
1881 for(i=0; i<blocksize; i+=4) { | |
1882 __asm__ volatile( | |
1883 "movaps %0, %%xmm0 \n\t" | |
1884 "movaps %1, %%xmm1 \n\t" | |
1885 "xorps %%xmm2, %%xmm2 \n\t" | |
1886 "xorps %%xmm3, %%xmm3 \n\t" | |
1887 "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0 | |
1888 "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0 | |
1889 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit | |
1890 "xorps %%xmm2, %%xmm1 \n\t" | |
1891 "movaps %%xmm3, %%xmm4 \n\t" | |
1892 "andps %%xmm1, %%xmm3 \n\t" | |
1893 "andnps %%xmm1, %%xmm4 \n\t" | |
1894 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m))) | |
1895 "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m))) | |
1896 "movaps %%xmm3, %1 \n\t" | |
1897 "movaps %%xmm0, %0 \n\t" | |
1898 :"+m"(mag[i]), "+m"(ang[i]) | |
1899 ::"memory" | |
1900 ); | |
1901 } | |
1902 } | |
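/* For reference, a scalar sketch of the coupling both versions above
 * vectorize, following the asm comments; the sign of mag selects whether the
 * corrected angle is added to or subtracted from the magnitude vector
 * (hypothetical helper, believed to match the C reference): */
#if 0
static void vorbis_inverse_coupling_sketch(float *mag, float *ang, int n)
{
    int i;
    for(i=0; i<n; i++){
        if(mag[i] > 0.0){
            if(ang[i] > 0.0){ ang[i] = mag[i] - ang[i]; }
            else            { float t = ang[i]; ang[i] = mag[i]; mag[i] += t; }
        }else{
            if(ang[i] > 0.0){ ang[i] += mag[i]; }
            else            { float t = ang[i]; ang[i] = mag[i]; mag[i] -= t; }
        }
    }
}
#endif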
1903 | |
1904 #define IF1(x) x | |
1905 #define IF0(x) | |
1906 | |
1907 #define MIX5(mono,stereo)\ | |
1908 __asm__ volatile(\ | |
1909 "movss 0(%2), %%xmm5 \n"\ | |
1910 "movss 8(%2), %%xmm6 \n"\ | |
1911 "movss 24(%2), %%xmm7 \n"\ | |
1912 "shufps $0, %%xmm5, %%xmm5 \n"\ | |
1913 "shufps $0, %%xmm6, %%xmm6 \n"\ | |
1914 "shufps $0, %%xmm7, %%xmm7 \n"\ | |
1915 "1: \n"\ | |
1916 "movaps (%0,%1), %%xmm0 \n"\ | |
1917 "movaps 0x400(%0,%1), %%xmm1 \n"\ | |
1918 "movaps 0x800(%0,%1), %%xmm2 \n"\ | |
1919 "movaps 0xc00(%0,%1), %%xmm3 \n"\ | |
1920 "movaps 0x1000(%0,%1), %%xmm4 \n"\ | |
1921 "mulps %%xmm5, %%xmm0 \n"\ | |
1922 "mulps %%xmm6, %%xmm1 \n"\ | |
1923 "mulps %%xmm5, %%xmm2 \n"\ | |
1924 "mulps %%xmm7, %%xmm3 \n"\ | |
1925 "mulps %%xmm7, %%xmm4 \n"\ | |
1926 stereo("addps %%xmm1, %%xmm0 \n")\ | |
1927 "addps %%xmm1, %%xmm2 \n"\ | |
1928 "addps %%xmm3, %%xmm0 \n"\ | |
1929 "addps %%xmm4, %%xmm2 \n"\ | |
1930 mono("addps %%xmm2, %%xmm0 \n")\ | |
1931 "movaps %%xmm0, (%0,%1) \n"\ | |
1932 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\ | |
1933 "add $16, %0 \n"\ | |
1934 "jl 1b \n"\ | |
1935 :"+&r"(i)\ | |
1936 :"r"(samples[0]+len), "r"(matrix)\ | |
1937 :"memory"\ | |
1938 ); | |
1939 | |
1940 #define MIX_MISC(stereo)\ | |
1941 __asm__ volatile(\ | |
1942 "1: \n"\ | |
1943 "movaps (%3,%0), %%xmm0 \n"\ | |
1944 stereo("movaps %%xmm0, %%xmm1 \n")\ | |
1945 "mulps %%xmm6, %%xmm0 \n"\ | |
1946 stereo("mulps %%xmm7, %%xmm1 \n")\ | |
1947 "lea 1024(%3,%0), %1 \n"\ | |
1948 "mov %5, %2 \n"\ | |
1949 "2: \n"\ | |
1950 "movaps (%1), %%xmm2 \n"\ | |
1951 stereo("movaps %%xmm2, %%xmm3 \n")\ | |
1952 "mulps (%4,%2), %%xmm2 \n"\ | |
1953 stereo("mulps 16(%4,%2), %%xmm3 \n")\ | |
1954 "addps %%xmm2, %%xmm0 \n"\ | |
1955 stereo("addps %%xmm3, %%xmm1 \n")\ | |
1956 "add $1024, %1 \n"\ | |
1957 "add $32, %2 \n"\ | |
1958 "jl 2b \n"\ | |
1959 "movaps %%xmm0, (%3,%0) \n"\ | |
1960 stereo("movaps %%xmm1, 1024(%3,%0) \n")\ | |
1961 "add $16, %0 \n"\ | |
1962 "jl 1b \n"\ | |
1963 :"+&r"(i), "=&r"(j), "=&r"(k)\ | |
1964 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\ | |
1965 :"memory"\ | |
1966 ); | |
1967 | |
1968 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len) | |
1969 { | |
1970 int (*matrix_cmp)[2] = (int(*)[2])matrix; | |
1971 intptr_t i,j,k; | |
1972 | |
1973 i = -len*sizeof(float); | |
1974 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) { | |
1975 MIX5(IF0,IF1); | |
1976 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) { | |
1977 MIX5(IF1,IF0); | |
1978 } else { | |
1979 DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]); | |
1980 j = 2*in_ch*sizeof(float); | |
1981 __asm__ volatile( | |
1982 "1: \n" | |
1983 "sub $8, %0 \n" | |
1984 "movss (%2,%0), %%xmm6 \n" | |
1985 "movss 4(%2,%0), %%xmm7 \n" | |
1986 "shufps $0, %%xmm6, %%xmm6 \n" | |
1987 "shufps $0, %%xmm7, %%xmm7 \n" | |
1988 "movaps %%xmm6, (%1,%0,4) \n" | |
1989 "movaps %%xmm7, 16(%1,%0,4) \n" | |
1990 "jg 1b \n" | |
1991 :"+&r"(j) | |
1992 :"r"(matrix_simd), "r"(matrix) | |
1993 :"memory" | |
1994 ); | |
1995 if(out_ch == 2) { | |
1996 MIX_MISC(IF1); | |
1997 } else { | |
1998 MIX_MISC(IF0); | |
1999 } | |
2000 } | |
2001 } | |
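/* For reference, a scalar sketch of the downmix the SSE paths implement;
 * MIX5 is the hand-unrolled 5-channel case, MIX_MISC the generic matrix
 * multiply. The result overwrites samples[0] (and samples[1] for stereo).
 * Hypothetical helper, illustrative only: */
#if 0
static void ac3_downmix_sketch(float (*samples)[256], float (*matrix)[2],
                               int out_ch, int in_ch, int len)
{
    int i, c;
    for(i=0; i<len; i++){
        float v0 = 0, v1 = 0;
        for(c=0; c<in_ch; c++){
            v0 += samples[c][i] * matrix[c][0];
            v1 += samples[c][i] * matrix[c][1];
        }
        samples[0][i] = v0;
        if(out_ch == 2)
            samples[1][i] = v1;
    }
}
#endif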
2002 | |
2003 static void vector_fmul_3dnow(float *dst, const float *src, int len){ | |
2004 x86_reg i = (len-4)*4; | |
2005 __asm__ volatile( | |
2006 "1: \n\t" | |
2007 "movq (%1,%0), %%mm0 \n\t" | |
2008 "movq 8(%1,%0), %%mm1 \n\t" | |
2009 "pfmul (%2,%0), %%mm0 \n\t" | |
2010 "pfmul 8(%2,%0), %%mm1 \n\t" | |
2011 "movq %%mm0, (%1,%0) \n\t" | |
2012 "movq %%mm1, 8(%1,%0) \n\t" | |
2013 "sub $16, %0 \n\t" | |
2014 "jge 1b \n\t" | |
2015 "femms \n\t" | |
2016 :"+r"(i) | |
2017 :"r"(dst), "r"(src) | |
2018 :"memory" | |
2019 ); | |
2020 } | |
2021 static void vector_fmul_sse(float *dst, const float *src, int len){ | |
2022 x86_reg i = (len-8)*4; | |
2023 __asm__ volatile( | |
2024 "1: \n\t" | |
2025 "movaps (%1,%0), %%xmm0 \n\t" | |
2026 "movaps 16(%1,%0), %%xmm1 \n\t" | |
2027 "mulps (%2,%0), %%xmm0 \n\t" | |
2028 "mulps 16(%2,%0), %%xmm1 \n\t" | |
2029 "movaps %%xmm0, (%1,%0) \n\t" | |
2030 "movaps %%xmm1, 16(%1,%0) \n\t" | |
2031 "sub $32, %0 \n\t" | |
2032 "jge 1b \n\t" | |
2033 :"+r"(i) | |
2034 :"r"(dst), "r"(src) | |
2035 :"memory" | |
2036 ); | |
2037 } | |
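/* For reference, both versions above are an in-place element-wise product;
 * the scalar equivalent (illustrative only): */
#if 0
static void vector_fmul_sketch(float *dst, const float *src, int len)
{
    int i;
    for(i=0; i<len; i++)
        dst[i] *= src[i];
}
#endif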
2038 | |
2039 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){ | |
2040 x86_reg i = len*4-16; | |
2041 __asm__ volatile( | |
2042 "1: \n\t" | |
2043 "pswapd 8(%1), %%mm0 \n\t" | |
2044 "pswapd (%1), %%mm1 \n\t" | |
2045 "pfmul (%3,%0), %%mm0 \n\t" | |
2046 "pfmul 8(%3,%0), %%mm1 \n\t" | |
2047 "movq %%mm0, (%2,%0) \n\t" | |
2048 "movq %%mm1, 8(%2,%0) \n\t" | |
2049 "add $16, %1 \n\t" | |
2050 "sub $16, %0 \n\t" | |
2051 "jge 1b \n\t" | |
2052 :"+r"(i), "+r"(src1) | |
2053 :"r"(dst), "r"(src0) | |
2054 ); | |
2055 __asm__ volatile("femms"); | |
2056 } | |
2057 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){ | |
2058 x86_reg i = len*4-32; | |
2059 __asm__ volatile( | |
2060 "1: \n\t" | |
2061 "movaps 16(%1), %%xmm0 \n\t" | |
2062 "movaps (%1), %%xmm1 \n\t" | |
2063 "shufps $0x1b, %%xmm0, %%xmm0 \n\t" | |
2064 "shufps $0x1b, %%xmm1, %%xmm1 \n\t" | |
2065 "mulps (%3,%0), %%xmm0 \n\t" | |
2066 "mulps 16(%3,%0), %%xmm1 \n\t" | |
2067 "movaps %%xmm0, (%2,%0) \n\t" | |
2068 "movaps %%xmm1, 16(%2,%0) \n\t" | |
2069 "add $32, %1 \n\t" | |
2070 "sub $32, %0 \n\t" | |
2071 "jge 1b \n\t" | |
2072 :"+r"(i), "+r"(src1) | |
2073 :"r"(dst), "r"(src0) | |
2074 ); | |
2075 } | |
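/* For reference, the "reverse" variants multiply src0 by src1 read backwards
 * (pswapd and shufps $0x1b reverse element order within a register); scalar
 * sketch, illustrative only: */
#if 0
static void vector_fmul_reverse_sketch(float *dst, const float *src0,
                                       const float *src1, int len)
{
    int i;
    for(i=0; i<len; i++)
        dst[i] = src0[i] * src1[len-1-i];
}
#endif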
2076 | |
2077 static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1, | |
2078 const float *src2, int src3, int len, int step){ | |
2079 x86_reg i = (len-4)*4; | |
2080 if(step == 2 && src3 == 0){ | |
2081 dst += (len-4)*2; | |
2082 __asm__ volatile( | |
2083 "1: \n\t" | |
2084 "movq (%2,%0), %%mm0 \n\t" | |
2085 "movq 8(%2,%0), %%mm1 \n\t" | |
2086 "pfmul (%3,%0), %%mm0 \n\t" | |
2087 "pfmul 8(%3,%0), %%mm1 \n\t" | |
2088 "pfadd (%4,%0), %%mm0 \n\t" | |
2089 "pfadd 8(%4,%0), %%mm1 \n\t" | |
2090 "movd %%mm0, (%1) \n\t" | |
2091 "movd %%mm1, 16(%1) \n\t" | |
2092 "psrlq $32, %%mm0 \n\t" | |
2093 "psrlq $32, %%mm1 \n\t" | |
2094 "movd %%mm0, 8(%1) \n\t" | |
2095 "movd %%mm1, 24(%1) \n\t" | |
2096 "sub $32, %1 \n\t" | |
2097 "sub $16, %0 \n\t" | |
2098 "jge 1b \n\t" | |
2099 :"+r"(i), "+r"(dst) | |
2100 :"r"(src0), "r"(src1), "r"(src2) | |
2101 :"memory" | |
2102 ); | |
2103 } | |
2104 else if(step == 1 && src3 == 0){ | |
2105 __asm__ volatile( | |
2106 "1: \n\t" | |
2107 "movq (%2,%0), %%mm0 \n\t" | |
2108 "movq 8(%2,%0), %%mm1 \n\t" | |
2109 "pfmul (%3,%0), %%mm0 \n\t" | |
2110 "pfmul 8(%3,%0), %%mm1 \n\t" | |
2111 "pfadd (%4,%0), %%mm0 \n\t" | |
2112 "pfadd 8(%4,%0), %%mm1 \n\t" | |
2113 "movq %%mm0, (%1,%0) \n\t" | |
2114 "movq %%mm1, 8(%1,%0) \n\t" | |
2115 "sub $16, %0 \n\t" | |
2116 "jge 1b \n\t" | |
2117 :"+r"(i) | |
2118 :"r"(dst), "r"(src0), "r"(src1), "r"(src2) | |
2119 :"memory" | |
2120 ); | |
2121 } | |
2122 else | |
2123 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step); | |
2124 __asm__ volatile("femms"); | |
2125 } | |
2126 static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1, | |
2127 const float *src2, int src3, int len, int step){ | |
2128 x86_reg i = (len-8)*4; | |
2129 if(step == 2 && src3 == 0){ | |
2130 dst += (len-8)*2; | |
2131 __asm__ volatile( | |
2132 "1: \n\t" | |
2133 "movaps (%2,%0), %%xmm0 \n\t" | |
2134 "movaps 16(%2,%0), %%xmm1 \n\t" | |
2135 "mulps (%3,%0), %%xmm0 \n\t" | |
2136 "mulps 16(%3,%0), %%xmm1 \n\t" | |
2137 "addps (%4,%0), %%xmm0 \n\t" | |
2138 "addps 16(%4,%0), %%xmm1 \n\t" | |
2139 "movss %%xmm0, (%1) \n\t" | |
2140 "movss %%xmm1, 32(%1) \n\t" | |
2141 "movhlps %%xmm0, %%xmm2 \n\t" | |
2142 "movhlps %%xmm1, %%xmm3 \n\t" | |
2143 "movss %%xmm2, 16(%1) \n\t" | |
2144 "movss %%xmm3, 48(%1) \n\t" | |
2145 "shufps $0xb1, %%xmm0, %%xmm0 \n\t" | |
2146 "shufps $0xb1, %%xmm1, %%xmm1 \n\t" | |
2147 "movss %%xmm0, 8(%1) \n\t" | |
2148 "movss %%xmm1, 40(%1) \n\t" | |
2149 "movhlps %%xmm0, %%xmm2 \n\t" | |
2150 "movhlps %%xmm1, %%xmm3 \n\t" | |
2151 "movss %%xmm2, 24(%1) \n\t" | |
2152 "movss %%xmm3, 56(%1) \n\t" | |
2153 "sub $64, %1 \n\t" | |
2154 "sub $32, %0 \n\t" | |
2155 "jge 1b \n\t" | |
2156 :"+r"(i), "+r"(dst) | |
2157 :"r"(src0), "r"(src1), "r"(src2) | |
2158 :"memory" | |
2159 ); | |
2160 } | |
2161 else if(step == 1 && src3 == 0){ | |
2162 __asm__ volatile( | |
2163 "1: \n\t" | |
2164 "movaps (%2,%0), %%xmm0 \n\t" | |
2165 "movaps 16(%2,%0), %%xmm1 \n\t" | |
2166 "mulps (%3,%0), %%xmm0 \n\t" | |
2167 "mulps 16(%3,%0), %%xmm1 \n\t" | |
2168 "addps (%4,%0), %%xmm0 \n\t" | |
2169 "addps 16(%4,%0), %%xmm1 \n\t" | |
2170 "movaps %%xmm0, (%1,%0) \n\t" | |
2171 "movaps %%xmm1, 16(%1,%0) \n\t" | |
2172 "sub $32, %0 \n\t" | |
2173 "jge 1b \n\t" | |
2174 :"+r"(i) | |
2175 :"r"(dst), "r"(src0), "r"(src1), "r"(src2) | |
2176 :"memory" | |
2177 ); | |
2178 } | |
2179 else | |
2180 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step); | |
2181 } | |
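/* For reference, leaving aside the strided store layout, the operation is a
 * fused multiply-add; a scalar sketch assuming src3 acts as an additive bias
 * as in ff_vector_fmul_add_add_c (illustrative only): */
#if 0
static void vector_fmul_add_add_sketch(float *dst, const float *src0,
                                       const float *src1, const float *src2,
                                       int src3, int len, int step)
{
    int i;
    for(i=0; i<len; i++)
        dst[i*step] = src0[i]*src1[i] + src2[i] + src3;
}
#endif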
2182 | |
2183 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1, | |
2184 const float *win, float add_bias, int len){ | |
8590 | 2185 #if HAVE_6REGS |
8430 | 2186 if(add_bias == 0){ |
2187 x86_reg i = -len*4; | |
2188 x86_reg j = len*4-8; | |
2189 __asm__ volatile( | |
2190 "1: \n" | |
2191 "pswapd (%5,%1), %%mm1 \n" | |
2192 "movq (%5,%0), %%mm0 \n" | |
2193 "pswapd (%4,%1), %%mm5 \n" | |
2194 "movq (%3,%0), %%mm4 \n" | |
2195 "movq %%mm0, %%mm2 \n" | |
2196 "movq %%mm1, %%mm3 \n" | |
2197 "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i] | |
2198 "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j] | |
2199 "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j] | |
2200 "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i] | |
2201 "pfadd %%mm3, %%mm2 \n" | |
2202 "pfsub %%mm0, %%mm1 \n" | |
2203 "pswapd %%mm2, %%mm2 \n" | |
2204 "movq %%mm1, (%2,%0) \n" | |
2205 "movq %%mm2, (%2,%1) \n" | |
2206 "sub $8, %1 \n" | |
2207 "add $8, %0 \n" | |
2208 "jl 1b \n" | |
2209 "femms \n" | |
2210 :"+r"(i), "+r"(j) | |
2211 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) | |
2212 ); | |
2213 }else | |
2214 #endif | |
2215 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len); | |
2216 } | |
2217 | |
2218 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1, | |
2219 const float *win, float add_bias, int len){ | |
8590 | 2220 #if HAVE_6REGS |
8430 | 2221 if(add_bias == 0){ |
2222 x86_reg i = -len*4; | |
2223 x86_reg j = len*4-16; | |
2224 __asm__ volatile( | |
2225 "1: \n" | |
2226 "movaps (%5,%1), %%xmm1 \n" | |
2227 "movaps (%5,%0), %%xmm0 \n" | |
2228 "movaps (%4,%1), %%xmm5 \n" | |
2229 "movaps (%3,%0), %%xmm4 \n" | |
2230 "shufps $0x1b, %%xmm1, %%xmm1 \n" | |
2231 "shufps $0x1b, %%xmm5, %%xmm5 \n" | |
2232 "movaps %%xmm0, %%xmm2 \n" | |
2233 "movaps %%xmm1, %%xmm3 \n" | |
2234 "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i] | |
2235 "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j] | |
2236 "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j] | |
2237 "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i] | |
2238 "addps %%xmm3, %%xmm2 \n" | |
2239 "subps %%xmm0, %%xmm1 \n" | |
2240 "shufps $0x1b, %%xmm2, %%xmm2 \n" | |
2241 "movaps %%xmm1, (%2,%0) \n" | |
2242 "movaps %%xmm2, (%2,%1) \n" | |
2243 "sub $16, %1 \n" | |
2244 "add $16, %0 \n" | |
2245 "jl 1b \n" | |
2246 :"+r"(i), "+r"(j) | |
2247 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) | |
2248 ); | |
2249 }else | |
2250 #endif | |
2251 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len); | |
2252 } | |
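/* For reference, a scalar sketch of the zero-bias windowing both versions
 * above implement, following the asm comments: each iteration pairs a
 * forward sample with a reversed one, as in an MDCT overlap-add window
 * (hypothetical helper, believed to match ff_vector_fmul_window_c): */
#if 0
static void vector_fmul_window_sketch(float *dst, const float *src0,
                                      const float *src1, const float *win,
                                      int len)
{
    int i, j;
    dst += len; win += len; src0 += len;
    for(i=-len, j=len-1; i<0; i++, j--){
        float s0 = src0[i], s1 = src1[j];
        float wi = win[i],  wj = win[j];
        dst[i] = s0*wj - s1*wi;
        dst[j] = s0*wi + s1*wj;
    }
}
#endif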
2253 | |
2254 static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len) | |
2255 { | |
2256 x86_reg i = -4*len; | |
2257 __asm__ volatile( | |
2258 "movss %3, %%xmm4 \n" | |
2259 "shufps $0, %%xmm4, %%xmm4 \n" | |
2260 "1: \n" | |
2261 "cvtpi2ps (%2,%0), %%xmm0 \n" | |
2262 "cvtpi2ps 8(%2,%0), %%xmm1 \n" | |
2263 "cvtpi2ps 16(%2,%0), %%xmm2 \n" | |
2264 "cvtpi2ps 24(%2,%0), %%xmm3 \n" | |
2265 "movlhps %%xmm1, %%xmm0 \n" | |
2266 "movlhps %%xmm3, %%xmm2 \n" | |
2267 "mulps %%xmm4, %%xmm0 \n" | |
2268 "mulps %%xmm4, %%xmm2 \n" | |
2269 "movaps %%xmm0, (%1,%0) \n" | |
2270 "movaps %%xmm2, 16(%1,%0) \n" | |
2271 "add $32, %0 \n" | |
2272 "jl 1b \n" | |
2273 :"+r"(i) | |
2274 :"r"(dst+len), "r"(src+len), "m"(mul) | |
2275 ); | |
2276 } | |
2277 | |
2278 static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len) | |
2279 { | |
2280 x86_reg i = -4*len; | |
2281 __asm__ volatile( | |
2282 "movss %3, %%xmm4 \n" | |
2283 "shufps $0, %%xmm4, %%xmm4 \n" | |
2284 "1: \n" | |
2285 "cvtdq2ps (%2,%0), %%xmm0 \n" | |
2286 "cvtdq2ps 16(%2,%0), %%xmm1 \n" | |
2287 "mulps %%xmm4, %%xmm0 \n" | |
2288 "mulps %%xmm4, %%xmm1 \n" | |
2289 "movaps %%xmm0, (%1,%0) \n" | |
2290 "movaps %%xmm1, 16(%1,%0) \n" | |
2291 "add $32, %0 \n" | |
2292 "jl 1b \n" | |
2293 :"+r"(i) | |
2294 :"r"(dst+len), "r"(src+len), "m"(mul) | |
2295 ); | |
2296 } | |
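/* For reference, both versions convert int32 samples to float and scale by a
 * constant, i.e. dst[i] = src[i] * mul; the SSE path converts via cvtpi2ps on
 * MMX halves, the SSE2 path via cvtdq2ps directly. Scalar sketch
 * (illustrative only): */
#if 0
static void int32_to_float_fmul_scalar_sketch(float *dst, const int *src,
                                              float mul, int len)
{
    int i;
    for(i=0; i<len; i++)
        dst[i] = src[i] * mul;
}
#endif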
2297 | |
2298 static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){ | |
2299 x86_reg reglen = len; | |
2300 // not bit-exact: pf2id uses different rounding than C and SSE | |
2301 __asm__ volatile( | |
2302 "add %0 , %0 \n\t" | |
2303 "lea (%2,%0,2) , %2 \n\t" | |
2304 "add %0 , %1 \n\t" | |
2305 "neg %0 \n\t" | |
2306 "1: \n\t" | |
2307 "pf2id (%2,%0,2) , %%mm0 \n\t" | |
2308 "pf2id 8(%2,%0,2) , %%mm1 \n\t" | |
2309 "pf2id 16(%2,%0,2) , %%mm2 \n\t" | |
2310 "pf2id 24(%2,%0,2) , %%mm3 \n\t" | |
2311 "packssdw %%mm1 , %%mm0 \n\t" | |
2312 "packssdw %%mm3 , %%mm2 \n\t" | |
2313 "movq %%mm0 , (%1,%0) \n\t" | |
2314 "movq %%mm2 , 8(%1,%0) \n\t" | |
2315 "add $16 , %0 \n\t" | |
2316 " js 1b \n\t" | |
2317 "femms \n\t" | |
2318 :"+r"(reglen), "+r"(dst), "+r"(src) | |
2319 ); | |
2320 } | |
2321 static void float_to_int16_sse(int16_t *dst, const float *src, long len){ | |
2322 x86_reg reglen = len; | |
2323 __asm__ volatile( | |
2324 "add %0 , %0 \n\t" | |
2325 "lea (%2,%0,2) , %2 \n\t" | |
2326 "add %0 , %1 \n\t" | |
2327 "neg %0 \n\t" | |
2328 "1: \n\t" | |
2329 "cvtps2pi (%2,%0,2) , %%mm0 \n\t" | |
2330 "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t" | |
2331 "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t" | |
2332 "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t" | |
2333 "packssdw %%mm1 , %%mm0 \n\t" | |
2334 "packssdw %%mm3 , %%mm2 \n\t" | |
2335 "movq %%mm0 , (%1,%0) \n\t" | |
2336 "movq %%mm2 , 8(%1,%0) \n\t" | |
2337 "add $16 , %0 \n\t" | |
2338 " js 1b \n\t" | |
2339 "emms \n\t" | |
2340 :"+r"(reglen), "+r"(dst), "+r"(src) | |
2341 ); | |
2342 } | |
2343 | |
2344 static void float_to_int16_sse2(int16_t *dst, const float *src, long len){ | |
2345 x86_reg reglen = len; | |
2346 __asm__ volatile( | |
2347 "add %0 , %0 \n\t" | |
2348 "lea (%2,%0,2) , %2 \n\t" | |
2349 "add %0 , %1 \n\t" | |
2350 "neg %0 \n\t" | |
2351 "1: \n\t" | |
2352 "cvtps2dq (%2,%0,2) , %%xmm0 \n\t" | |
2353 "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t" | |
2354 "packssdw %%xmm1 , %%xmm0 \n\t" | |
2355 "movdqa %%xmm0 , (%1,%0) \n\t" | |
2356 "add $16 , %0 \n\t" | |
2357 " js 1b \n\t" | |
2358 :"+r"(reglen), "+r"(dst), "+r"(src) | |
2359 ); | |
2360 } | |
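/* For reference, all three routines above convert float to signed 16 bit
 * with saturation (packssdw clamps to -32768..32767); a scalar sketch
 * assuming lrintf-style round-to-nearest (needs <math.h>, illustrative
 * only): */
#if 0
static void float_to_int16_sketch(int16_t *dst, const float *src, long len)
{
    long i;
    for(i=0; i<len; i++){
        long v = lrintf(src[i]);
        dst[i] = v < -32768 ? -32768 : v > 32767 ? 32767 : v;
    }
}
#endif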
2361 | |
8590 | 2362 #if HAVE_YASM |
8430 | 2363 void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len); |
2364 void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len); | |
2365 void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len); | |
8760 | 2366 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top); |
8430 | 2367 void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0); |
2368 void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0); | |
2369 void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta); | |
2370 void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta); | |
8590 | 2371 #if ARCH_X86_32 |
8430 | 2372 static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta) |
2373 { | |
2374 ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta); | |
2375 ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta); | |
2376 } | |
8463 | 2377 #endif |
8430 | 2378 void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta); |
2379 void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta); | |
2380 #else | |
2381 #define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6) | |
2382 #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) | |
2383 #define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) | |
2384 #endif | |
2385 #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse | |
2386 | |
2387 #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \ | |
2388 /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\ |
2389 static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\ | |
2390 DECLARE_ALIGNED_16(int16_t, tmp[len]);\ | |
2391 int i,j,c;\ | |
2392 for(c=0; c<channels; c++){\ | |
2393 float_to_int16_##cpu(tmp, src[c], len);\ | |
2394 for(i=0, j=c; i<len; i++, j+=channels)\ | |
2395 dst[j] = tmp[i];\ | |
2396 }\ | |
2397 }\ | |
2398 \ | |
2399 static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\ | |
2400 if(channels==1)\ | |
2401 float_to_int16_##cpu(dst, src[0], len);\ | |
2402 else if(channels==2){\ | |
2403 x86_reg reglen = len; \ | |
2404 const float *src0 = src[0];\ | |
2405 const float *src1 = src[1];\ | |
2406 __asm__ volatile(\ | |
2407 "shl $2, %0 \n"\ | |
2408 "add %0, %1 \n"\ | |
2409 "add %0, %2 \n"\ | |
2410 "add %0, %3 \n"\ | |
2411 "neg %0 \n"\ | |
2412 body\ | |
2413 :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\ | |
2414 );\ | |
2415 }else if(channels==6){\ | |
2416 ff_float_to_int16_interleave6_##cpu(dst, src, len);\ | |
2417 }else\ | |
2418 float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\ | |
2419 } | |
2420 | |
2421 FLOAT_TO_INT16_INTERLEAVE(3dnow, | |
2422 "1: \n" | |
2423 "pf2id (%2,%0), %%mm0 \n" | |
2424 "pf2id 8(%2,%0), %%mm1 \n" | |
2425 "pf2id (%3,%0), %%mm2 \n" | |
2426 "pf2id 8(%3,%0), %%mm3 \n" | |
2427 "packssdw %%mm1, %%mm0 \n" | |
2428 "packssdw %%mm3, %%mm2 \n" | |
2429 "movq %%mm0, %%mm1 \n" | |
2430 "punpcklwd %%mm2, %%mm0 \n" | |
2431 "punpckhwd %%mm2, %%mm1 \n" | |
2432 "movq %%mm0, (%1,%0)\n" | |
2433 "movq %%mm1, 8(%1,%0)\n" | |
2434 "add $16, %0 \n" | |
2435 "js 1b \n" | |
2436 "femms \n" | |
2437 ) | |
2438 | |
2439 FLOAT_TO_INT16_INTERLEAVE(sse, | |
2440 "1: \n" | |
2441 "cvtps2pi (%2,%0), %%mm0 \n" | |
2442 "cvtps2pi 8(%2,%0), %%mm1 \n" | |
2443 "cvtps2pi (%3,%0), %%mm2 \n" | |
2444 "cvtps2pi 8(%3,%0), %%mm3 \n" | |
2445 "packssdw %%mm1, %%mm0 \n" | |
2446 "packssdw %%mm3, %%mm2 \n" | |
2447 "movq %%mm0, %%mm1 \n" | |
2448 "punpcklwd %%mm2, %%mm0 \n" | |
2449 "punpckhwd %%mm2, %%mm1 \n" | |
2450 "movq %%mm0, (%1,%0)\n" | |
2451 "movq %%mm1, 8(%1,%0)\n" | |
2452 "add $16, %0 \n" | |
2453 "js 1b \n" | |
2454 "emms \n" | |
2455 ) | |
2456 | |
2457 FLOAT_TO_INT16_INTERLEAVE(sse2, | |
2458 "1: \n" | |
2459 "cvtps2dq (%2,%0), %%xmm0 \n" | |
2460 "cvtps2dq (%3,%0), %%xmm1 \n" | |
2461 "packssdw %%xmm1, %%xmm0 \n" | |
2462 "movhlps %%xmm0, %%xmm1 \n" | |
2463 "punpcklwd %%xmm1, %%xmm0 \n" | |
2464 "movdqa %%xmm0, (%1,%0) \n" | |
2465 "add $16, %0 \n" | |
2466 "js 1b \n" | |
2467 ) | |
2468 | |
2469 static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){ | |
2470 if(channels==6) | |
2471 ff_float_to_int16_interleave6_3dn2(dst, src, len); | |
2472 else | |
2473 float_to_int16_interleave_3dnow(dst, src, len, channels); | |
2474 } | |
2475 | |
2476 | |
2477 void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width); | |
2478 void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width); | |
2479 void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width); | |
2480 void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width); | |
2481 void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, | |
2482 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8); | |
2483 void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, | |
2484 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8); | |
2485 | |
2486 | |
2487 static void add_int16_sse2(int16_t * v1, int16_t * v2, int order) | |
2488 { | |
2489 x86_reg o = -(order << 1); | |
2490 v1 += order; | |
2491 v2 += order; | |
2492 __asm__ volatile( | |
2493 "1: \n\t" | |
2494 "movdqu (%1,%2), %%xmm0 \n\t" | |
2495 "movdqu 16(%1,%2), %%xmm1 \n\t" | |
2496 "paddw (%0,%2), %%xmm0 \n\t" | |
2497 "paddw 16(%0,%2), %%xmm1 \n\t" | |
2498 "movdqa %%xmm0, (%0,%2) \n\t" | |
2499 "movdqa %%xmm1, 16(%0,%2) \n\t" | |
2500 "add $32, %2 \n\t" | |
2501 "js 1b \n\t" | |
2502 : "+r"(v1), "+r"(v2), "+r"(o) | |
2503 ); | |
2504 } | |
2505 | |
2506 static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order) | |
2507 { | |
2508 x86_reg o = -(order << 1); | |
2509 v1 += order; | |
2510 v2 += order; | |
2511 __asm__ volatile( | |
2512 "1: \n\t" | |
2513 "movdqa (%0,%2), %%xmm0 \n\t" | |
2514 "movdqa 16(%0,%2), %%xmm2 \n\t" | |
2515 "movdqu (%1,%2), %%xmm1 \n\t" | |
2516 "movdqu 16(%1,%2), %%xmm3 \n\t" | |
2517 "psubw %%xmm1, %%xmm0 \n\t" | |
2518 "psubw %%xmm3, %%xmm2 \n\t" | |
2519 "movdqa %%xmm0, (%0,%2) \n\t" | |
2520 "movdqa %%xmm2, 16(%0,%2) \n\t" | |
2521 "add $32, %2 \n\t" | |
2522 "js 1b \n\t" | |
2523 : "+r"(v1), "+r"(v2), "+r"(o) | |
2524 ); | |
2525 } | |
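/* For reference, the pair above is plain element-wise int16 addition and
 * subtraction with wraparound, matching paddw/psubw; scalar sketch of the
 * add case (illustrative only): */
#if 0
static void add_int16_sketch(int16_t *v1, const int16_t *v2, int order)
{
    while(order--)
        *v1++ += *v2++;
}
#endif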
2526 | |
2527 static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift) | |
2528 { | |
2529 int res = 0; | |
8668 | 2530 DECLARE_ALIGNED_16(xmm_reg, sh); |
8430 | 2531 x86_reg o = -(order << 1); |
2532 | |
2533 v1 += order; | |
2534 v2 += order; | |
8668 | 2535 sh.a = shift; |
8430 | 2536 __asm__ volatile( |
2537 "pxor %%xmm7, %%xmm7 \n\t" | |
2538 "1: \n\t" | |
2539 "movdqu (%0,%3), %%xmm0 \n\t" | |
2540 "movdqu 16(%0,%3), %%xmm1 \n\t" | |
2541 "pmaddwd (%1,%3), %%xmm0 \n\t" | |
2542 "pmaddwd 16(%1,%3), %%xmm1 \n\t" | |
2543 "paddd %%xmm0, %%xmm7 \n\t" | |
2544 "paddd %%xmm1, %%xmm7 \n\t" | |
2545 "add $32, %3 \n\t" | |
2546 "js 1b \n\t" | |
2547 "movhlps %%xmm7, %%xmm2 \n\t" | |
2548 "paddd %%xmm2, %%xmm7 \n\t" | |
2549 "psrad %4, %%xmm7 \n\t" | |
2550 "pshuflw $0x4E, %%xmm7,%%xmm2 \n\t" | |
2551 "paddd %%xmm2, %%xmm7 \n\t" | |
2552 "movd %%xmm7, %2 \n\t" | |
2553 : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o) | |
2554 : "m"(sh) | |
2555 ); | |
2556 return res; | |
2557 } | |
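/* For reference, a scalar sketch of the dot product above; note the SSE2
 * code applies the shift to partial sums rather than to each product, so
 * results can differ slightly from this per-product reference (illustrative
 * only): */
#if 0
static int32_t scalarproduct_int16_sketch(const int16_t *v1, const int16_t *v2,
                                          int order, int shift)
{
    int32_t res = 0;
    while(order--)
        res += (*v1++ * *v2++) >> shift;
    return res;
}
#endif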
2558 | |
2559 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) | |
2560 { | |
2561 mm_flags = mm_support(); | |
2562 | |
2563 if (avctx->dsp_mask) { | |
2564 if (avctx->dsp_mask & FF_MM_FORCE) | |
2565 mm_flags |= (avctx->dsp_mask & 0xffff); | |
2566 else | |
2567 mm_flags &= ~(avctx->dsp_mask & 0xffff); | |
2568 } | |
2569 | |
2570 #if 0 | |
2571 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:"); | |
2572 if (mm_flags & FF_MM_MMX) | |
2573 av_log(avctx, AV_LOG_INFO, " mmx"); | |
2574 if (mm_flags & FF_MM_MMXEXT) | |
2575 av_log(avctx, AV_LOG_INFO, " mmxext"); | |
2576 if (mm_flags & FF_MM_3DNOW) | |
2577 av_log(avctx, AV_LOG_INFO, " 3dnow"); | |
2578 if (mm_flags & FF_MM_SSE) | |
2579 av_log(avctx, AV_LOG_INFO, " sse"); | |
2580 if (mm_flags & FF_MM_SSE2) | |
2581 av_log(avctx, AV_LOG_INFO, " sse2"); | |
2582 av_log(avctx, AV_LOG_INFO, "\n"); | |
2583 #endif | |
2584 | |
2585 if (mm_flags & FF_MM_MMX) { | |
2586 const int idct_algo= avctx->idct_algo; | |
2587 | |
2588 if(avctx->lowres==0){ | |
2589 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ | |
2590 c->idct_put= ff_simple_idct_put_mmx; | |
2591 c->idct_add= ff_simple_idct_add_mmx; | |
2592 c->idct = ff_simple_idct_mmx; | |
2593 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM; | |
8590 | 2594 #if CONFIG_GPL |
8430 | 2595 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){ |
2596 if(mm_flags & FF_MM_MMXEXT){ | |
2597 c->idct_put= ff_libmpeg2mmx2_idct_put; | |
2598 c->idct_add= ff_libmpeg2mmx2_idct_add; | |
2599 c->idct = ff_mmxext_idct; | |
2600 }else{ | |
2601 c->idct_put= ff_libmpeg2mmx_idct_put; | |
2602 c->idct_add= ff_libmpeg2mmx_idct_add; | |
2603 c->idct = ff_mmx_idct; | |
2604 } | |
2605 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; | |
2606 #endif | |
8596 | 2607 }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER || CONFIG_THEORA_DECODER) && |
8430 | 2608 idct_algo==FF_IDCT_VP3){ |
2609 if(mm_flags & FF_MM_SSE2){ | |
2610 c->idct_put= ff_vp3_idct_put_sse2; | |
2611 c->idct_add= ff_vp3_idct_add_sse2; | |
2612 c->idct = ff_vp3_idct_sse2; | |
2613 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; | |
2614 }else{ | |
2615 c->idct_put= ff_vp3_idct_put_mmx; | |
2616 c->idct_add= ff_vp3_idct_add_mmx; | |
2617 c->idct = ff_vp3_idct_mmx; | |
2618 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM; | |
2619 } | |
2620 }else if(idct_algo==FF_IDCT_CAVS){ | |
2621 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; | |
2622 }else if(idct_algo==FF_IDCT_XVIDMMX){ | |
2623 if(mm_flags & FF_MM_SSE2){ | |
2624 c->idct_put= ff_idct_xvid_sse2_put; | |
2625 c->idct_add= ff_idct_xvid_sse2_add; | |
2626 c->idct = ff_idct_xvid_sse2; | |
2627 c->idct_permutation_type= FF_SSE2_IDCT_PERM; | |
2628 }else if(mm_flags & FF_MM_MMXEXT){ | |
2629 c->idct_put= ff_idct_xvid_mmx2_put; | |
2630 c->idct_add= ff_idct_xvid_mmx2_add; | |
2631 c->idct = ff_idct_xvid_mmx2; | |
2632 }else{ | |
2633 c->idct_put= ff_idct_xvid_mmx_put; | |
2634 c->idct_add= ff_idct_xvid_mmx_add; | |
2635 c->idct = ff_idct_xvid_mmx; | |
2636 } | |
2637 } | |
2638 } | |
2639 | |
2640 c->put_pixels_clamped = put_pixels_clamped_mmx; | |
2641 c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx; | |
2642 c->add_pixels_clamped = add_pixels_clamped_mmx; | |
2643 c->clear_block = clear_block_mmx; | |
2644 c->clear_blocks = clear_blocks_mmx; | |
2645 if (mm_flags & FF_MM_SSE) | |
2646 c->clear_block = clear_block_sse; | |
2647 | |
2648 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \ | |
2649 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \ | |
2650 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \ | |
2651 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \ | |
2652 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU | |
2653 | |
2654 SET_HPEL_FUNCS(put, 0, 16, mmx); | |
2655 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx); | |
2656 SET_HPEL_FUNCS(avg, 0, 16, mmx); | |
2657 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx); | |
2658 SET_HPEL_FUNCS(put, 1, 8, mmx); | |
2659 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx); | |
2660 SET_HPEL_FUNCS(avg, 1, 8, mmx); | |
2661 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx); | |
2662 | |
2663 c->gmc= gmc_mmx; | |
2664 | |
2665 c->add_bytes= add_bytes_mmx; | |
2666 c->add_bytes_l2= add_bytes_l2_mmx; | |
2667 | |
2668 c->draw_edges = draw_edges_mmx; | |
2669 | |
8596 | 2670 if (CONFIG_ANY_H263) { |
8430 | 2671 c->h263_v_loop_filter= h263_v_loop_filter_mmx; |
2672 c->h263_h_loop_filter= h263_h_loop_filter_mmx; | |
2673 } | |
2674 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd; | |
2675 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx; | |
2676 c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd; | |
2677 | |
8519 | 2678 c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx; |
8519 | 2679 c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx; |
8519 | 2680 |
8430 | 2681 c->h264_idct_dc_add= |
2682 c->h264_idct_add= ff_h264_idct_add_mmx; | |
2683 c->h264_idct8_dc_add= | |
2684 c->h264_idct8_add= ff_h264_idct8_add_mmx; | |
2685 | |
2686 c->h264_idct_add16 = ff_h264_idct_add16_mmx; | |
2687 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx; | |
2688 c->h264_idct_add8 = ff_h264_idct_add8_mmx; | |
2689 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx; | |
2690 | |
2691 if (mm_flags & FF_MM_MMXEXT) { | |
2692 c->prefetch = prefetch_mmx2; | |
2693 | |
2694 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; | |
2695 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2; | |
2696 | |
2697 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2; | |
2698 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2; | |
2699 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2; | |
2700 | |
2701 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2; | |
2702 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2; | |
2703 | |
2704 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2; | |
2705 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2; | |
2706 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2; | |
2707 | |
2708 c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2; | |
2709 c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2; | |
2710 c->h264_idct_add16 = ff_h264_idct_add16_mmx2; | |
2711 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2; | |
2712 c->h264_idct_add8 = ff_h264_idct_add8_mmx2; | |
2713 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2; | |
2714 | |
2715 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
2716 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; | |
2717 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2; | |
2718 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2; | |
2719 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; | |
2720 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; | |
2721 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; | |
2722 | |
8596 | 2723 if (CONFIG_VP3_DECODER || CONFIG_THEORA_DECODER) { |
8430 | 2724 c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2; |
2725 c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2; | |
2726 } | |
2727 } | |
2728 | |
2729 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \ | |
2730 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \ | |
2731 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \ | |
2732 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \ | |
2733 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \ | |
2734 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \ | |
2735 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \ | |
2736 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \ | |
2737 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \ | |
2738 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \ | |
2739 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \ | |
2740 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \ | |
2741 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \ | |
2742 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \ | |
2743 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \ | |
2744 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \ | |
2745 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU | |
2746 | |
2747 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2); | |
2748 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2); | |
2749 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2); | |
2750 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2); | |
2751 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2); | |
2752 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2); | |
2753 | |
2754 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2); | |
2755 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2); | |
2756 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2); | |
2757 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2); | |
2758 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2); | |
2759 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2); | |
2760 | |
2761 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2); | |
2762 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2); | |
2763 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2); | |
2764 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2); | |
2765 | |
8519 | 2766 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2; |
8519 | 2767 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2; |
8519 | 2768 |
8430 | 2769 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd; |
2770 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2; | |
2771 c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2; | |
2772 c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2; | |
2773 c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2; | |
2774 c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2; | |
2775 c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2; | |
2776 c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2; | |
2777 c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2; | |
2778 c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2; | |
2779 c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2; | |
2780 | |
2781 c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2; | |
2782 c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2; | |
2783 c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2; | |
2784 c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2; | |
2785 c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2; | |
2786 c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2; | |
2787 c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2; | |
2788 c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2; | |
2789 | |
2790 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2; | |
2791 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2; | |
2792 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2; | |
2793 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2; | |
2794 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2; | |
2795 c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2; | |
2796 c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2; | |
2797 c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2; | |
2798 | |
8760 | 2799 #if HAVE_YASM |
2800 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2; | |
2801 #endif | |
2802 #if HAVE_7REGS | |
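    /* the cmov-based version needs seven GPRs; gating it on 3DNow! presumably
     * restricts it to AMD CPUs, where it beats the MMX2 version set above */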
2803 if( mm_flags&FF_MM_3DNOW ) | |
2804 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov; | |
2805 #endif | |
2806 | |
8596 | 2807 if (CONFIG_CAVS_DECODER) |
8430 | 2808 ff_cavsdsp_init_mmx2(c, avctx); |
2809 | |
8596 | 2810 if (CONFIG_VC1_DECODER || CONFIG_WMV3_DECODER) |
8430 | 2811 ff_vc1dsp_init_mmx(c, avctx); |
2812 | |
2813 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2; | |
2814 } else if (mm_flags & FF_MM_3DNOW) { | |
2815 c->prefetch = prefetch_3dnow; | |
2816 | |
2817 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow; | |
2818 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow; | |
2819 | |
2820 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow; | |
2821 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow; | |
2822 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow; | |
2823 | |
2824 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow; | |
2825 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow; | |
2826 | |
2827 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow; | |
2828 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow; | |
2829 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow; | |
2830 | |
2831 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
2832 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow; | |
2833 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow; | |
2834 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow; | |
2835 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow; | |
2836 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow; | |
2837 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow; | |
2838 } | |
2839 | |
2840 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow); | |
2841 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow); | |
2842 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow); | |
2843 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow); | |
2844 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow); | |
2845 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow); | |
2846 | |
2847 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow); | |
2848 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow); | |
2849 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow); | |
2850 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow); | |
2851 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow); | |
2852 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow); | |
2853 | |
2854 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow); | |
2855 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow); | |
2856 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow); | |
2857 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow); | |
2858 | |
2859 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd; | |
2860 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow; | |
2861 | |
8519 | 2862 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow; |
2863 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow; | |
2864 | |
8596 | 2865 if (CONFIG_CAVS_DECODER) |
8430 | 2866 ff_cavsdsp_init_3dnow(c, avctx); |
2867 } | |
2868 | |
2869 | |
2870 #define H264_QPEL_FUNCS(x, y, CPU)\ | |
2871 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\ | |
2872 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\ | |
2873 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\ | |
2874 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU; | |
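    /* as with SET_QPEL_FUNCS above, entry x + y*4 holds the variant for
     * quarter-pel offset (x,y); table [0] is the 16x16 and [1] the 8x8 case */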
2875 if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){ | |
2876 // these functions are slower than MMX on AMD but faster on Intel; the absence of 3DNow! above serves as a non-AMD (Intel) CPU check | |
2877 /* FIXME works in most codecs, but crashes svq1 due to unaligned chroma | |
2878 c->put_pixels_tab[0][0] = put_pixels16_sse2; | |
2879 c->avg_pixels_tab[0][0] = avg_pixels16_sse2; | |
2880 */ | |
2881 H264_QPEL_FUNCS(0, 0, sse2); | |
2882 } | |
2883 if(mm_flags & FF_MM_SSE2){ | |
2884 c->h264_idct8_add = ff_h264_idct8_add_sse2; | |
2885 c->h264_idct8_add4= ff_h264_idct8_add4_sse2; | |
2886 | |
2887 H264_QPEL_FUNCS(0, 1, sse2); | |
2888 H264_QPEL_FUNCS(0, 2, sse2); | |
2889 H264_QPEL_FUNCS(0, 3, sse2); | |
2890 H264_QPEL_FUNCS(1, 1, sse2); | |
2891 H264_QPEL_FUNCS(1, 2, sse2); | |
2892 H264_QPEL_FUNCS(1, 3, sse2); | |
2893 H264_QPEL_FUNCS(2, 1, sse2); | |
2894 H264_QPEL_FUNCS(2, 2, sse2); | |
2895 H264_QPEL_FUNCS(2, 3, sse2); | |
2896 H264_QPEL_FUNCS(3, 1, sse2); | |
2897 H264_QPEL_FUNCS(3, 2, sse2); | |
2898 H264_QPEL_FUNCS(3, 3, sse2); | |
2899 } | |
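    /* note that the purely horizontal cases (x!=0, y==0) get no SSE2 version
     * here; they keep their MMX2/3DNow! pointers unless the SSSE3 block below
     * replaces them */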
8590 | 2900 #if HAVE_SSSE3 |
8430 | 2901 if(mm_flags & FF_MM_SSSE3){ |
2902 H264_QPEL_FUNCS(1, 0, ssse3); | |
2903 H264_QPEL_FUNCS(1, 1, ssse3); | |
2904 H264_QPEL_FUNCS(1, 2, ssse3); | |
2905 H264_QPEL_FUNCS(1, 3, ssse3); | |
2906 H264_QPEL_FUNCS(2, 0, ssse3); | |
2907 H264_QPEL_FUNCS(2, 1, ssse3); | |
2908 H264_QPEL_FUNCS(2, 2, ssse3); | |
2909 H264_QPEL_FUNCS(2, 3, ssse3); | |
2910 H264_QPEL_FUNCS(3, 0, ssse3); | |
2911 H264_QPEL_FUNCS(3, 1, ssse3); | |
2912 H264_QPEL_FUNCS(3, 2, ssse3); | |
2913 H264_QPEL_FUNCS(3, 3, ssse3); | |
2914 c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd; | |
2915 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd; | |
2916 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd; | |
2917 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3; | |
2918 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3; | |
2919 c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3; | |
2920 } | |
2921 #endif | |
2922 | |
8590 | 2923 #if CONFIG_GPL && HAVE_YASM |
8430 | 2924 if( mm_flags&FF_MM_MMXEXT ){ |
8590 | 2925 #if ARCH_X86_32 |
8430 | 2926 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext; |
2927 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext; | |
2928 #endif | |
8510 | 2929 if( mm_flags&FF_MM_SSE2 ){ |
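    /* the guard below presumably works around 32-bit miscompilation by ICC
     * versions up to 11.0 */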
8590 | 2930 #if ARCH_X86_64 || !defined(__ICC) || __ICC > 1100 |
8430 | 2931 c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2; |
2932 c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2; | |
2933 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2; | |
2934 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2; | |
8510 | 2935 #endif |
2936 c->h264_idct_add16 = ff_h264_idct_add16_sse2; | |
2937 c->h264_idct_add8 = ff_h264_idct_add8_sse2; | |
2938 c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2; | |
8430 | 2939 } |
2940 } | |
2941 #endif | |
2942 | |
8590 | 2943 #if CONFIG_SNOW_DECODER |
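    /* the "& 0" below deliberately keeps the SSE2 snow path disabled,
     * presumably pending fixes, so the MMX/MMXEXT branch is always taken */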
8430 | 2944 if(mm_flags & FF_MM_SSE2 & 0){ |
2945 c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2; | |
8590 | 2946 #if HAVE_7REGS |
8430 | 2947 c->vertical_compose97i = ff_snow_vertical_compose97i_sse2; |
2948 #endif | |
2949 c->inner_add_yblock = ff_snow_inner_add_yblock_sse2; | |
2950 } | |
2951 else{ | |
2952 if(mm_flags & FF_MM_MMXEXT){ | |
2953 c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx; | |
8590 | 2954 #if HAVE_7REGS |
8430 | 2955 c->vertical_compose97i = ff_snow_vertical_compose97i_mmx; |
2956 #endif | |
2957 } | |
2958 c->inner_add_yblock = ff_snow_inner_add_yblock_mmx; | |
2959 } | |
2960 #endif | |
2961 | |
2962 if(mm_flags & FF_MM_3DNOW){ | |
2963 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow; | |
2964 c->vector_fmul = vector_fmul_3dnow; | |
2965 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
2966 c->float_to_int16 = float_to_int16_3dnow; | |
2967 c->float_to_int16_interleave = float_to_int16_interleave_3dnow; | |
2968 } | |
2969 } | |
2970 if(mm_flags & FF_MM_3DNOWEXT){ | |
2971 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2; | |
2972 c->vector_fmul_window = vector_fmul_window_3dnow2; | |
2973 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
2974 c->float_to_int16_interleave = float_to_int16_interleave_3dn2; | |
2975 } | |
2976 } | |
2977 if(mm_flags & FF_MM_SSE){ | |
2978 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse; | |
2979 c->ac3_downmix = ac3_downmix_sse; | |
2980 c->vector_fmul = vector_fmul_sse; | |
2981 c->vector_fmul_reverse = vector_fmul_reverse_sse; | |
2982 c->vector_fmul_add_add = vector_fmul_add_add_sse; | |
2983 c->vector_fmul_window = vector_fmul_window_sse; | |
2984 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse; | |
2985 c->float_to_int16 = float_to_int16_sse; | |
2986 c->float_to_int16_interleave = float_to_int16_interleave_sse; | |
2987 } | |
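    /* note the ordering: on CPUs supporting both, the 3DNow! version below
     * overrides the SSE one set above */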
2988 if(mm_flags & FF_MM_3DNOW) | |
2989 c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse | |
2990 if(mm_flags & FF_MM_SSE2){ | |
2991 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2; | |
2992 c->float_to_int16 = float_to_int16_sse2; | |
2993 c->float_to_int16_interleave = float_to_int16_interleave_sse2; | |
2994 c->add_int16 = add_int16_sse2; | |
2995 c->sub_int16 = sub_int16_sse2; | |
2996 c->scalarproduct_int16 = scalarproduct_int16_sse2; | |
2997 } | |
2998 } | |
2999 | |
8596 | 3000 if (CONFIG_ENCODERS) |
8430 | 3001 dsputilenc_init_mmx(c, avctx); |
3002 | |
3003 #if 0 | |
3004 // for speed testing | |
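    /* when enabled, this stubs all function pointers with just_return so that
     * everything except the DSP routines themselves can be timed */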
3005 get_pixels = just_return; | |
3006 put_pixels_clamped = just_return; | |
3007 add_pixels_clamped = just_return; | |
3008 | |
3009 pix_abs16x16 = just_return; | |
3010 pix_abs16x16_x2 = just_return; | |
3011 pix_abs16x16_y2 = just_return; | |
3012 pix_abs16x16_xy2 = just_return; | |
3013 | |
3014 put_pixels_tab[0] = just_return; | |
3015 put_pixels_tab[1] = just_return; | |
3016 put_pixels_tab[2] = just_return; | |
3017 put_pixels_tab[3] = just_return; | |
3018 | |
3019 put_no_rnd_pixels_tab[0] = just_return; | |
3020 put_no_rnd_pixels_tab[1] = just_return; | |
3021 put_no_rnd_pixels_tab[2] = just_return; | |
3022 put_no_rnd_pixels_tab[3] = just_return; | |
3023 | |
3024 avg_pixels_tab[0] = just_return; | |
3025 avg_pixels_tab[1] = just_return; | |
3026 avg_pixels_tab[2] = just_return; | |
3027 avg_pixels_tab[3] = just_return; | |
3028 | |
3029 avg_no_rnd_pixels_tab[0] = just_return; | |
3030 avg_no_rnd_pixels_tab[1] = just_return; | |
3031 avg_no_rnd_pixels_tab[2] = just_return; | |
3032 avg_no_rnd_pixels_tab[3] = just_return; | |
3033 | |
3034 //av_fdct = just_return; | |
3035 //ff_idct = just_return; | |
3036 #endif | |
3037 } |