Mercurial > libavcodec.hg
annotate x86/dsputil_mmx.c @ 9378:52c348a0740f

Do not use SSE2 SAD for snow as it requires more alignment than can be
easily provided. Fixes issue315.

author:   michael
date:     Thu, 09 Apr 2009 21:53:48 +0000
parents:  7f594601d5e9
children: ef3a7b711cc0

/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "vp6dsp_mmx.h"
#include "vp6dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd "   \n\t"\
    "paddb %%" #regd ", %%" #regd "     \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for a shared library it is better to use this way of accessing constants
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd "   \n\t" \
    "psrlw $15, %%" #regd "             \n\t" \
    "packuswb %%" #regd ", %%" #regd "  \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd "   \n\t" \
    "psrlw $15, %%" #regd "             \n\t" \
    "psllw $1, %%" #regd "              \n\t"::)

#endif

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "          \n\t"\
    "pand " #regb ", " #regr "          \n\t"\
    "pxor " #rega ", " #regb "          \n\t"\
    "pand " #regfe "," #regb "          \n\t"\
    "psrlq $1, " #regb "                \n\t"\
    "paddb " #regb ", " #regr "         \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "          \n\t"\
    "por  " #regb ", " #regr "          \n\t"\
    "pxor " #rega ", " #regb "          \n\t"\
    "pand " #regfe "," #regb "          \n\t"\
    "psrlq $1, " #regb "                \n\t"\
    "psubb " #regb ", " #regr "         \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "          \n\t"\
    "movq " #regc ", " #regp "          \n\t"\
    "pand " #regb ", " #regr "          \n\t"\
    "pand " #regd ", " #regp "          \n\t"\
    "pxor " #rega ", " #regb "          \n\t"\
    "pxor " #regc ", " #regd "          \n\t"\
    "pand %%mm6, " #regb "              \n\t"\
    "pand %%mm6, " #regd "              \n\t"\
    "psrlq $1, " #regb "                \n\t"\
    "psrlq $1, " #regd "                \n\t"\
    "paddb " #regb ", " #regr "         \n\t"\
    "paddb " #regd ", " #regp "         \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "          \n\t"\
    "movq " #regc ", " #regp "          \n\t"\
    "por  " #regb ", " #regr "          \n\t"\
    "por  " #regd ", " #regp "          \n\t"\
    "pxor " #rega ", " #regb "          \n\t"\
    "pxor " #regc ", " #regd "          \n\t"\
    "pand %%mm6, " #regb "              \n\t"\
    "pand %%mm6, " #regd "              \n\t"\
    "psrlq $1, " #regd "                \n\t"\
    "psrlq $1, " #regb "                \n\t"\
    "psubb " #regb ", " #regr "         \n\t"\
    "psubb " #regd ", " #regp "         \n\t"

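/* The four averaging macros above avoid MMX2's pavgb by using the bit tricks
 *     (a + b) >> 1     == (a & b) + ((a ^ b) >> 1)    (no rounding)
 *     (a + b + 1) >> 1 == (a | b) - ((a ^ b) >> 1)    (rounding)
 * MMX has no byte-granular shift, so the 0xfe mask (regfe/mm6) clears the low
 * bit of every byte before the 64-bit psrlq, preventing bits from one byte
 * leaking into the next. */
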
/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq   %3, %%mm0               \n\t"
        "movq   8%3, %%mm1              \n\t"
        "movq   16%3, %%mm2             \n\t"
        "movq   24%3, %%mm3             \n\t"
        "movq   32%3, %%mm4             \n\t"
        "movq   40%3, %%mm5             \n\t"
        "movq   48%3, %%mm6             \n\t"
        "movq   56%3, %%mm7             \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq   %%mm0, (%0)             \n\t"
        "movq   %%mm2, (%0, %1)         \n\t"
        "movq   %%mm4, (%0, %1, 2)      \n\t"
        "movq   %%mm6, (%0, %2)         \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;

    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus using "r" for the block
    // pointer here
    __asm__ volatile(
        "movq   (%3), %%mm0             \n\t"
        "movq   8(%3), %%mm1            \n\t"
        "movq   16(%3), %%mm2           \n\t"
        "movq   24(%3), %%mm3           \n\t"
        "movq   32(%3), %%mm4           \n\t"
        "movq   40(%3), %%mm5           \n\t"
        "movq   48(%3), %%mm6           \n\t"
        "movq   56(%3), %%mm7           \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "packuswb %%mm3, %%mm2          \n\t"
        "packuswb %%mm5, %%mm4          \n\t"
        "packuswb %%mm7, %%mm6          \n\t"
        "movq   %%mm0, (%0)             \n\t"
        "movq   %%mm2, (%0, %1)         \n\t"
        "movq   %%mm4, (%0, %1, 2)      \n\t"
        "movq   %%mm6, (%0, %2)         \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}

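/* Note: the clamping promised by the function name is done entirely by
 * packuswb, which saturates each signed 16-bit coefficient to [0,255]
 * while packing two registers of words down to one register of bytes. */
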
DECLARE_ASM_CONST(8, uint8_t, ff_vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

#define put_signed_pixels_clamped_mmx_half(off) \
        "movq    "#off"(%2), %%mm1          \n\t"\
        "movq 16+"#off"(%2), %%mm2          \n\t"\
        "movq 32+"#off"(%2), %%mm3          \n\t"\
        "movq 48+"#off"(%2), %%mm4          \n\t"\
        "packsswb  8+"#off"(%2), %%mm1      \n\t"\
        "packsswb 24+"#off"(%2), %%mm2      \n\t"\
        "packsswb 40+"#off"(%2), %%mm3      \n\t"\
        "packsswb 56+"#off"(%2), %%mm4      \n\t"\
        "paddb %%mm0, %%mm1                 \n\t"\
        "paddb %%mm0, %%mm2                 \n\t"\
        "paddb %%mm0, %%mm3                 \n\t"\
        "paddb %%mm0, %%mm4                 \n\t"\
        "movq %%mm1, (%0)                   \n\t"\
        "movq %%mm2, (%0, %3)               \n\t"\
        "movq %%mm3, (%0, %3, 2)            \n\t"\
        "movq %%mm4, (%0, %1)               \n\t"

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1                \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0                \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        :"+&r" (pixels), "=&r" (line_skip3)
        :"r" (block), "r"(line_skip)
        :"memory");
}

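/* packsswb saturates the coefficients to signed [-128,127]; the wrapping
 * paddb with the 0x80 bytes of ff_vector128 then shifts that range onto
 * unsigned [0,255], i.e. it applies the +128 bias of signed pixel output. */
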
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq   (%2), %%mm0         \n\t"
            "movq   8(%2), %%mm1        \n\t"
            "movq   16(%2), %%mm2       \n\t"
            "movq   24(%2), %%mm3       \n\t"
            "movq   %0, %%mm4           \n\t"
            "movq   %1, %%mm6           \n\t"
            "movq   %%mm4, %%mm5        \n\t"
            "punpcklbw %%mm7, %%mm4     \n\t"
            "punpckhbw %%mm7, %%mm5     \n\t"
            "paddsw %%mm4, %%mm0        \n\t"
            "paddsw %%mm5, %%mm1        \n\t"
            "movq   %%mm6, %%mm5        \n\t"
            "punpcklbw %%mm7, %%mm6     \n\t"
            "punpckhbw %%mm7, %%mm5     \n\t"
            "paddsw %%mm6, %%mm2        \n\t"
            "paddsw %%mm5, %%mm3        \n\t"
            "packuswb %%mm1, %%mm0      \n\t"
            "packuswb %%mm3, %%mm2      \n\t"
            "movq   %%mm0, %0           \n\t"
            "movq   %%mm2, %1           \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

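/* In add_pixels_clamped_mmx, punpcklbw/punpckhbw against the zeroed mm7
 * widen each pixel byte to a 16-bit word, paddsw adds the coefficients with
 * signed saturation, and packuswb clamps the sums back to [0,255] bytes. */
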
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"        \n\t"
        ASMALIGN(3)
        "1:                             \n\t"
        "movd (%1), %%mm0               \n\t"
        "movd (%1, %3), %%mm1           \n\t"
        "movd %%mm0, (%2)               \n\t"
        "movd %%mm1, (%2, %3)           \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "movd (%1), %%mm0               \n\t"
        "movd (%1, %3), %%mm1           \n\t"
        "movd %%mm0, (%2)               \n\t"
        "movd %%mm1, (%2, %3)           \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "subl $4, %0                    \n\t"
        "jnz 1b                         \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"        \n\t"
        ASMALIGN(3)
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "movq %%mm0, (%2)               \n\t"
        "movq %%mm1, (%2, %3)           \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "movq %%mm0, (%2)               \n\t"
        "movq %%mm1, (%2, %3)           \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "subl $4, %0                    \n\t"
        "jnz 1b                         \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a"        \n\t"
        ASMALIGN(3)
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq 8(%1), %%mm4              \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq %%mm0, (%2)               \n\t"
        "movq %%mm4, 8(%2)              \n\t"
        "movq %%mm1, (%2, %3)           \n\t"
        "movq %%mm5, 8(%2, %3)          \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq 8(%1), %%mm4              \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "movq 8(%1, %3), %%mm5          \n\t"
        "movq %%mm0, (%2)               \n\t"
        "movq %%mm4, 8(%2)              \n\t"
        "movq %%mm1, (%2, %3)           \n\t"
        "movq %%mm5, 8(%2, %3)          \n\t"
        "add %%"REG_a", %1              \n\t"
        "add %%"REG_a", %2              \n\t"
        "subl $4, %0                    \n\t"
        "jnz 1b                         \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1,%3), %%xmm1         \n\t"
        "movdqu (%1,%3,2), %%xmm2       \n\t"
        "movdqu (%1,%4), %%xmm3         \n\t"
        "movdqa %%xmm0, (%2)            \n\t"
        "movdqa %%xmm1, (%2,%3)         \n\t"
        "movdqa %%xmm2, (%2,%3,2)       \n\t"
        "movdqa %%xmm3, (%2,%4)         \n\t"
        "subl $4, %0                    \n\t"
        "lea (%1,%3,4), %1              \n\t"
        "lea (%2,%3,4), %2              \n\t"
        "jnz 1b                         \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
    );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1,%3), %%xmm1         \n\t"
        "movdqu (%1,%3,2), %%xmm2       \n\t"
        "movdqu (%1,%4), %%xmm3         \n\t"
        "pavgb  (%2), %%xmm0            \n\t"
        "pavgb  (%2,%3), %%xmm1         \n\t"
        "pavgb  (%2,%3,2), %%xmm2       \n\t"
        "pavgb  (%2,%4), %%xmm3         \n\t"
        "movdqa %%xmm0, (%2)            \n\t"
        "movdqa %%xmm1, (%2,%3)         \n\t"
        "movdqa %%xmm2, (%2,%3,2)       \n\t"
        "movdqa %%xmm3, (%2,%4)         \n\t"
        "subl $4, %0                    \n\t"
        "lea (%1,%3,4), %1              \n\t"
        "lea (%2,%3,4), %2              \n\t"
        "jnz 1b                         \n\t"
        : "+g"(h), "+r" (pixels),  "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
    );
}

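/* Both SSE2 versions load the source rows with movdqu (no alignment assumed)
 * but store with movdqa, so the destination block and line_size must keep
 * every output row 16-byte aligned. Alignment requirements of this kind are
 * why this changeset stops using the SSE2 SAD for snow (see the commit
 * message above). */
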
#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "mov     %1, %%"REG_a"          \n\t"\
        "1:                             \n\t"\
        "movq %%mm7, (%0, %%"REG_a")    \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a")   \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
        "add $32, %%"REG_a"             \n\t"\
        " js 1b                         \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0          \n"
        "movaps %%xmm0,    (%0)         \n"
        "movaps %%xmm0,  16(%0)         \n"
        "movaps %%xmm0,  32(%0)         \n"
        "movaps %%xmm0,  48(%0)         \n"
        "movaps %%xmm0,  64(%0)         \n"
        "movaps %%xmm0,  80(%0)         \n"
        "movaps %%xmm0,  96(%0)         \n"
        "movaps %%xmm0, 112(%0)         \n"
        :: "r"(block)
        : "memory"
    );
}

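/* A DCT block is 64 16-bit DCTELEMs, i.e. 128 bytes: clear_block_sse zeroes
 * one block with eight 16-byte movaps stores, while clear_blocks_mmx walks
 * backwards over 6*128 bytes, a full macroblock's worth of blocks. */
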
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %3, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq   (%2, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb  (%3, %0), %%mm0         \n\t"
        "paddb 8(%3, %0), %%mm1         \n\t"
        "movq %%mm0,  (%1, %0)          \n\t"
        "movq %%mm1, 8(%1, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %4, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

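/* Both add_bytes variants run the MMX loop only while i < w-15, i.e. while a
 * whole 16-byte chunk remains; the plain C loop afterwards handles the last
 * few (fewer than 16) bytes. */
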
#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov    %7, %3                  \n"
        "1:                             \n"
        "movzx (%3,%4), %2              \n"
        "mov    %2, %k3                 \n"
        "sub   %b1, %b3                 \n"
        "add   %b0, %b3                 \n"
        "mov    %2, %1                  \n"
        "cmp    %0, %2                  \n"
        "cmovg  %0, %2                  \n"
        "cmovg  %1, %0                  \n"
        "cmp   %k3, %0                  \n"
        "cmovg %k3, %0                  \n"
        "mov    %7, %3                  \n"
        "cmp    %2, %0                  \n"
        "cmovl  %2, %0                  \n"
        "add (%6,%4), %b0               \n"
        "mov   %b0, (%5,%4)             \n"
        "inc    %4                      \n"
        "jl 1b                          \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif

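/* The cmov version above implements HuffYUV median prediction. As a rough C
 * sketch of the logic (not of the exact register dance; mid() stands for a
 * hypothetical 3-way median helper):
 *
 *     for (i = 0; i < w; i++) {
 *         int pred = mid(l, top[i], (l + top[i] - tl) & 0xff);
 *         l = dst[i] = (pred + diff[i]) & 0xff;
 *         tl = top[i];
 *     }
 *
 * The cmovs compute the median branchlessly; the HAVE_TEN_OPERANDS guard
 * exists because older compilers cannot handle this many asm operands. */
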
#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7              \n\t"\
    "movq  %0, %%mm0                \n\t"\
    "movq  %0, %%mm1                \n\t"\
    "movq  %3, %%mm2                \n\t"\
    "movq  %3, %%mm3                \n\t"\
    "punpcklbw %%mm7, %%mm0         \n\t"\
    "punpckhbw %%mm7, %%mm1         \n\t"\
    "punpcklbw %%mm7, %%mm2         \n\t"\
    "punpckhbw %%mm7, %%mm3         \n\t"\
    "psubw %%mm2, %%mm0             \n\t"\
    "psubw %%mm3, %%mm1             \n\t"\
    "movq  %1, %%mm2                \n\t"\
    "movq  %1, %%mm3                \n\t"\
    "movq  %2, %%mm4                \n\t"\
    "movq  %2, %%mm5                \n\t"\
    "punpcklbw %%mm7, %%mm2         \n\t"\
    "punpckhbw %%mm7, %%mm3         \n\t"\
    "punpcklbw %%mm7, %%mm4         \n\t"\
    "punpckhbw %%mm7, %%mm5         \n\t"\
    "psubw %%mm2, %%mm4             \n\t"\
    "psubw %%mm3, %%mm5             \n\t"\
    "psllw $2, %%mm4                \n\t"\
    "psllw $2, %%mm5                \n\t"\
    "paddw %%mm0, %%mm4             \n\t"\
    "paddw %%mm1, %%mm5             \n\t"\
    "pxor %%mm6, %%mm6              \n\t"\
    "pcmpgtw %%mm4, %%mm6           \n\t"\
    "pcmpgtw %%mm5, %%mm7           \n\t"\
    "pxor %%mm6, %%mm4              \n\t"\
    "pxor %%mm7, %%mm5              \n\t"\
    "psubw %%mm6, %%mm4             \n\t"\
    "psubw %%mm7, %%mm5             \n\t"\
    "psrlw $3, %%mm4                \n\t"\
    "psrlw $3, %%mm5                \n\t"\
    "packuswb %%mm5, %%mm4          \n\t"\
    "packsswb %%mm7, %%mm6          \n\t"\
    "pxor %%mm7, %%mm7              \n\t"\
    "movd %4, %%mm2                 \n\t"\
    "punpcklbw %%mm2, %%mm2         \n\t"\
    "punpcklbw %%mm2, %%mm2         \n\t"\
    "punpcklbw %%mm2, %%mm2         \n\t"\
    "psubusb %%mm4, %%mm2           \n\t"\
    "movq %%mm2, %%mm3              \n\t"\
    "psubusb %%mm4, %%mm3           \n\t"\
    "psubb %%mm3, %%mm2             \n\t"\
    "movq %1, %%mm3                 \n\t"\
    "movq %2, %%mm4                 \n\t"\
    "pxor %%mm6, %%mm3              \n\t"\
    "pxor %%mm6, %%mm4              \n\t"\
    "paddusb %%mm2, %%mm3           \n\t"\
    "psubusb %%mm2, %%mm4           \n\t"\
    "pxor %%mm6, %%mm3              \n\t"\
    "pxor %%mm6, %%mm4              \n\t"\
    "paddusb %%mm2, %%mm2           \n\t"\
    "packsswb %%mm1, %%mm0          \n\t"\
    "pcmpgtb %%mm0, %%mm7           \n\t"\
    "pxor %%mm7, %%mm0              \n\t"\
    "psubb %%mm7, %%mm0             \n\t"\
    "movq %%mm0, %%mm1              \n\t"\
    "psubusb %%mm2, %%mm0           \n\t"\
    "psubb %%mm0, %%mm1             \n\t"\
    "pand %5, %%mm1                 \n\t"\
    "psrlw $2, %%mm1                \n\t"\
    "pxor %%mm7, %%mm1              \n\t"\
    "psubb %%mm7, %%mm1             \n\t"\
    "movq %0, %%mm5                 \n\t"\
    "movq %3, %%mm6                 \n\t"\
    "psubb %%mm1, %%mm5             \n\t"\
    "paddb %%mm1, %%mm6             \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1             \n\t"
        "movq %%mm4, %2             \n\t"
        "movq %%mm5, %0             \n\t"
        "movq %%mm6, %3             \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

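/* H263_LOOP_FILTER works on four rows A,B,C,D (%0..%3) straddling the block
 * edge: it forms the difference d = (A - D + 4*(C - B)) / 8, limits it by
 * the qscale-dependent strength table, then corrects the inner pair B and C
 * in opposite directions and applies a smaller correction to A and D. */
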
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}

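/* Three rounds of punpck interleaving (bytes, then words, then splitting
 * dwords with punpckhdq) are the classic MMX way to transpose a 4x4 block of
 * bytes; it lets the horizontal loop filter below reuse the vertical filter
 * code. */
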
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1          \n\t"
        "movq %%mm4, %%mm0          \n\t"
        "punpcklbw %%mm3, %%mm5     \n\t"
        "punpcklbw %%mm6, %%mm4     \n\t"
        "punpckhbw %%mm3, %%mm1     \n\t"
        "punpckhbw %%mm6, %%mm0     \n\t"
        "movq %%mm5, %%mm3          \n\t"
        "movq %%mm1, %%mm6          \n\t"
        "punpcklwd %%mm4, %%mm5     \n\t"
        "punpcklwd %%mm0, %%mm1     \n\t"
        "punpckhwd %%mm4, %%mm3     \n\t"
        "punpckhwd %%mm0, %%mm6     \n\t"
        "movd %%mm5, (%0)           \n\t"
        "punpckhdq %%mm5, %%mm5     \n\t"
        "movd %%mm5, (%0,%2)        \n\t"
        "movd %%mm3, (%0,%2,2)      \n\t"
        "punpckhdq %%mm3, %%mm3     \n\t"
        "movd %%mm3, (%0,%3)        \n\t"
        "movd %%mm1, (%1)           \n\t"
        "punpckhdq %%mm1, %%mm1     \n\t"
        "movd %%mm1, (%1,%2)        \n\t"
        "movd %%mm6, (%1,%2,2)      \n\t"
        "punpckhdq %%mm6, %%mm6     \n\t"
        "movd %%mm6, (%1,%3)        \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg) stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* draw the edges of width 'w' of an image of size width, height
   this mmx version can only handle w==8 || w==16 */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1:                             \n\t"
            "movd (%0), %%mm0               \n\t"
            "punpcklbw %%mm0, %%mm0         \n\t"
            "punpcklwd %%mm0, %%mm0         \n\t"
            "punpckldq %%mm0, %%mm0         \n\t"
            "movq %%mm0, -8(%0)             \n\t"
            "movq -8(%0, %2), %%mm1         \n\t"
            "punpckhbw %%mm1, %%mm1         \n\t"
            "punpckhwd %%mm1, %%mm1         \n\t"
            "punpckhdq %%mm1, %%mm1         \n\t"
            "movq %%mm1, (%0, %2)           \n\t"
            "add %1, %0                     \n\t"
            "cmp %3, %0                     \n\t"
            " jb 1b                         \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1:                             \n\t"
            "movd (%0), %%mm0               \n\t"
            "punpcklbw %%mm0, %%mm0         \n\t"
            "punpcklwd %%mm0, %%mm0         \n\t"
            "punpckldq %%mm0, %%mm0         \n\t"
            "movq %%mm0, -8(%0)             \n\t"
            "movq %%mm0, -16(%0)            \n\t"
            "movq -8(%0, %2), %%mm1         \n\t"
            "punpckhbw %%mm1, %%mm1         \n\t"
            "punpckhwd %%mm1, %%mm1         \n\t"
            "punpckhdq %%mm1, %%mm1         \n\t"
            "movq %%mm1, (%0, %2)           \n\t"
            "movq %%mm1, 8(%0, %2)          \n\t"
            "add %1, %0                     \n\t"
            "cmp %3, %0                     \n\t"
            " jb 1b                         \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                             \n\t"
            "movq (%1, %0), %%mm0           \n\t"
            "movq %%mm0, (%0)               \n\t"
            "movq %%mm0, (%0, %2)           \n\t"
            "movq %%mm0, (%0, %2, 2)        \n\t"
            "movq %%mm0, (%0, %3)           \n\t"
            "add $8, %0                     \n\t"
            "cmp %4, %0                     \n\t"
            " jb 1b                         \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1:                             \n\t"
            "movq (%1, %0), %%mm0           \n\t"
            "movq %%mm0, (%0)               \n\t"
            "movq %%mm0, (%0, %2)           \n\t"
            "movq %%mm0, (%0, %2, 2)        \n\t"
            "movq %%mm0, (%0, %3)           \n\t"
            "add $8, %0                     \n\t"
            "cmp %4, %0                     \n\t"
            " jb 1b                         \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7     \n"\
        "movd    (%1,%0), %%mm0     \n"\
        "movd    (%2,%0), %%mm1     \n"\
        "punpcklbw %%mm7, %%mm0     \n"\
        "punpcklbw %%mm7, %%mm1     \n"\
        "add       %4, %0           \n"\
        "1:                         \n"\
        "movq      %%mm1, %%mm2     \n"\
        "movd    (%2,%0), %%mm1     \n"\
        "movq      %%mm2, %%mm3     \n"\
        "punpcklbw %%mm7, %%mm1     \n"\
        "movq      %%mm2, %%mm4     \n"\
        "psubw     %%mm1, %%mm3     \n"\
        "psubw     %%mm0, %%mm4     \n"\
        "movq      %%mm3, %%mm5     \n"\
        "paddw     %%mm4, %%mm5     \n"\
        abs3\
        "movq      %%mm4, %%mm6     \n"\
        "pminsw    %%mm5, %%mm6     \n"\
        "pcmpgtw   %%mm6, %%mm3     \n"\
        "pcmpgtw   %%mm5, %%mm4     \n"\
        "movq      %%mm4, %%mm6     \n"\
        "pand      %%mm3, %%mm4     \n"\
        "pandn     %%mm3, %%mm6     \n"\
        "pandn     %%mm0, %%mm3     \n"\
        "movd    (%3,%0), %%mm0     \n"\
        "pand      %%mm1, %%mm6     \n"\
        "pand      %%mm4, %%mm2     \n"\
        "punpcklbw %%mm7, %%mm0     \n"\
        "movq      %6,    %%mm5     \n"\
        "paddw     %%mm6, %%mm0     \n"\
        "paddw     %%mm2, %%mm3     \n"\
        "paddw     %%mm3, %%mm0     \n"\
        "pand      %%mm5, %%mm0     \n"\
        "movq      %%mm0, %%mm3     \n"\
        "packuswb  %%mm3, %%mm3     \n"\
        "movd      %%mm3, (%1,%0)   \n"\
        "add       %4, %0           \n"\
        "cmp       %5, %0           \n"\
        "jle 1b                     \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

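/* PNG Paeth prediction: estimate p = left + top - topleft and add whichever
 * neighbour (left, top or topleft) lies closest to p. The asm computes the
 * distances pa = |top - topleft|, pb = |left - topleft| and
 * pc = |left + top - 2*topleft| and selects via compare/and/andn masks; the
 * abs3 argument supplies the absolute values (pmaxsw tricks on MMX2, pabsw
 * on SSSE3, see below). */
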
#define ABS3_MMX2\
    "psubw     %%mm5, %%mm7     \n"\
    "pmaxsw    %%mm7, %%mm5     \n"\
    "pxor      %%mm6, %%mm6     \n"\
    "pxor      %%mm7, %%mm7     \n"\
    "psubw     %%mm3, %%mm6     \n"\
    "psubw     %%mm4, %%mm7     \n"\
    "pmaxsw    %%mm6, %%mm3     \n"\
    "pmaxsw    %%mm7, %%mm4     \n"\
    "pxor      %%mm7, %%mm7     \n"

#define ABS3_SSSE3\
    "pabsw     %%mm3, %%mm3     \n"\
    "pabsw     %%mm4, %%mm4     \n"\
    "pabsw     %%mm5, %%mm5     \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 "             \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4     \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4              \n\t" /* 20x1 */\
    "movq "#in7", " #m3 "               \n\t" /* d */\
    "movq "#in0", %%mm5                 \n\t" /* D */\
    "paddw " #m3 ", %%mm5               \n\t" /* x4 */\
    "psubw %%mm5, %%mm4                 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5                 \n\t" /* C */\
    "movq "#in2", %%mm6                 \n\t" /* B */\
    "paddw " #m6 ", %%mm5               \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6               \n\t" /* x2 */\
    "paddw %%mm6, %%mm6                 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5                 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5    \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4              \n\t" /* x2 */\
    "paddw %%mm4, %%mm5                 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5                    \n\t"\
    "packuswb %%mm5, %%mm5              \n\t"\
    OP(%%mm5, out, %%mm7, d)

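/* QPEL_V_LOW evaluates one row of the MPEG-4 half-pel lowpass filter
 * (-1, 3, -6, 20, 20, -6, 3, -1)/32: x1..x4 are the symmetric pair sums and
 * the result is (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5, packed back to
 * bytes with unsigned saturation. */
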
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
\
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
\
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0         \n\t"\
            "movq 24(%0), %%mm1         \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %5, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
\
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
\
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

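/* QPEL_OP instantiates the quarter-pel motion compensation positions
 * qpel*_mcXY, where X and Y are the quarter-sample offsets (0..3): even
 * offsets use a half-pel lowpass plane directly (mc20, mc02), odd ones
 * average it with the nearest integer- or half-pel plane, e.g. mc10
 * averages src with the horizontally filtered plane. */
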
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "movq 8(%0), %%mm2              \n\t"\
        "movq 8(%0), %%mm3              \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 17*8(%1)           \n\t"\
        "movq %%mm2, 2*17*8(%1)         \n\t"\
        "movq %%mm3, 3*17*8(%1)         \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7          \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"  \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0                   \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7          \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
1366 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ | |
1367 OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\ | |
1368 }\ | |
1369 \ | |
1370 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1371 uint64_t temp[8];\ | |
1372 uint8_t * const half= (uint8_t*)temp;\ | |
1373 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\ | |
1374 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\ | |
1375 }\ | |
1376 \ | |
1377 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1378 OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\ | |
1379 }\ | |
1380 \ | |
1381 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1382 uint64_t temp[8];\ | |
1383 uint8_t * const half= (uint8_t*)temp;\ | |
1384 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\ | |
1385 OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\ | |
1386 }\ | |
1387 \ | |
1388 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1389 uint64_t temp[8];\ | |
1390 uint8_t * const half= (uint8_t*)temp;\ | |
1391 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\ | |
1392 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\ | |
1393 }\ | |
1394 \ | |
1395 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1396 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\ | |
1397 }\ | |
1398 \ | |
1399 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1400 uint64_t temp[8];\ | |
1401 uint8_t * const half= (uint8_t*)temp;\ | |
1402 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\ | |
1403 OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\ | |
1404 }\ | |
1405 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1406 uint64_t half[8 + 9];\ | |
1407 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
1408 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1409 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
1410 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\ | |
1411 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ | |
1412 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\ | |
1413 }\ | |
1414 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1415 uint64_t half[8 + 9];\ | |
1416 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
1417 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1418 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
1419 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\ | |
1420 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ | |
1421 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\ | |
1422 }\ | |
1423 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1424 uint64_t half[8 + 9];\ | |
1425 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
1426 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1427 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
1428 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\ | |
1429 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ | |
1430 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\ | |
1431 }\ | |
1432 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1433 uint64_t half[8 + 9];\ | |
1434 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
1435 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1436 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
1437 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\ | |
1438 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ | |
1439 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\ | |
1440 }\ | |
1441 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1442 uint64_t half[8 + 9];\ | |
1443 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
1444 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1445 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
1446 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ | |
1447 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\ | |
1448 }\ | |
1449 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1450 uint64_t half[8 + 9];\ | |
1451 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
1452 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1453 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
1454 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ | |
1455 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\ | |
1456 }\ | |
1457 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1458 uint64_t half[8 + 9];\ | |
1459 uint8_t * const halfH= ((uint8_t*)half);\ | |
1460 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
1461 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\ | |
1462 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\ | |
1463 }\ | |
1464 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1465 uint64_t half[8 + 9];\ | |
1466 uint8_t * const halfH= ((uint8_t*)half);\ | |
1467 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
1468 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\ | |
1469 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\ | |
1470 }\ | |
1471 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1472 uint64_t half[9];\ | |
1473 uint8_t * const halfH= ((uint8_t*)half);\ | |
1474 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
1475 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\ | |
1476 }\ | |
1477 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ | |
1478 OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\ | |
1479 }\ | |
1480 \ | |
1481 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1482 uint64_t temp[32];\ | |
1483 uint8_t * const half= (uint8_t*)temp;\ | |
1484 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\ | |
1485 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\ | |
1486 }\ | |
1487 \ | |
1488 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1489 OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\ | |
1490 }\ | |
1491 \ | |
1492 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1493 uint64_t temp[32];\ | |
1494 uint8_t * const half= (uint8_t*)temp;\ | |
1495 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\ | |
1496 OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\ | |
1497 }\ | |
1498 \ | |
1499 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1500 uint64_t temp[32];\ | |
1501 uint8_t * const half= (uint8_t*)temp;\ | |
1502 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\ | |
1503 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\ | |
1504 }\ | |
1505 \ | |
1506 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1507 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\ | |
1508 }\ | |
1509 \ | |
1510 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1511 uint64_t temp[32];\ | |
1512 uint8_t * const half= (uint8_t*)temp;\ | |
1513 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\ | |
1514 OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\ | |
1515 }\ | |
1516 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1517 uint64_t half[16*2 + 17*2];\ | |
1518 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1519 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1520 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1521 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ | |
1522 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1523 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ | |
1524 }\ | |
1525 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1526 uint64_t half[16*2 + 17*2];\ | |
1527 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1528 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1529 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1530 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ | |
1531 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1532 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ | |
1533 }\ | |
1534 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1535 uint64_t half[16*2 + 17*2];\ | |
1536 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1537 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1538 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1539 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ | |
1540 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1541 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ | |
1542 }\ | |
1543 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1544 uint64_t half[16*2 + 17*2];\ | |
1545 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1546 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1547 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1548 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ | |
1549 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1550 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ | |
1551 }\ | |
1552 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1553 uint64_t half[16*2 + 17*2];\ | |
1554 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1555 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1556 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1557 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1558 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ | |
1559 }\ | |
1560 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1561 uint64_t half[16*2 + 17*2];\ | |
1562 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
1563 uint8_t * const halfHV= ((uint8_t*)half);\ | |
1564 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1565 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ | |
1566 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ | |
1567 }\ | |
1568 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1569 uint64_t half[17*2];\ | |
1570 uint8_t * const halfH= ((uint8_t*)half);\ | |
1571 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1572 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ | |
1573 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ | |
1574 }\ | |
1575 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1576 uint64_t half[17*2];\ | |
1577 uint8_t * const halfH= ((uint8_t*)half);\ | |
1578 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1579 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ | |
1580 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ | |
1581 }\ | |
1582 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1583 uint64_t half[17*2];\ | |
1584 uint8_t * const halfH= ((uint8_t*)half);\ | |
1585 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
1586 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ | |
1587 } | |
1588 | |
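/* Note on naming: each qpelN_mcXY function above computes the quarter-pel
 * position (X/4, Y/4) to the right of/below the full-pel sample, combining
 * the 6-tap h/v lowpass filters with a rounded average. A scalar sketch of
 * the l2 blending step (illustrative only; the real pixels*_l2 helpers are
 * MMX, the _no_rnd variants omit the +1, and the fixed stride of 8 for the
 * second source is an assumption matching the "half" temp buffers above):
 */
#if 0
static void put_pixels8_l2_ref(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
                               int dstStride, int src1Stride, int h)
{
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (src1[x] + src2[x] + 1) >> 1; /* rounded average */
        dst  += dstStride;
        src1 += src1Stride;
        src2 += 8; /* temp buffer stride */
    }
}
#endif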
1589 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t" | |
1590 #define AVG_3DNOW_OP(a,b,temp, size) \ | |
1591 "mov" #size " " #b ", " #temp " \n\t"\ | |
1592 "pavgusb " #temp ", " #a " \n\t"\ | |
1593 "mov" #size " " #a ", " #b " \n\t" | |
1594 #define AVG_MMX2_OP(a,b,temp, size) \ | |
1595 "mov" #size " " #b ", " #temp " \n\t"\ | |
1596 "pavgb " #temp ", " #a " \n\t"\ | |
1597 "mov" #size " " #a ", " #b " \n\t" | |
1598 | |
1599 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP) | |
1600 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP) | |
1601 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP) | |
1602 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow) | |
1603 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow) | |
1604 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow) | |
1605 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2) | |
1606 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2) | |
1607 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2) | |
1608 | |
1609 /***********************************/ | |
1610 /* bilinear qpel: not compliant with any spec, used only for -lavdopts fast */
1611 | |
1612 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\ | |
1613 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1614 OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\ | |
1615 } | |
1616 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\ | |
1617 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1618 OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\ | |
1619 } | |
1620 | |
1621 #define QPEL_2TAP(OPNAME, SIZE, MMX)\ | |
1622 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\ | |
1623 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\ | |
1624 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\ | |
1625 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\ | |
1626 OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\ | |
1627 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\ | |
1628 OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\ | |
1629 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\ | |
1630 OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\ | |
1631 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1632 OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\ | |
1633 }\ | |
1634 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1635 OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\ | |
1636 }\ | |
1637 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\ | |
1638 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\ | |
1639 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\ | |
1640 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\ | |
1641 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\ | |
1642 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\ | |
1643 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\ | |
1644 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\ | |
1645 | |
1646 QPEL_2TAP(put_, 16, mmx2) | |
1647 QPEL_2TAP(avg_, 16, mmx2) | |
1648 QPEL_2TAP(put_, 8, mmx2) | |
1649 QPEL_2TAP(avg_, 8, mmx2) | |
1650 QPEL_2TAP(put_, 16, 3dnow) | |
1651 QPEL_2TAP(avg_, 16, 3dnow) | |
1652 QPEL_2TAP(put_, 8, 3dnow) | |
1653 QPEL_2TAP(avg_, 8, 3dnow) | |
1654 | |
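/* The 2-tap scheme maps mc20/mc02/mc22 straight onto the half-pel average
 * functions and builds the remaining positions from l3 blends of nearby
 * samples (the QPEL_2TAP_L3 cases above). A scalar sketch of the _x2_
 * half-pel average (illustrative only; the real versions are the MMX
 * pixels*_x2 helpers):
 */
#if 0
static void put_pixels8_x2_ref(uint8_t *dst, const uint8_t *src, int stride, int h)
{
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            dst[x] = (src[x] + src[x + 1] + 1) >> 1; /* rounded horizontal average */
        dst += stride;
        src += stride;
    }
}
#endif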
1655 | |
1656 #if 0 | |
8527 | 1657 static void just_return(void) { return; }
8430 | 1658 #endif |
1659 | |
1660 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, | |
1661 int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){ | |
1662 const int w = 8; | |
1663 const int ix = ox>>(16+shift); | |
1664 const int iy = oy>>(16+shift); | |
1665 const int oxs = ox>>4; | |
1666 const int oys = oy>>4; | |
1667 const int dxxs = dxx>>4; | |
1668 const int dxys = dxy>>4; | |
1669 const int dyxs = dyx>>4; | |
1670 const int dyys = dyy>>4; | |
1671 const uint16_t r4[4] = {r,r,r,r}; | |
1672 const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys}; | |
1673 const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys}; | |
1674 const uint64_t shift2 = 2*shift; | |
1675 uint8_t edge_buf[(h+1)*stride]; | |
1676 int x, y; | |
1677 | |
1678 const int dxw = (dxx-(1<<(16+shift)))*(w-1); | |
1679 const int dyh = (dyy-(1<<(16+shift)))*(h-1); | |
1680 const int dxh = dxy*(h-1); | |
1681 const int dyw = dyx*(w-1); | |
1682 if( // non-constant fullpel offset (3% of blocks) | |
1683 ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) | | |
1684 (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift) | |
1685 // uses more than 16 bits of subpel mv (only at huge resolution) | |
1686 || (dxx|dxy|dyx|dyy)&15 ) | |
1687 { | |
1688 //FIXME could still use mmx for some of the rows | |
1689 ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height); | |
1690 return; | |
1691 } | |
1692 | |
1693 src += ix + iy*stride; | |
1694 if( (unsigned)ix >= width-w || | |
1695 (unsigned)iy >= height-h ) | |
1696 { | |
1697 ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height); | |
1698 src = edge_buf; | |
1699 } | |
1700 | |
1701 __asm__ volatile( | |
1702 "movd %0, %%mm6 \n\t" | |
1703 "pxor %%mm7, %%mm7 \n\t" | |
1704 "punpcklwd %%mm6, %%mm6 \n\t" | |
1705 "punpcklwd %%mm6, %%mm6 \n\t" | |
1706 :: "r"(1<<shift) | |
1707 ); | |
1708 | |
1709 for(x=0; x<w; x+=4){ | |
1710 uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0), | |
1711 oxs - dxys + dxxs*(x+1), | |
1712 oxs - dxys + dxxs*(x+2), | |
1713 oxs - dxys + dxxs*(x+3) }; | |
1714 uint16_t dy4[4] = { oys - dyys + dyxs*(x+0), | |
1715 oys - dyys + dyxs*(x+1), | |
1716 oys - dyys + dyxs*(x+2), | |
1717 oys - dyys + dyxs*(x+3) }; | |
1718 | |
1719 for(y=0; y<h; y++){ | |
1720 __asm__ volatile( | |
1721 "movq %0, %%mm4 \n\t" | |
1722 "movq %1, %%mm5 \n\t" | |
1723 "paddw %2, %%mm4 \n\t" | |
1724 "paddw %3, %%mm5 \n\t" | |
1725 "movq %%mm4, %0 \n\t" | |
1726 "movq %%mm5, %1 \n\t" | |
1727 "psrlw $12, %%mm4 \n\t" | |
1728 "psrlw $12, %%mm5 \n\t" | |
1729 : "+m"(*dx4), "+m"(*dy4) | |
1730 : "m"(*dxy4), "m"(*dyy4) | |
1731 ); | |
1732 | |
1733 __asm__ volatile( | |
1734 "movq %%mm6, %%mm2 \n\t" | |
1735 "movq %%mm6, %%mm1 \n\t" | |
1736 "psubw %%mm4, %%mm2 \n\t" | |
1737 "psubw %%mm5, %%mm1 \n\t" | |
1738 "movq %%mm2, %%mm0 \n\t" | |
1739 "movq %%mm4, %%mm3 \n\t" | |
1740 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy) | |
1741 "pmullw %%mm5, %%mm3 \n\t" // dx*dy | |
1742 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy | |
1743 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy) | |
1744 | |
1745 "movd %4, %%mm5 \n\t" | |
1746 "movd %3, %%mm4 \n\t" | |
1747 "punpcklbw %%mm7, %%mm5 \n\t" | |
1748 "punpcklbw %%mm7, %%mm4 \n\t" | |
1749 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy | |
1750 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy | |
1751 | |
1752 "movd %2, %%mm5 \n\t" | |
1753 "movd %1, %%mm4 \n\t" | |
1754 "punpcklbw %%mm7, %%mm5 \n\t" | |
1755 "punpcklbw %%mm7, %%mm4 \n\t" | |
1756 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy) | |
1757 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy) | |
1758 "paddw %5, %%mm1 \n\t" | |
1759 "paddw %%mm3, %%mm2 \n\t" | |
1760 "paddw %%mm1, %%mm0 \n\t" | |
1761 "paddw %%mm2, %%mm0 \n\t" | |
1762 | |
1763 "psrlw %6, %%mm0 \n\t" | |
1764 "packuswb %%mm0, %%mm0 \n\t" | |
1765 "movd %%mm0, %0 \n\t" | |
1766 | |
1767 : "=m"(dst[x+y*stride]) | |
1768 : "m"(src[0]), "m"(src[1]), | |
1769 "m"(src[stride]), "m"(src[stride+1]), | |
1770 "m"(*r4), "m"(shift2) | |
1771 ); | |
1772 src += stride; | |
1773 } | |
1774 src += 4-h*stride; | |
1775 } | |
1776 } | |
1777 | |
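/* Per pixel, the inner loop above computes a fixed-point bilinear blend of
 * the four neighbouring source samples (s = 1<<shift, loaded into mm6; the
 * final packuswb additionally saturates the result to 0..255). A scalar
 * sketch of the blend:
 */
#if 0
static inline int gmc_bilin_ref(const uint8_t *src, int stride,
                                int dx, int dy, int s, int r, int shift)
{
    return ((s - dx) * (s - dy) * src[0]      + dx * (s - dy) * src[1] +
            (s - dx) *  dy      * src[stride] + dx *  dy      * src[stride + 1] +
            r) >> (2 * shift);
}
#endif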
1778 #define PREFETCH(name, op) \ | |
1779 static void name(void *mem, int stride, int h){\ | |
1780 const uint8_t *p= mem;\ | |
1781 do{\ | |
1782 __asm__ volatile(#op" %0" :: "m"(*p));\ | |
1783 p+= stride;\ | |
1784 }while(--h);\ | |
1785 } | |
1786 PREFETCH(prefetch_mmx2, prefetcht0) | |
1787 PREFETCH(prefetch_3dnow, prefetch) | |
1788 #undef PREFETCH | |
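/* e.g. PREFETCH(prefetch_mmx2, prefetcht0) expands to a loop that issues one
 * prefetcht0 per row, touching h rows spaced stride bytes apart. */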
1789 | |
1790 #include "h264dsp_mmx.c" | |
8519 | 1791 #include "rv40dsp_mmx.c"
8430 | 1792 |
1793 /* CAVS specific */ | |
1794 void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx); | |
1795 void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx); | |
1796 | |
1797 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1798 put_pixels8_mmx(dst, src, stride, 8); | |
1799 } | |
1800 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1801 avg_pixels8_mmx(dst, src, stride, 8); | |
1802 } | |
1803 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1804 put_pixels16_mmx(dst, src, stride, 16); | |
1805 } | |
1806 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { | |
1807 avg_pixels16_mmx(dst, src, stride, 16); | |
1808 } | |
1809 | |
1810 /* VC1 specific */ | |
1811 void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx); | |
1812 | |
1813 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { | |
1814 put_pixels8_mmx(dst, src, stride, 8); | |
1815 } | |
1816 | |
1817 /* external functions, from idct_mmx.c */ | |
1818 void ff_mmx_idct(DCTELEM *block); | |
1819 void ff_mmxext_idct(DCTELEM *block); | |
1820 | |
1821 /* XXX: these functions should be removed as soon as all IDCTs are
1822 converted */
8590 | 1823 #if CONFIG_GPL |
8430 | 1824 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block) |
1825 { | |
1826 ff_mmx_idct (block); | |
1827 put_pixels_clamped_mmx(block, dest, line_size); | |
1828 } | |
1829 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1830 { | |
1831 ff_mmx_idct (block); | |
1832 add_pixels_clamped_mmx(block, dest, line_size); | |
1833 } | |
1834 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block) | |
1835 { | |
1836 ff_mmxext_idct (block); | |
1837 put_pixels_clamped_mmx(block, dest, line_size); | |
1838 } | |
1839 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1840 { | |
1841 ff_mmxext_idct (block); | |
1842 add_pixels_clamped_mmx(block, dest, line_size); | |
1843 } | |
1844 #endif | |
1845 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block) | |
1846 { | |
1847 ff_idct_xvid_mmx (block); | |
1848 put_pixels_clamped_mmx(block, dest, line_size); | |
1849 } | |
1850 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1851 { | |
1852 ff_idct_xvid_mmx (block); | |
1853 add_pixels_clamped_mmx(block, dest, line_size); | |
1854 } | |
1855 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block) | |
1856 { | |
1857 ff_idct_xvid_mmx2 (block); | |
1858 put_pixels_clamped_mmx(block, dest, line_size); | |
1859 } | |
1860 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block) | |
1861 { | |
1862 ff_idct_xvid_mmx2 (block); | |
1863 add_pixels_clamped_mmx(block, dest, line_size); | |
1864 } | |
1865 | |
1866 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize) | |
1867 { | |
1868 int i; | |
1869 __asm__ volatile("pxor %%mm7, %%mm7":); | |
1870 for(i=0; i<blocksize; i+=2) { | |
1871 __asm__ volatile( | |
1872 "movq %0, %%mm0 \n\t" | |
1873 "movq %1, %%mm1 \n\t" | |
1874 "movq %%mm0, %%mm2 \n\t" | |
1875 "movq %%mm1, %%mm3 \n\t" | |
1876 "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0 | |
1877 "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0 | |
1878 "pslld $31, %%mm2 \n\t" // keep only the sign bit | |
1879 "pxor %%mm2, %%mm1 \n\t" | |
1880 "movq %%mm3, %%mm4 \n\t" | |
1881 "pand %%mm1, %%mm3 \n\t" | |
1882 "pandn %%mm1, %%mm4 \n\t" | |
1883 "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m))) | |
1884 "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m))) | |
1885 "movq %%mm3, %1 \n\t" | |
1886 "movq %%mm0, %0 \n\t" | |
1887 :"+m"(mag[i]), "+m"(ang[i]) | |
1888 ::"memory" | |
1889 ); | |
1890 } | |
1891 __asm__ volatile("femms"); | |
1892 } | |
1893 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize) | |
1894 { | |
1895 int i; | |
1896 | |
1897 __asm__ volatile( | |
1898 "movaps %0, %%xmm5 \n\t" | |
1899 ::"m"(ff_pdw_80000000[0]) | |
1900 ); | |
1901 for(i=0; i<blocksize; i+=4) { | |
1902 __asm__ volatile( | |
1903 "movaps %0, %%xmm0 \n\t" | |
1904 "movaps %1, %%xmm1 \n\t" | |
1905 "xorps %%xmm2, %%xmm2 \n\t" | |
1906 "xorps %%xmm3, %%xmm3 \n\t" | |
1907 "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0 | |
1908 "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0 | |
1909 "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit | |
1910 "xorps %%xmm2, %%xmm1 \n\t" | |
1911 "movaps %%xmm3, %%xmm4 \n\t" | |
1912 "andps %%xmm1, %%xmm3 \n\t" | |
1913 "andnps %%xmm1, %%xmm4 \n\t" | |
1914 "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m))) | |
1915 "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m))) | |
1916 "movaps %%xmm3, %1 \n\t" | |
1917 "movaps %%xmm0, %0 \n\t" | |
1918 :"+m"(mag[i]), "+m"(ang[i]) | |
1919 ::"memory" | |
1920 ); | |
1921 } | |
1922 } | |
1923 | |
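/* Scalar sketch of what the branchless sign games above implement (derived
 * from the comments in the asm; assumed to match the branchy C version in
 * the vorbis decoder):
 */
#if 0
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) { ang[i] = mag[i] - ang[i]; }
            else              { float t = ang[i]; ang[i] = mag[i]; mag[i] += t; }
        } else {
            if (ang[i] > 0.0) { ang[i] += mag[i]; }
            else              { float t = ang[i]; ang[i] = mag[i]; mag[i] -= t; }
        }
    }
}
#endif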
1924 #define IF1(x) x | |
1925 #define IF0(x) | |
1926 | |
1927 #define MIX5(mono,stereo)\ | |
1928 __asm__ volatile(\ | |
1929 "movss 0(%2), %%xmm5 \n"\ | |
1930 "movss 8(%2), %%xmm6 \n"\ | |
1931 "movss 24(%2), %%xmm7 \n"\ | |
1932 "shufps $0, %%xmm5, %%xmm5 \n"\ | |
1933 "shufps $0, %%xmm6, %%xmm6 \n"\ | |
1934 "shufps $0, %%xmm7, %%xmm7 \n"\ | |
1935 "1: \n"\ | |
1936 "movaps (%0,%1), %%xmm0 \n"\ | |
1937 "movaps 0x400(%0,%1), %%xmm1 \n"\ | |
1938 "movaps 0x800(%0,%1), %%xmm2 \n"\ | |
1939 "movaps 0xc00(%0,%1), %%xmm3 \n"\ | |
1940 "movaps 0x1000(%0,%1), %%xmm4 \n"\ | |
1941 "mulps %%xmm5, %%xmm0 \n"\ | |
1942 "mulps %%xmm6, %%xmm1 \n"\ | |
1943 "mulps %%xmm5, %%xmm2 \n"\ | |
1944 "mulps %%xmm7, %%xmm3 \n"\ | |
1945 "mulps %%xmm7, %%xmm4 \n"\ | |
1946 stereo("addps %%xmm1, %%xmm0 \n")\ | |
1947 "addps %%xmm1, %%xmm2 \n"\ | |
1948 "addps %%xmm3, %%xmm0 \n"\ | |
1949 "addps %%xmm4, %%xmm2 \n"\ | |
1950 mono("addps %%xmm2, %%xmm0 \n")\ | |
1951 "movaps %%xmm0, (%0,%1) \n"\ | |
1952 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\ | |
1953 "add $16, %0 \n"\ | |
1954 "jl 1b \n"\ | |
1955 :"+&r"(i)\ | |
1956 :"r"(samples[0]+len), "r"(matrix)\ | |
1957 :"memory"\ | |
1958 ); | |
1959 | |
1960 #define MIX_MISC(stereo)\ | |
1961 __asm__ volatile(\ | |
1962 "1: \n"\ | |
1963 "movaps (%3,%0), %%xmm0 \n"\ | |
1964 stereo("movaps %%xmm0, %%xmm1 \n")\ | |
1965 "mulps %%xmm6, %%xmm0 \n"\ | |
1966 stereo("mulps %%xmm7, %%xmm1 \n")\ | |
1967 "lea 1024(%3,%0), %1 \n"\ | |
1968 "mov %5, %2 \n"\ | |
1969 "2: \n"\ | |
1970 "movaps (%1), %%xmm2 \n"\ | |
1971 stereo("movaps %%xmm2, %%xmm3 \n")\ | |
1972 "mulps (%4,%2), %%xmm2 \n"\ | |
1973 stereo("mulps 16(%4,%2), %%xmm3 \n")\ | |
1974 "addps %%xmm2, %%xmm0 \n"\ | |
1975 stereo("addps %%xmm3, %%xmm1 \n")\ | |
1976 "add $1024, %1 \n"\ | |
1977 "add $32, %2 \n"\ | |
1978 "jl 2b \n"\ | |
1979 "movaps %%xmm0, (%3,%0) \n"\ | |
1980 stereo("movaps %%xmm1, 1024(%3,%0) \n")\ | |
1981 "add $16, %0 \n"\ | |
1982 "jl 1b \n"\ | |
1983 :"+&r"(i), "=&r"(j), "=&r"(k)\ | |
1984 :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\ | |
1985 :"memory"\ | |
1986 ); | |
1987 | |
1988 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len) | |
1989 { | |
1990 int (*matrix_cmp)[2] = (int(*)[2])matrix; | |
1991 intptr_t i,j,k; | |
1992 | |
1993 i = -len*sizeof(float); | |
1994 if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) { | |
1995 MIX5(IF0,IF1); | |
1996 } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) { | |
1997 MIX5(IF1,IF0); | |
1998 } else { | |
1999 DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]); | |
2000 j = 2*in_ch*sizeof(float); | |
2001 __asm__ volatile( | |
2002 "1: \n" | |
2003 "sub $8, %0 \n" | |
2004 "movss (%2,%0), %%xmm6 \n" | |
2005 "movss 4(%2,%0), %%xmm7 \n" | |
2006 "shufps $0, %%xmm6, %%xmm6 \n" | |
2007 "shufps $0, %%xmm7, %%xmm7 \n" | |
2008 "movaps %%xmm6, (%1,%0,4) \n" | |
2009 "movaps %%xmm7, 16(%1,%0,4) \n" | |
2010 "jg 1b \n" | |
2011 :"+&r"(j) | |
2012 :"r"(matrix_simd), "r"(matrix) | |
2013 :"memory" | |
2014 ); | |
2015 if(out_ch == 2) { | |
2016 MIX_MISC(IF1); | |
2017 } else { | |
2018 MIX_MISC(IF0); | |
2019 } | |
2020 } | |
2021 } | |
2022 | |
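/* Semantics sketch: each output channel is a per-sample dot product of the
 * input channels with one column of matrix[], written back in place over the
 * first out_ch input planes (an assumption based on the MIX5/MIX_MISC loops;
 * the planes are 256 floats apart, hence the 0x400-byte offsets above):
 */
#if 0
static void ac3_downmix_ref(float (*samples)[256], float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    int i, c, o;
    for (i = 0; i < len; i++) {
        float v[2] = { 0, 0 };
        for (c = 0; c < in_ch; c++)
            for (o = 0; o < out_ch; o++)
                v[o] += samples[c][i] * matrix[c][o];
        for (o = 0; o < out_ch; o++)
            samples[o][i] = v[o];
    }
}
#endif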
2023 static void vector_fmul_3dnow(float *dst, const float *src, int len){ | |
2024 x86_reg i = (len-4)*4; | |
2025 __asm__ volatile( | |
2026 "1: \n\t" | |
2027 "movq (%1,%0), %%mm0 \n\t" | |
2028 "movq 8(%1,%0), %%mm1 \n\t" | |
2029 "pfmul (%2,%0), %%mm0 \n\t" | |
2030 "pfmul 8(%2,%0), %%mm1 \n\t" | |
2031 "movq %%mm0, (%1,%0) \n\t" | |
2032 "movq %%mm1, 8(%1,%0) \n\t" | |
2033 "sub $16, %0 \n\t" | |
2034 "jge 1b \n\t" | |
2035 "femms \n\t" | |
2036 :"+r"(i) | |
2037 :"r"(dst), "r"(src) | |
2038 :"memory" | |
2039 ); | |
2040 } | |
2041 static void vector_fmul_sse(float *dst, const float *src, int len){ | |
2042 x86_reg i = (len-8)*4; | |
2043 __asm__ volatile( | |
2044 "1: \n\t" | |
2045 "movaps (%1,%0), %%xmm0 \n\t" | |
2046 "movaps 16(%1,%0), %%xmm1 \n\t" | |
2047 "mulps (%2,%0), %%xmm0 \n\t" | |
2048 "mulps 16(%2,%0), %%xmm1 \n\t" | |
2049 "movaps %%xmm0, (%1,%0) \n\t" | |
2050 "movaps %%xmm1, 16(%1,%0) \n\t" | |
2051 "sub $32, %0 \n\t" | |
2052 "jge 1b \n\t" | |
2053 :"+r"(i) | |
2054 :"r"(dst), "r"(src) | |
2055 :"memory" | |
2056 ); | |
2057 } | |
2058 | |
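/* Both versions above compute, four (3DNow!: two) floats per iteration: */
#if 0
static void vector_fmul_ref(float *dst, const float *src, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] *= src[i];
}
#endif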
2059 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){ | |
2060 x86_reg i = len*4-16; | |
2061 __asm__ volatile( | |
2062 "1: \n\t" | |
2063 "pswapd 8(%1), %%mm0 \n\t" | |
2064 "pswapd (%1), %%mm1 \n\t" | |
2065 "pfmul (%3,%0), %%mm0 \n\t" | |
2066 "pfmul 8(%3,%0), %%mm1 \n\t" | |
2067 "movq %%mm0, (%2,%0) \n\t" | |
2068 "movq %%mm1, 8(%2,%0) \n\t" | |
2069 "add $16, %1 \n\t" | |
2070 "sub $16, %0 \n\t" | |
2071 "jge 1b \n\t" | |
2072 :"+r"(i), "+r"(src1) | |
2073 :"r"(dst), "r"(src0) | |
2074 ); | |
2075 __asm__ volatile("femms"); | |
2076 } | |
2077 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){ | |
2078 x86_reg i = len*4-32; | |
2079 __asm__ volatile( | |
2080 "1: \n\t" | |
2081 "movaps 16(%1), %%xmm0 \n\t" | |
2082 "movaps (%1), %%xmm1 \n\t" | |
2083 "shufps $0x1b, %%xmm0, %%xmm0 \n\t" | |
2084 "shufps $0x1b, %%xmm1, %%xmm1 \n\t" | |
2085 "mulps (%3,%0), %%xmm0 \n\t" | |
2086 "mulps 16(%3,%0), %%xmm1 \n\t" | |
2087 "movaps %%xmm0, (%2,%0) \n\t" | |
2088 "movaps %%xmm1, 16(%2,%0) \n\t" | |
2089 "add $32, %1 \n\t" | |
2090 "sub $32, %0 \n\t" | |
2091 "jge 1b \n\t" | |
2092 :"+r"(i), "+r"(src1) | |
2093 :"r"(dst), "r"(src0) | |
2094 ); | |
2095 } | |
2096 | |
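/* The pswapd / shufps $0x1b steps reverse src1, i.e. the loops compute: */
#if 0
static void vector_fmul_reverse_ref(float *dst, const float *src0,
                                    const float *src1, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[len - 1 - i];
}
#endif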
2097 static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1, | |
2098 const float *src2, int src3, int len, int step){ | |
2099 x86_reg i = (len-4)*4; | |
2100 if(step == 2 && src3 == 0){ | |
2101 dst += (len-4)*2; | |
2102 __asm__ volatile( | |
2103 "1: \n\t" | |
2104 "movq (%2,%0), %%mm0 \n\t" | |
2105 "movq 8(%2,%0), %%mm1 \n\t" | |
2106 "pfmul (%3,%0), %%mm0 \n\t" | |
2107 "pfmul 8(%3,%0), %%mm1 \n\t" | |
2108 "pfadd (%4,%0), %%mm0 \n\t" | |
2109 "pfadd 8(%4,%0), %%mm1 \n\t" | |
2110 "movd %%mm0, (%1) \n\t" | |
2111 "movd %%mm1, 16(%1) \n\t" | |
2112 "psrlq $32, %%mm0 \n\t" | |
2113 "psrlq $32, %%mm1 \n\t" | |
2114 "movd %%mm0, 8(%1) \n\t" | |
2115 "movd %%mm1, 24(%1) \n\t" | |
2116 "sub $32, %1 \n\t" | |
2117 "sub $16, %0 \n\t" | |
2118 "jge 1b \n\t" | |
2119 :"+r"(i), "+r"(dst) | |
2120 :"r"(src0), "r"(src1), "r"(src2) | |
2121 :"memory" | |
2122 ); | |
2123 } | |
2124 else if(step == 1 && src3 == 0){ | |
2125 __asm__ volatile( | |
2126 "1: \n\t" | |
2127 "movq (%2,%0), %%mm0 \n\t" | |
2128 "movq 8(%2,%0), %%mm1 \n\t" | |
2129 "pfmul (%3,%0), %%mm0 \n\t" | |
2130 "pfmul 8(%3,%0), %%mm1 \n\t" | |
2131 "pfadd (%4,%0), %%mm0 \n\t" | |
2132 "pfadd 8(%4,%0), %%mm1 \n\t" | |
2133 "movq %%mm0, (%1,%0) \n\t" | |
2134 "movq %%mm1, 8(%1,%0) \n\t" | |
2135 "sub $16, %0 \n\t" | |
2136 "jge 1b \n\t" | |
2137 :"+r"(i) | |
2138 :"r"(dst), "r"(src0), "r"(src1), "r"(src2) | |
2139 :"memory" | |
2140 ); | |
2141 } | |
2142 else | |
2143 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step); | |
2144 __asm__ volatile("femms"); | |
2145 } | |
2146 static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1, | |
2147 const float *src2, int src3, int len, int step){ | |
2148 x86_reg i = (len-8)*4; | |
2149 if(step == 2 && src3 == 0){ | |
2150 dst += (len-8)*2; | |
2151 __asm__ volatile( | |
2152 "1: \n\t" | |
2153 "movaps (%2,%0), %%xmm0 \n\t" | |
2154 "movaps 16(%2,%0), %%xmm1 \n\t" | |
2155 "mulps (%3,%0), %%xmm0 \n\t" | |
2156 "mulps 16(%3,%0), %%xmm1 \n\t" | |
2157 "addps (%4,%0), %%xmm0 \n\t" | |
2158 "addps 16(%4,%0), %%xmm1 \n\t" | |
2159 "movss %%xmm0, (%1) \n\t" | |
2160 "movss %%xmm1, 32(%1) \n\t" | |
2161 "movhlps %%xmm0, %%xmm2 \n\t" | |
2162 "movhlps %%xmm1, %%xmm3 \n\t" | |
2163 "movss %%xmm2, 16(%1) \n\t" | |
2164 "movss %%xmm3, 48(%1) \n\t" | |
2165 "shufps $0xb1, %%xmm0, %%xmm0 \n\t" | |
2166 "shufps $0xb1, %%xmm1, %%xmm1 \n\t" | |
2167 "movss %%xmm0, 8(%1) \n\t" | |
2168 "movss %%xmm1, 40(%1) \n\t" | |
2169 "movhlps %%xmm0, %%xmm2 \n\t" | |
2170 "movhlps %%xmm1, %%xmm3 \n\t" | |
2171 "movss %%xmm2, 24(%1) \n\t" | |
2172 "movss %%xmm3, 56(%1) \n\t" | |
2173 "sub $64, %1 \n\t" | |
2174 "sub $32, %0 \n\t" | |
2175 "jge 1b \n\t" | |
2176 :"+r"(i), "+r"(dst) | |
2177 :"r"(src0), "r"(src1), "r"(src2) | |
2178 :"memory" | |
2179 ); | |
2180 } | |
2181 else if(step == 1 && src3 == 0){ | |
2182 __asm__ volatile( | |
2183 "1: \n\t" | |
2184 "movaps (%2,%0), %%xmm0 \n\t" | |
2185 "movaps 16(%2,%0), %%xmm1 \n\t" | |
2186 "mulps (%3,%0), %%xmm0 \n\t" | |
2187 "mulps 16(%3,%0), %%xmm1 \n\t" | |
2188 "addps (%4,%0), %%xmm0 \n\t" | |
2189 "addps 16(%4,%0), %%xmm1 \n\t" | |
2190 "movaps %%xmm0, (%1,%0) \n\t" | |
2191 "movaps %%xmm1, 16(%1,%0) \n\t" | |
2192 "sub $32, %0 \n\t" | |
2193 "jge 1b \n\t" | |
2194 :"+r"(i) | |
2195 :"r"(dst), "r"(src0), "r"(src1), "r"(src2) | |
2196 :"memory" | |
2197 ); | |
2198 } | |
2199 else | |
2200 ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step); | |
2201 } | |
2202 | |
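/* For the two fast paths above (src3 == 0), the loops compute, with dst
 * written at a stride of `step` floats; other parameter combinations fall
 * back to ff_vector_fmul_add_add_c:
 */
#if 0
static void vector_fmul_add_add_ref(float *dst, const float *src0, const float *src1,
                                    const float *src2, int len, int step)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i * step] = src0[i] * src1[i] + src2[i];
}
#endif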
2203 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1, | |
2204 const float *win, float add_bias, int len){ | |
8590 | 2205 #if HAVE_6REGS |
8430 | 2206 if(add_bias == 0){ |
2207 x86_reg i = -len*4; | |
2208 x86_reg j = len*4-8; | |
2209 __asm__ volatile( | |
2210 "1: \n" | |
2211 "pswapd (%5,%1), %%mm1 \n" | |
2212 "movq (%5,%0), %%mm0 \n" | |
2213 "pswapd (%4,%1), %%mm5 \n" | |
2214 "movq (%3,%0), %%mm4 \n" | |
2215 "movq %%mm0, %%mm2 \n" | |
2216 "movq %%mm1, %%mm3 \n" | |
2217 "pfmul %%mm4, %%mm2 \n" // src0[len+i]*win[len+i] | |
2218 "pfmul %%mm5, %%mm3 \n" // src1[ j]*win[len+j] | |
2219 "pfmul %%mm4, %%mm1 \n" // src0[len+i]*win[len+j] | |
2220 "pfmul %%mm5, %%mm0 \n" // src1[ j]*win[len+i] | |
2221 "pfadd %%mm3, %%mm2 \n" | |
2222 "pfsub %%mm0, %%mm1 \n" | |
2223 "pswapd %%mm2, %%mm2 \n" | |
2224 "movq %%mm1, (%2,%0) \n" | |
2225 "movq %%mm2, (%2,%1) \n" | |
2226 "sub $8, %1 \n" | |
2227 "add $8, %0 \n" | |
2228 "jl 1b \n" | |
2229 "femms \n" | |
2230 :"+r"(i), "+r"(j) | |
2231 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) | |
2232 ); | |
2233 }else | |
2234 #endif | |
2235 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len); | |
2236 } | |
2237 | |
2238 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1, | |
2239 const float *win, float add_bias, int len){ | |
8590 | 2240 #if HAVE_6REGS |
8430 | 2241 if(add_bias == 0){ |
2242 x86_reg i = -len*4; | |
2243 x86_reg j = len*4-16; | |
2244 __asm__ volatile( | |
2245 "1: \n" | |
2246 "movaps (%5,%1), %%xmm1 \n" | |
2247 "movaps (%5,%0), %%xmm0 \n" | |
2248 "movaps (%4,%1), %%xmm5 \n" | |
2249 "movaps (%3,%0), %%xmm4 \n" | |
2250 "shufps $0x1b, %%xmm1, %%xmm1 \n" | |
2251 "shufps $0x1b, %%xmm5, %%xmm5 \n" | |
2252 "movaps %%xmm0, %%xmm2 \n" | |
2253 "movaps %%xmm1, %%xmm3 \n" | |
2254 "mulps %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i] | |
2255 "mulps %%xmm5, %%xmm3 \n" // src1[ j]*win[len+j] | |
2256 "mulps %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j] | |
2257 "mulps %%xmm5, %%xmm0 \n" // src1[ j]*win[len+i] | |
2258 "addps %%xmm3, %%xmm2 \n" | |
2259 "subps %%xmm0, %%xmm1 \n" | |
2260 "shufps $0x1b, %%xmm2, %%xmm2 \n" | |
2261 "movaps %%xmm1, (%2,%0) \n" | |
2262 "movaps %%xmm2, (%2,%1) \n" | |
2263 "sub $16, %1 \n" | |
2264 "add $16, %0 \n" | |
2265 "jl 1b \n" | |
2266 :"+r"(i), "+r"(j) | |
2267 :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len) | |
2268 ); | |
2269 }else | |
2270 #endif | |
2271 ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len); | |
2272 } | |
2273 | |
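/* Scalar sketch of the windowed overlap-add both versions implement for
 * add_bias == 0 (following the operand comments in the asm):
 */
#if 0
static void vector_fmul_window_ref(float *dst, const float *src0,
                                   const float *src1, const float *win, int len)
{
    int i, j;
    dst += len; win += len; src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i], s1 = src1[j], wi = win[i], wj = win[j];
        dst[i] = s0 * wj - s1 * wi;
        dst[j] = s0 * wi + s1 * wj;
    }
}
#endif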
2274 static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len) | |
2275 { | |
2276 x86_reg i = -4*len; | |
2277 __asm__ volatile( | |
2278 "movss %3, %%xmm4 \n" | |
2279 "shufps $0, %%xmm4, %%xmm4 \n" | |
2280 "1: \n" | |
2281 "cvtpi2ps (%2,%0), %%xmm0 \n" | |
2282 "cvtpi2ps 8(%2,%0), %%xmm1 \n" | |
2283 "cvtpi2ps 16(%2,%0), %%xmm2 \n" | |
2284 "cvtpi2ps 24(%2,%0), %%xmm3 \n" | |
2285 "movlhps %%xmm1, %%xmm0 \n" | |
2286 "movlhps %%xmm3, %%xmm2 \n" | |
2287 "mulps %%xmm4, %%xmm0 \n" | |
2288 "mulps %%xmm4, %%xmm2 \n" | |
2289 "movaps %%xmm0, (%1,%0) \n" | |
2290 "movaps %%xmm2, 16(%1,%0) \n" | |
2291 "add $32, %0 \n" | |
2292 "jl 1b \n" | |
2293 :"+r"(i) | |
2294 :"r"(dst+len), "r"(src+len), "m"(mul) | |
2295 ); | |
2296 } | |
2297 | |
2298 static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len) | |
2299 { | |
2300 x86_reg i = -4*len; | |
2301 __asm__ volatile( | |
2302 "movss %3, %%xmm4 \n" | |
2303 "shufps $0, %%xmm4, %%xmm4 \n" | |
2304 "1: \n" | |
2305 "cvtdq2ps (%2,%0), %%xmm0 \n" | |
2306 "cvtdq2ps 16(%2,%0), %%xmm1 \n" | |
2307 "mulps %%xmm4, %%xmm0 \n" | |
2308 "mulps %%xmm4, %%xmm1 \n" | |
2309 "movaps %%xmm0, (%1,%0) \n" | |
2310 "movaps %%xmm1, 16(%1,%0) \n" | |
2311 "add $32, %0 \n" | |
2312 "jl 1b \n" | |
2313 :"+r"(i) | |
2314 :"r"(dst+len), "r"(src+len), "m"(mul) | |
2315 ); | |
2316 } | |
2317 | |
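/* Both versions convert int32 samples to scaled floats, i.e.
 * dst[i] = src[i] * mul; SSE goes through cvtpi2ps on MMX-sized halves,
 * SSE2 converts four ints at once with cvtdq2ps. */
#if 0
static void int32_to_float_fmul_scalar_ref(float *dst, const int *src, float mul, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src[i] * mul;
}
#endif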
2318 static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){ | |
2319 x86_reg reglen = len; | |
2320 // not bit-exact: pf2id uses different rounding than C and SSE | |
2321 __asm__ volatile( | |
2322 "add %0 , %0 \n\t" | |
2323 "lea (%2,%0,2) , %2 \n\t" | |
2324 "add %0 , %1 \n\t" | |
2325 "neg %0 \n\t" | |
2326 "1: \n\t" | |
2327 "pf2id (%2,%0,2) , %%mm0 \n\t" | |
2328 "pf2id 8(%2,%0,2) , %%mm1 \n\t" | |
2329 "pf2id 16(%2,%0,2) , %%mm2 \n\t" | |
2330 "pf2id 24(%2,%0,2) , %%mm3 \n\t" | |
2331 "packssdw %%mm1 , %%mm0 \n\t" | |
2332 "packssdw %%mm3 , %%mm2 \n\t" | |
2333 "movq %%mm0 , (%1,%0) \n\t" | |
2334 "movq %%mm2 , 8(%1,%0) \n\t" | |
2335 "add $16 , %0 \n\t" | |
2336 " js 1b \n\t" | |
2337 "femms \n\t" | |
2338 :"+r"(reglen), "+r"(dst), "+r"(src) | |
2339 ); | |
2340 } | |
2341 static void float_to_int16_sse(int16_t *dst, const float *src, long len){ | |
2342 x86_reg reglen = len; | |
2343 __asm__ volatile( | |
2344 "add %0 , %0 \n\t" | |
2345 "lea (%2,%0,2) , %2 \n\t" | |
2346 "add %0 , %1 \n\t" | |
2347 "neg %0 \n\t" | |
2348 "1: \n\t" | |
2349 "cvtps2pi (%2,%0,2) , %%mm0 \n\t" | |
2350 "cvtps2pi 8(%2,%0,2) , %%mm1 \n\t" | |
2351 "cvtps2pi 16(%2,%0,2) , %%mm2 \n\t" | |
2352 "cvtps2pi 24(%2,%0,2) , %%mm3 \n\t" | |
2353 "packssdw %%mm1 , %%mm0 \n\t" | |
2354 "packssdw %%mm3 , %%mm2 \n\t" | |
2355 "movq %%mm0 , (%1,%0) \n\t" | |
2356 "movq %%mm2 , 8(%1,%0) \n\t" | |
2357 "add $16 , %0 \n\t" | |
2358 " js 1b \n\t" | |
2359 "emms \n\t" | |
2360 :"+r"(reglen), "+r"(dst), "+r"(src) | |
2361 ); | |
2362 } | |
2363 | |
2364 static void float_to_int16_sse2(int16_t *dst, const float *src, long len){ | |
2365 x86_reg reglen = len; | |
2366 __asm__ volatile( | |
2367 "add %0 , %0 \n\t" | |
2368 "lea (%2,%0,2) , %2 \n\t" | |
2369 "add %0 , %1 \n\t" | |
2370 "neg %0 \n\t" | |
2371 "1: \n\t" | |
2372 "cvtps2dq (%2,%0,2) , %%xmm0 \n\t" | |
2373 "cvtps2dq 16(%2,%0,2) , %%xmm1 \n\t" | |
2374 "packssdw %%xmm1 , %%xmm0 \n\t" | |
2375 "movdqa %%xmm0 , (%1,%0) \n\t" | |
2376 "add $16 , %0 \n\t" | |
2377 " js 1b \n\t" | |
2378 :"+r"(reglen), "+r"(dst), "+r"(src) | |
2379 ); | |
2380 } | |
2381 | |
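/* All three convert float samples to saturated int16 (packssdw clamps to
 * [-32768,32767]); roughly the following, except that pf2id truncates
 * instead of rounding to nearest (see the bit-exactness note above):
 */
#if 0
static void float_to_int16_ref(int16_t *dst, const float *src, long len)
{
    long i;
    for (i = 0; i < len; i++) {
        int v = lrintf(src[i]); /* needs <math.h>; matches the SSE rounding mode */
        dst[i] = v < -32768 ? -32768 : v > 32767 ? 32767 : v;
    }
}
#endif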
8590 | 2382 #if HAVE_YASM |
8430 | 2383 void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len); |
2384 void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len); | |
2385 void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len); | |
8760 | 2386 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top); |
8430 | 2387 void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0); |
2388 void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0); | |
2389 void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta); | |
2390 void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta); | |
8590 | 2391 #if ARCH_X86_32 |
8430 | 2392 static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta) |
2393 { | |
2394 ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta); | |
2395 ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta); | |
2396 } | |
8463 | 2397 #endif
8430 | 2398 void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta); |
2399 void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta); | |
2400 #else | |
2401 #define ff_float_to_int16_interleave6_sse(a,b,c) float_to_int16_interleave_misc_sse(a,b,c,6) | |
2402 #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) | |
2403 #define ff_float_to_int16_interleave6_3dn2(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6) | |
2404 #endif | |
2405 #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse | |
2406 | |
2407 #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \ | |
2408 /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\
2409 static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\ | |
2410 DECLARE_ALIGNED_16(int16_t, tmp[len]);\ | |
2411 int i,j,c;\ | |
2412 for(c=0; c<channels; c++){\ | |
2413 float_to_int16_##cpu(tmp, src[c], len);\ | |
2414 for(i=0, j=c; i<len; i++, j+=channels)\ | |
2415 dst[j] = tmp[i];\ | |
2416 }\ | |
2417 }\ | |
2418 \ | |
2419 static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\ | |
2420 if(channels==1)\ | |
2421 float_to_int16_##cpu(dst, src[0], len);\ | |
2422 else if(channels==2){\ | |
2423 x86_reg reglen = len; \ | |
2424 const float *src0 = src[0];\ | |
2425 const float *src1 = src[1];\ | |
2426 __asm__ volatile(\ | |
2427 "shl $2, %0 \n"\ | |
2428 "add %0, %1 \n"\ | |
2429 "add %0, %2 \n"\ | |
2430 "add %0, %3 \n"\ | |
2431 "neg %0 \n"\ | |
2432 body\ | |
2433 :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\ | |
2434 );\ | |
2435 }else if(channels==6){\ | |
2436 ff_float_to_int16_interleave6_##cpu(dst, src, len);\ | |
2437 }else\ | |
2438 float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\ | |
2439 } | |
2440 | |
2441 FLOAT_TO_INT16_INTERLEAVE(3dnow, | |
2442 "1: \n" | |
2443 "pf2id (%2,%0), %%mm0 \n" | |
2444 "pf2id 8(%2,%0), %%mm1 \n" | |
2445 "pf2id (%3,%0), %%mm2 \n" | |
2446 "pf2id 8(%3,%0), %%mm3 \n" | |
2447 "packssdw %%mm1, %%mm0 \n" | |
2448 "packssdw %%mm3, %%mm2 \n" | |
2449 "movq %%mm0, %%mm1 \n" | |
2450 "punpcklwd %%mm2, %%mm0 \n" | |
2451 "punpckhwd %%mm2, %%mm1 \n" | |
2452 "movq %%mm0, (%1,%0)\n" | |
2453 "movq %%mm1, 8(%1,%0)\n" | |
2454 "add $16, %0 \n" | |
2455 "js 1b \n" | |
2456 "femms \n" | |
2457 ) | |
2458 | |
2459 FLOAT_TO_INT16_INTERLEAVE(sse, | |
2460 "1: \n" | |
2461 "cvtps2pi (%2,%0), %%mm0 \n" | |
2462 "cvtps2pi 8(%2,%0), %%mm1 \n" | |
2463 "cvtps2pi (%3,%0), %%mm2 \n" | |
2464 "cvtps2pi 8(%3,%0), %%mm3 \n" | |
2465 "packssdw %%mm1, %%mm0 \n" | |
2466 "packssdw %%mm3, %%mm2 \n" | |
2467 "movq %%mm0, %%mm1 \n" | |
2468 "punpcklwd %%mm2, %%mm0 \n" | |
2469 "punpckhwd %%mm2, %%mm1 \n" | |
2470 "movq %%mm0, (%1,%0)\n" | |
2471 "movq %%mm1, 8(%1,%0)\n" | |
2472 "add $16, %0 \n" | |
2473 "js 1b \n" | |
2474 "emms \n" | |
2475 ) | |
2476 | |
2477 FLOAT_TO_INT16_INTERLEAVE(sse2, | |
2478 "1: \n" | |
2479 "cvtps2dq (%2,%0), %%xmm0 \n" | |
2480 "cvtps2dq (%3,%0), %%xmm1 \n" | |
2481 "packssdw %%xmm1, %%xmm0 \n" | |
2482 "movhlps %%xmm0, %%xmm1 \n" | |
2483 "punpcklwd %%xmm1, %%xmm0 \n" | |
2484 "movdqa %%xmm0, (%1,%0) \n" | |
2485 "add $16, %0 \n" | |
2486 "js 1b \n" | |
2487 ) | |
2488 | |
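/* The two-channel bodies above convert and interleave in one pass, i.e.
 * dst[2*i] = convert(src0[i]) and dst[2*i+1] = convert(src1[i]); mono falls
 * through to the plain float_to_int16 version, and other channel counts go
 * through the interleave6 or misc helpers. */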
2489 static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){ | |
2490 if(channels==6) | |
2491 ff_float_to_int16_interleave6_3dn2(dst, src, len); | |
2492 else | |
2493 float_to_int16_interleave_3dnow(dst, src, len, channels); | |
2494 } | |
2495 | |
2496 | |
2497 void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width); | |
2498 void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width); | |
2499 void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width); | |
2500 void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width); | |
2501 void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, | |
2502 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8); | |
2503 void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, | |
2504 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8); | |
2505 | |
2506 | |
2507 static void add_int16_sse2(int16_t * v1, int16_t * v2, int order) | |
2508 { | |
2509 x86_reg o = -(order << 1); | |
2510 v1 += order; | |
2511 v2 += order; | |
2512 __asm__ volatile( | |
2513 "1: \n\t" | |
2514 "movdqu (%1,%2), %%xmm0 \n\t" | |
2515 "movdqu 16(%1,%2), %%xmm1 \n\t" | |
2516 "paddw (%0,%2), %%xmm0 \n\t" | |
2517 "paddw 16(%0,%2), %%xmm1 \n\t" | |
2518 "movdqa %%xmm0, (%0,%2) \n\t" | |
2519 "movdqa %%xmm1, 16(%0,%2) \n\t" | |
2520 "add $32, %2 \n\t" | |
2521 "js 1b \n\t" | |
2522 : "+r"(v1), "+r"(v2), "+r"(o) | |
2523 ); | |
2524 } | |
2525 | |
2526 static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order) | |
2527 { | |
2528 x86_reg o = -(order << 1); | |
2529 v1 += order; | |
2530 v2 += order; | |
2531 __asm__ volatile( | |
2532 "1: \n\t" | |
2533 "movdqa (%0,%2), %%xmm0 \n\t" | |
2534 "movdqa 16(%0,%2), %%xmm2 \n\t" | |
2535 "movdqu (%1,%2), %%xmm1 \n\t" | |
2536 "movdqu 16(%1,%2), %%xmm3 \n\t" | |
2537 "psubw %%xmm1, %%xmm0 \n\t" | |
2538 "psubw %%xmm3, %%xmm2 \n\t" | |
2539 "movdqa %%xmm0, (%0,%2) \n\t" | |
2540 "movdqa %%xmm2, 16(%0,%2) \n\t" | |
2541 "add $32, %2 \n\t" | |
2542 "js 1b \n\t" | |
2543 : "+r"(v1), "+r"(v2), "+r"(o) | |
2544 ); | |
2545 } | |
2546 | |
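/* add_int16/sub_int16 are plain element-wise 16-bit adds/subs with
 * wraparound, i.e.: */
#if 0
static void add_int16_ref(int16_t *v1, const int16_t *v2, int order)
{
    while (order--)
        *v1++ += *v2++;
}
#endif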
2547 static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift) | |
2548 { | |
2549 int res = 0; | |
8668 | 2550 DECLARE_ALIGNED_16(xmm_reg, sh); |
8430 | 2551 x86_reg o = -(order << 1); |
2552 | |
2553 v1 += order; | |
2554 v2 += order; | |
8668 | 2555 sh.a = shift; |
8430 | 2556 __asm__ volatile( |
2557 "pxor %%xmm7, %%xmm7 \n\t" | |
2558 "1: \n\t" | |
2559 "movdqu (%0,%3), %%xmm0 \n\t" | |
2560 "movdqu 16(%0,%3), %%xmm1 \n\t" | |
2561 "pmaddwd (%1,%3), %%xmm0 \n\t" | |
2562 "pmaddwd 16(%1,%3), %%xmm1 \n\t" | |
2563 "paddd %%xmm0, %%xmm7 \n\t" | |
2564 "paddd %%xmm1, %%xmm7 \n\t" | |
2565 "add $32, %3 \n\t" | |
2566 "js 1b \n\t" | |
2567 "movhlps %%xmm7, %%xmm2 \n\t" | |
2568 "paddd %%xmm2, %%xmm7 \n\t" | |
2569 "psrad %4, %%xmm7 \n\t" | |
2570 "pshuflw $0x4E, %%xmm7,%%xmm2 \n\t" | |
2571 "paddd %%xmm2, %%xmm7 \n\t" | |
2572 "movd %%xmm7, %2 \n\t" | |
2573 : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o) | |
2574 : "m"(sh) | |
2575 ); | |
2576 return res; | |
2577 } | |
2578 | |
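/* Sketch of the reduction above: a 16-bit dot product accumulated in 32 bits.
 * Note that the SSE2 code applies the shift to partially reduced sums rather
 * than to the final total, so the low bits can differ from this sketch:
 */
#if 0
static int32_t scalarproduct_int16_ref(int16_t *v1, int16_t *v2, int order, int shift)
{
    int32_t res = 0;
    while (order--)
        res += *v1++ * *v2++;
    return res >> shift;
}
#endif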
2579 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) | |
2580 { | |
2581 mm_flags = mm_support(); | |
2582 | |
2583 if (avctx->dsp_mask) { | |
2584 if (avctx->dsp_mask & FF_MM_FORCE) | |
2585 mm_flags |= (avctx->dsp_mask & 0xffff); | |
2586 else | |
2587 mm_flags &= ~(avctx->dsp_mask & 0xffff); | |
2588 } | |
2589 | |
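/* Callers can steer this at runtime through AVCodecContext.dsp_mask, e.g.
 * (illustrative):
 *     avctx->dsp_mask = FF_MM_SSE2;               // clear the SSE2 flag
 *     avctx->dsp_mask = FF_MM_FORCE | FF_MM_MMX2; // force MMX2 on
 */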
2590 #if 0 | |
2591 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:"); | |
2592 if (mm_flags & FF_MM_MMX) | |
2593 av_log(avctx, AV_LOG_INFO, " mmx"); | |
9342 | 2594 if (mm_flags & FF_MM_MMX2)
2595 av_log(avctx, AV_LOG_INFO, " mmx2");
8430 | 2596 if (mm_flags & FF_MM_3DNOW) |
2597 av_log(avctx, AV_LOG_INFO, " 3dnow"); | |
2598 if (mm_flags & FF_MM_SSE) | |
2599 av_log(avctx, AV_LOG_INFO, " sse"); | |
2600 if (mm_flags & FF_MM_SSE2) | |
2601 av_log(avctx, AV_LOG_INFO, " sse2"); | |
2602 av_log(avctx, AV_LOG_INFO, "\n"); | |
2603 #endif | |
2604 | |
2605 if (mm_flags & FF_MM_MMX) { | |
2606 const int idct_algo= avctx->idct_algo; | |
2607 | |
2608 if(avctx->lowres==0){ | |
2609 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ | |
2610 c->idct_put= ff_simple_idct_put_mmx; | |
2611 c->idct_add= ff_simple_idct_add_mmx; | |
2612 c->idct = ff_simple_idct_mmx; | |
2613 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM; | |
8590 | 2614 #if CONFIG_GPL |
8430 | 2615 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){ |
9342 | 2616 if(mm_flags & FF_MM_MMX2){
8430 | 2617 c->idct_put= ff_libmpeg2mmx2_idct_put; |
2618 c->idct_add= ff_libmpeg2mmx2_idct_add; | |
2619 c->idct = ff_mmxext_idct; | |
2620 }else{ | |
2621 c->idct_put= ff_libmpeg2mmx_idct_put; | |
2622 c->idct_add= ff_libmpeg2mmx_idct_add; | |
2623 c->idct = ff_mmx_idct; | |
2624 } | |
2625 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; | |
2626 #endif | |
8596 | 2627 }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER || CONFIG_THEORA_DECODER) &&
8430 | 2628 idct_algo==FF_IDCT_VP3){ |
2629 if(mm_flags & FF_MM_SSE2){ | |
2630 c->idct_put= ff_vp3_idct_put_sse2; | |
2631 c->idct_add= ff_vp3_idct_add_sse2; | |
2632 c->idct = ff_vp3_idct_sse2; | |
2633 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; | |
2634 }else{ | |
2635 c->idct_put= ff_vp3_idct_put_mmx; | |
2636 c->idct_add= ff_vp3_idct_add_mmx; | |
2637 c->idct = ff_vp3_idct_mmx; | |
2638 c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM; | |
2639 } | |
2640 }else if(idct_algo==FF_IDCT_CAVS){ | |
2641 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; | |
2642 }else if(idct_algo==FF_IDCT_XVIDMMX){ | |
2643 if(mm_flags & FF_MM_SSE2){ | |
2644 c->idct_put= ff_idct_xvid_sse2_put; | |
2645 c->idct_add= ff_idct_xvid_sse2_add; | |
2646 c->idct = ff_idct_xvid_sse2; | |
2647 c->idct_permutation_type= FF_SSE2_IDCT_PERM; | |
9342 | 2648 }else if(mm_flags & FF_MM_MMX2){
8430 | 2649 c->idct_put= ff_idct_xvid_mmx2_put; |
2650 c->idct_add= ff_idct_xvid_mmx2_add; | |
2651 c->idct = ff_idct_xvid_mmx2; | |
2652 }else{ | |
2653 c->idct_put= ff_idct_xvid_mmx_put; | |
2654 c->idct_add= ff_idct_xvid_mmx_add; | |
2655 c->idct = ff_idct_xvid_mmx; | |
2656 } | |
2657 } | |
2658 } | |
2659 | |
2660 c->put_pixels_clamped = put_pixels_clamped_mmx; | |
2661 c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx; | |
2662 c->add_pixels_clamped = add_pixels_clamped_mmx; | |
2663 c->clear_block = clear_block_mmx; | |
2664 c->clear_blocks = clear_blocks_mmx; | |
2665 if (mm_flags & FF_MM_SSE) | |
2666 c->clear_block = clear_block_sse; | |
2667 | |
2668 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \ | |
2669 c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \ | |
2670 c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \ | |
2671 c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \ | |
2672 c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU | |
2673 | |
2674 SET_HPEL_FUNCS(put, 0, 16, mmx); | |
2675 SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx); | |
2676 SET_HPEL_FUNCS(avg, 0, 16, mmx); | |
2677 SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx); | |
2678 SET_HPEL_FUNCS(put, 1, 8, mmx); | |
2679 SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx); | |
2680 SET_HPEL_FUNCS(avg, 1, 8, mmx); | |
2681 SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx); | |
2682 | |
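/* For reference, SET_HPEL_FUNCS(put, 0, 16, mmx) expands to:
 *     c->put_pixels_tab[0][0] = put_pixels16_mmx;
 *     c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
 *     c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
 *     c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
 */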
2683 c->gmc= gmc_mmx; | |
2684 | |
2685 c->add_bytes= add_bytes_mmx; | |
2686 c->add_bytes_l2= add_bytes_l2_mmx; | |
2687 | |
2688 c->draw_edges = draw_edges_mmx; | |
2689 | |
8596 | 2690 if (CONFIG_ANY_H263) {
8430 | 2691 c->h263_v_loop_filter= h263_v_loop_filter_mmx; |
2692 c->h263_h_loop_filter= h263_h_loop_filter_mmx; | |
2693 } | |
2694 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd; | |
2695 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx; | |
2696 c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd; | |
2697 | |
8519 | 2698 c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
2699 c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;
2700
8430 | 2701 c->h264_idct_dc_add= |
2702 c->h264_idct_add= ff_h264_idct_add_mmx; | |
2703 c->h264_idct8_dc_add= | |
2704 c->h264_idct8_add= ff_h264_idct8_add_mmx; | |
2705 | |
2706 c->h264_idct_add16 = ff_h264_idct_add16_mmx; | |
2707 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx; | |
2708 c->h264_idct_add8 = ff_h264_idct_add8_mmx; | |
2709 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx; | |
2710 | |
8817 | 2711 if (CONFIG_VP6_DECODER) { |
2712 c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx; | |
2713 } | |
2714 | |
9342 | 2715 if (mm_flags & FF_MM_MMX2) {
8430 | 2716 c->prefetch = prefetch_mmx2; |
2717 | |
2718 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; | |
2719 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2; | |
2720 | |
2721 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2; | |
2722 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2; | |
2723 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2; | |
2724 | |
2725 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2; | |
2726 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2; | |
2727 | |
2728 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2; | |
2729 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2; | |
2730 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2; | |
2731 | |
2732 c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2; | |
2733 c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2; | |
2734 c->h264_idct_add16 = ff_h264_idct_add16_mmx2; | |
2735 c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2; | |
2736 c->h264_idct_add8 = ff_h264_idct_add8_mmx2; | |
2737 c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2; | |
2738 | |
2739 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
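            /* note (not in the original source): the x2/y2/xy2 variants below
               use rounding shortcuts that are not bit-identical to the C
               reference, hence the CODEC_FLAG_BITEXACT guard */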
2740 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; | |
2741 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2; | |
2742 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2; | |
2743 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; | |
2744 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; | |
2745 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; | |
2746 | |
8596 | 2747 if (CONFIG_VP3_DECODER || CONFIG_THEORA_DECODER) {
8430 | 2748 c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2; |
2749 c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2; | |
2750 } | |
2751 } | |
2752 | |
2753 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \ | |
2754 c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \ | |
2755 c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \ | |
2756 c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \ | |
2757 c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \ | |
2758 c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \ | |
2759 c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \ | |
2760 c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \ | |
2761 c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \ | |
2762 c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \ | |
2763 c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \ | |
2764 c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \ | |
2765 c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \ | |
2766 c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \ | |
2767 c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \ | |
2768 c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \ | |
2769 c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU | |
2770 | |
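/* Illustration (expansion sketch, not in the original source): for example,
   SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2) fills all 16 quarter-pel positions:
       c->put_qpel_pixels_tab[1][ 0] = put_qpel8_mc00_mmx2;
       c->put_qpel_pixels_tab[1][ 1] = put_qpel8_mc10_mmx2;
       ...
       c->put_qpel_pixels_tab[1][15] = put_qpel8_mc33_mmx2;
   where mcXY names the (x,y) offset in units of 1/4 pixel. */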
2771 SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2); | |
2772 SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2); | |
2773 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2); | |
2774 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2); | |
2775 SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2); | |
2776 SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2); | |
2777 | |
2778 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2); | |
2779 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2); | |
2780 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2); | |
2781 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2); | |
2782 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2); | |
2783 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2); | |
2784 | |
2785 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2); | |
2786 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2); | |
2787 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2); | |
2788 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2); | |
2789 | |
8519 | 2790 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
8519 | 2791 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;
8519 | 2792
8430 | 2793 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd; |
2794 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2; | |
2795 c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2; | |
2796 c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2; | |
2797 c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2; | |
2798 c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2; | |
2799 c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2; | |
2800 c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2; | |
2801 c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2; | |
2802 c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2; | |
2803 c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2; | |
2804 | |
2805 c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2; | |
2806 c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2; | |
2807 c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2; | |
2808 c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2; | |
2809 c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2; | |
2810 c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2; | |
2811 c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2; | |
2812 c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2; | |
2813 | |
2814 c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2; | |
2815 c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2; | |
2816 c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2; | |
2817 c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2; | |
2818 c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2; | |
2819 c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2; | |
2820 c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2; | |
2821 c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2; | |
2822 | |
8760 | 2823 #if HAVE_YASM |
2824 c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2; | |
2825 #endif | |
8798 a5c8210814d7 (diego): Add check whether the compiler/assembler supports 10 or more operands. | 2826 #if HAVE_7REGS && HAVE_TEN_OPERANDS
8760 | 2827 if( mm_flags&FF_MM_3DNOW ) |
2828 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov; | |
2829 #endif | |
2830 | |
8596 | 2831 if (CONFIG_CAVS_DECODER)
8430 | 2832 ff_cavsdsp_init_mmx2(c, avctx); |
2833 | |
8596 | 2834 if (CONFIG_VC1_DECODER || CONFIG_WMV3_DECODER)
8430 | 2835 ff_vc1dsp_init_mmx(c, avctx); |
2836 | |
2837 c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2; | |
2838 } else if (mm_flags & FF_MM_3DNOW) { | |
2839 c->prefetch = prefetch_3dnow; | |
2840 | |
2841 c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow; | |
2842 c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow; | |
2843 | |
2844 c->avg_pixels_tab[0][0] = avg_pixels16_3dnow; | |
2845 c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow; | |
2846 c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow; | |
2847 | |
2848 c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow; | |
2849 c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow; | |
2850 | |
2851 c->avg_pixels_tab[1][0] = avg_pixels8_3dnow; | |
2852 c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow; | |
2853 c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow; | |
2854 | |
2855 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
2856 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow; | |
2857 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow; | |
2858 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow; | |
2859 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow; | |
2860 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow; | |
2861 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow; | |
2862 } | |
2863 | |
2864 SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow); | |
2865 SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow); | |
2866 SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow); | |
2867 SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow); | |
2868 SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow); | |
2869 SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow); | |
2870 | |
2871 SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow); | |
2872 SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow); | |
2873 SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow); | |
2874 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow); | |
2875 SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow); | |
2876 SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow); | |
2877 | |
2878 SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow); | |
2879 SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow); | |
2880 SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow); | |
2881 SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow); | |
2882 | |
2883 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd; | |
2884 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow; | |
2885 | |
8519 | 2886 c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
8519 | 2887 c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;
8519 | 2888
8596 | 2889 if (CONFIG_CAVS_DECODER)
8430 | 2890 ff_cavsdsp_init_3dnow(c, avctx); |
2891 } | |
2892 | |
2893 | |
2894 #define H264_QPEL_FUNCS(x, y, CPU)\ | |
2895 c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\ | |
2896 c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\ | |
2897 c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\ | |
2898 c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU; | |
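/* Illustration (expansion sketch, not in the original source):
   H264_QPEL_FUNCS(0, 0, sse2) sets the x+y*4 == 0 (full-pel) entry for both
   block sizes:
       c->put_h264_qpel_pixels_tab[0][0] = put_h264_qpel16_mc00_sse2;
       c->put_h264_qpel_pixels_tab[1][0] = put_h264_qpel8_mc00_sse2;
       c->avg_h264_qpel_pixels_tab[0][0] = avg_h264_qpel16_mc00_sse2;
       c->avg_h264_qpel_pixels_tab[1][0] = avg_h264_qpel8_mc00_sse2;
*/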
2899 if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){ | |
2900 // these functions are slower than mmx on AMD, but faster on Intel | |
2901 /* FIXME: works in most codecs, but crashes svq1 due to unaligned chroma |
2902 c->put_pixels_tab[0][0] = put_pixels16_sse2; | |
2903 c->avg_pixels_tab[0][0] = avg_pixels16_sse2; | |
2904 */ | |
2905 H264_QPEL_FUNCS(0, 0, sse2); | |
2906 } | |
2907 if(mm_flags & FF_MM_SSE2){ | |
2908 c->h264_idct8_add = ff_h264_idct8_add_sse2; | |
2909 c->h264_idct8_add4= ff_h264_idct8_add4_sse2; | |
2910 | |
2911 H264_QPEL_FUNCS(0, 1, sse2); | |
2912 H264_QPEL_FUNCS(0, 2, sse2); | |
2913 H264_QPEL_FUNCS(0, 3, sse2); | |
2914 H264_QPEL_FUNCS(1, 1, sse2); | |
2915 H264_QPEL_FUNCS(1, 2, sse2); | |
2916 H264_QPEL_FUNCS(1, 3, sse2); | |
2917 H264_QPEL_FUNCS(2, 1, sse2); | |
2918 H264_QPEL_FUNCS(2, 2, sse2); | |
2919 H264_QPEL_FUNCS(2, 3, sse2); | |
2920 H264_QPEL_FUNCS(3, 1, sse2); | |
2921 H264_QPEL_FUNCS(3, 2, sse2); | |
2922 H264_QPEL_FUNCS(3, 3, sse2); | |
8818 | 2923 |
2924 if (CONFIG_VP6_DECODER) { | |
2925 c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2; | |
2926 } | |
8430 | 2927 } |
8590 | 2928 #if HAVE_SSSE3 |
8430 | 2929 if(mm_flags & FF_MM_SSSE3){ |
2930 H264_QPEL_FUNCS(1, 0, ssse3); | |
2931 H264_QPEL_FUNCS(1, 1, ssse3); | |
2932 H264_QPEL_FUNCS(1, 2, ssse3); | |
2933 H264_QPEL_FUNCS(1, 3, ssse3); | |
2934 H264_QPEL_FUNCS(2, 0, ssse3); | |
2935 H264_QPEL_FUNCS(2, 1, ssse3); | |
2936 H264_QPEL_FUNCS(2, 2, ssse3); | |
2937 H264_QPEL_FUNCS(2, 3, ssse3); | |
2938 H264_QPEL_FUNCS(3, 0, ssse3); | |
2939 H264_QPEL_FUNCS(3, 1, ssse3); | |
2940 H264_QPEL_FUNCS(3, 2, ssse3); | |
2941 H264_QPEL_FUNCS(3, 3, ssse3); | |
2942 c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd; | |
2943 c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd; | |
2944 c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd; | |
2945 c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3; | |
2946 c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3; | |
2947 c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3; | |
2948 } | |
2949 #endif | |
2950 | |
8590 | 2951 #if CONFIG_GPL && HAVE_YASM |
9342 | 2952 if (mm_flags & FF_MM_MMX2){
8590 | 2953 #if ARCH_X86_32 |
8430 | 2954 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext; |
2955 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext; | |
2956 #endif | |
8510 cea216e44ee3 (darkshikari): Add x264 SSE2 iDCT functions to H.264 decoder. | 2957 if( mm_flags&FF_MM_SSE2 ){
8590 | 2958 #if ARCH_X86_64 || !defined(__ICC) || __ICC > 1100 |
8430 | 2959 c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2; |
2960 c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2; | |
2961 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2; | |
2962 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2; | |
8510 | 2963 #endif
8510 | 2964 c->h264_idct_add16 = ff_h264_idct_add16_sse2;
8510 | 2965 c->h264_idct_add8 = ff_h264_idct_add8_sse2;
8510 | 2966 c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
8430 | 2967 } |
2968 } | |
2969 #endif | |
2970 | |
8590 | 2971 #if CONFIG_SNOW_DECODER |
8430 | 2972 if(mm_flags & FF_MM_SSE2 & 0){ /* the "& 0" deliberately keeps this SSE2 snow path disabled */
2973 c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2; | |
8590 | 2974 #if HAVE_7REGS |
8430 | 2975 c->vertical_compose97i = ff_snow_vertical_compose97i_sse2; |
2976 #endif | |
2977 c->inner_add_yblock = ff_snow_inner_add_yblock_sse2; | |
2978 } | |
2979 else{ | |
9342 | 2980 if(mm_flags & FF_MM_MMX2){
8430 | 2981 c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx; |
8590 | 2982 #if HAVE_7REGS |
8430 | 2983 c->vertical_compose97i = ff_snow_vertical_compose97i_mmx; |
2984 #endif | |
2985 } | |
2986 c->inner_add_yblock = ff_snow_inner_add_yblock_mmx; | |
2987 } | |
2988 #endif | |
2989 | |
2990 if(mm_flags & FF_MM_3DNOW){ | |
2991 c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow; | |
2992 c->vector_fmul = vector_fmul_3dnow; | |
2993 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
2994 c->float_to_int16 = float_to_int16_3dnow; | |
2995 c->float_to_int16_interleave = float_to_int16_interleave_3dnow; | |
2996 } | |
2997 } | |
2998 if(mm_flags & FF_MM_3DNOWEXT){ | |
2999 c->vector_fmul_reverse = vector_fmul_reverse_3dnow2; | |
3000 c->vector_fmul_window = vector_fmul_window_3dnow2; | |
3001 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
3002 c->float_to_int16_interleave = float_to_int16_interleave_3dn2; | |
3003 } | |
3004 } | |
3005 if(mm_flags & FF_MM_SSE){ | |
3006 c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse; | |
3007 c->ac3_downmix = ac3_downmix_sse; | |
3008 c->vector_fmul = vector_fmul_sse; | |
3009 c->vector_fmul_reverse = vector_fmul_reverse_sse; | |
3010 c->vector_fmul_add_add = vector_fmul_add_add_sse; | |
3011 c->vector_fmul_window = vector_fmul_window_sse; | |
3012 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse; | |
3013 c->float_to_int16 = float_to_int16_sse; | |
3014 c->float_to_int16_interleave = float_to_int16_interleave_sse; | |
3015 } | |
3016 if(mm_flags & FF_MM_3DNOW) | |
3017 c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse | |
3018 if(mm_flags & FF_MM_SSE2){ | |
3019 c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2; | |
3020 c->float_to_int16 = float_to_int16_sse2; | |
3021 c->float_to_int16_interleave = float_to_int16_interleave_sse2; | |
3022 c->add_int16 = add_int16_sse2; | |
3023 c->sub_int16 = sub_int16_sse2; | |
3024 c->scalarproduct_int16 = scalarproduct_int16_sse2; | |
3025 } | |
3026 } | |
3027 | |
8596 | 3028 if (CONFIG_ENCODERS)
8430 | 3029 dsputilenc_init_mmx(c, avctx); |
3030 | |
3031 #if 0 | |
3032 // for speed testing | |
3033 get_pixels = just_return; | |
3034 put_pixels_clamped = just_return; | |
3035 add_pixels_clamped = just_return; | |
3036 | |
3037 pix_abs16x16 = just_return; | |
3038 pix_abs16x16_x2 = just_return; | |
3039 pix_abs16x16_y2 = just_return; | |
3040 pix_abs16x16_xy2 = just_return; | |
3041 | |
3042 put_pixels_tab[0] = just_return; | |
3043 put_pixels_tab[1] = just_return; | |
3044 put_pixels_tab[2] = just_return; | |
3045 put_pixels_tab[3] = just_return; | |
3046 | |
3047 put_no_rnd_pixels_tab[0] = just_return; | |
3048 put_no_rnd_pixels_tab[1] = just_return; | |
3049 put_no_rnd_pixels_tab[2] = just_return; | |
3050 put_no_rnd_pixels_tab[3] = just_return; | |
3051 | |
3052 avg_pixels_tab[0] = just_return; | |
3053 avg_pixels_tab[1] = just_return; | |
3054 avg_pixels_tab[2] = just_return; | |
3055 avg_pixels_tab[3] = just_return; | |
3056 | |
3057 avg_no_rnd_pixels_tab[0] = just_return; | |
3058 avg_no_rnd_pixels_tab[1] = just_return; | |
3059 avg_no_rnd_pixels_tab[2] = just_return; | |
3060 avg_no_rnd_pixels_tab[3] = just_return; | |
3061 | |
3062 //av_fdct = just_return; | |
3063 //ff_idct = just_return; | |
3064 #endif | |
3065 } |