x86/h264dsp_mmx.c @ 12435:fe78a4548d12 (Mercurial annotate, libavcodec.hg)
changeset: Put ff_ prefix on non-static {put_signed,put,add}_pixels_clamped_mmx() functions.
author:    rbultje
date:      Mon, 30 Aug 2010 16:22:27 +0000
parent:    3fc4c625b6f3    child: b242eb86ea9a
/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil_mmx.h"
#include "libavcodec/h264pred.h"

DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
DECLARE_ALIGNED(8, static const uint64_t, ff_pb_7_3 ) = 0x0307030703070307ULL;

/***********************************/
/* IDCT */

#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"
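/* SUMSUB_BADC does two packed-word butterflies in place:
 *   a := a+b, b := b-a(old)   and   c := c+d, d := d-c(old);
 * doubling b and d before the subtract avoids a scratch register. */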

#define SUMSUBD2_AB( a, b, t ) \
    "movq  "#b", "#t" \n\t"\
    "psraw  $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw  $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"
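/* SUMSUBD2_AB is the odd-part butterfly of the H.264 4x4 transform:
 * b := a + (b>>1), a := (a>>1) - b(old); t is clobbered as scratch. */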

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA  ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )

#define STORE_DIFF_4P( p, t, z ) \
    "psraw      $6,   "#p" \n\t"\
    "movd       (%0), "#t" \n\t"\
    "punpcklbw "#z",  "#t" \n\t"\
    "paddsw    "#t",  "#p" \n\t"\
    "packuswb  "#z",  "#p" \n\t"\
    "movd      "#p",  (%0) \n\t"
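/* STORE_DIFF_4P: shift the residual row p down by 6 (the +32 rounding bias
 * is added before this point), widen the four bytes at (%0) with the zero
 * register z, add with signed saturation, and store back with unsigned
 * byte clamping. */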

static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
    :: "r"(block) );

    __asm__ volatile(
        /* mm1=s02+s13  mm2=s02-s13  mm4=d02+d13  mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq      %0,    %%mm6 \n\t"
        /* in: 1,4,0,2  out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )

        "paddw     %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13  mm3=s02-s13  mm4=d02+d13  mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7      \n\t"
    :: "m"(ff_pw_32));

    __asm__ volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0             \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0             \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0             \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((x86_reg)stride)
    );
}
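
/* For orientation, a scalar sketch of the 4x4 transform the MMX routine
 * above implements (illustrative only, kept out of the build; the function
 * name is hypothetical): */
#if 0
static void h264_idct_add_c_sketch(uint8_t *dst, int16_t *block, int stride)
{
    int i, j, tmp[16];
    for(i=0; i<4; i++){ /* horizontal pass */
        const int z0 =  block[i*4+0]     +  block[i*4+2];
        const int z1 =  block[i*4+0]     -  block[i*4+2];
        const int z2 = (block[i*4+1]>>1) -  block[i*4+3];
        const int z3 =  block[i*4+1]     + (block[i*4+3]>>1);
        tmp[i*4+0] = z0 + z3;
        tmp[i*4+1] = z1 + z2;
        tmp[i*4+2] = z1 - z2;
        tmp[i*4+3] = z0 - z3;
    }
    for(i=0; i<4; i++){ /* vertical pass, then add to dst with clamping */
        const int z0 =  tmp[0*4+i]     +  tmp[2*4+i];
        const int z1 =  tmp[0*4+i]     -  tmp[2*4+i];
        const int z2 = (tmp[1*4+i]>>1) -  tmp[3*4+i];
        const int z3 =  tmp[1*4+i]     + (tmp[3*4+i]>>1);
        const int res[4] = { z0+z3, z1+z2, z1-z2, z0-z3 };
        for(j=0; j<4; j++){
            int v = dst[j*stride+i] + ((res[j] + 32) >> 6);
            dst[j*stride+i] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
    }
}
#endif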

static inline void h264_idct8_1d(int16_t *block)
{
    __asm__ volatile(
        "movq 112(%0), %%mm7  \n\t"
        "movq  80(%0), %%mm0  \n\t"
        "movq  48(%0), %%mm3  \n\t"
        "movq  16(%0), %%mm5  \n\t"

        "movq   %%mm0, %%mm4  \n\t"
        "movq   %%mm5, %%mm1  \n\t"
        "psraw  $1,    %%mm4  \n\t"
        "psraw  $1,    %%mm1  \n\t"
        "paddw  %%mm0, %%mm4  \n\t"
        "paddw  %%mm5, %%mm1  \n\t"
        "paddw  %%mm7, %%mm4  \n\t"
        "paddw  %%mm0, %%mm1  \n\t"
        "psubw  %%mm5, %%mm4  \n\t"
        "paddw  %%mm3, %%mm1  \n\t"

        "psubw  %%mm3, %%mm5  \n\t"
        "psubw  %%mm3, %%mm0  \n\t"
        "paddw  %%mm7, %%mm5  \n\t"
        "psubw  %%mm7, %%mm0  \n\t"
        "psraw  $1,    %%mm3  \n\t"
        "psraw  $1,    %%mm7  \n\t"
        "psubw  %%mm3, %%mm5  \n\t"
        "psubw  %%mm7, %%mm0  \n\t"

        "movq   %%mm4, %%mm3  \n\t"
        "movq   %%mm1, %%mm7  \n\t"
        "psraw  $2,    %%mm1  \n\t"
        "psraw  $2,    %%mm3  \n\t"
        "paddw  %%mm5, %%mm3  \n\t"
        "psraw  $2,    %%mm5  \n\t"
        "paddw  %%mm0, %%mm1  \n\t"
        "psraw  $2,    %%mm0  \n\t"
        "psubw  %%mm4, %%mm5  \n\t"
        "psubw  %%mm0, %%mm7  \n\t"

        "movq  32(%0), %%mm2  \n\t"
        "movq  96(%0), %%mm6  \n\t"
        "movq   %%mm2, %%mm4  \n\t"
        "movq   %%mm6, %%mm0  \n\t"
        "psraw  $1,    %%mm4  \n\t"
        "psraw  $1,    %%mm6  \n\t"
        "psubw  %%mm0, %%mm4  \n\t"
        "paddw  %%mm2, %%mm6  \n\t"

        "movq    (%0), %%mm2  \n\t"
        "movq  64(%0), %%mm0  \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}

static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    DECLARE_ALIGNED(8, int16_t, b2)[64];

    block[0] += 32;

    for(i=0; i<2; i++){
        DECLARE_ALIGNED(8, uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        __asm__ volatile(
            "movq   %%mm7,    %0   \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq   %%mm0,  8(%1)  \n\t"
            "movq   %%mm6, 24(%1)  \n\t"
            "movq   %%mm7, 40(%1)  \n\t"
            "movq   %%mm4, 56(%1)  \n\t"
            "movq    %0,    %%mm7  \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq   %%mm7,   (%1)  \n\t"
            "movq   %%mm1, 16(%1)  \n\t"
            "movq   %%mm0, 32(%1)  \n\t"
            "movq   %%mm3, 48(%1)  \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        __asm__ volatile(
            "psraw     $6, %%mm7  \n\t"
            "psraw     $6, %%mm6  \n\t"
            "psraw     $6, %%mm5  \n\t"
            "psraw     $6, %%mm4  \n\t"
            "psraw     $6, %%mm3  \n\t"
            "psraw     $6, %%mm2  \n\t"
            "psraw     $6, %%mm1  \n\t"
            "psraw     $6, %%mm0  \n\t"

            "movq   %%mm7,    (%0)  \n\t"
            "movq   %%mm5,  16(%0)  \n\t"
            "movq   %%mm3,  32(%0)  \n\t"
            "movq   %%mm1,  48(%0)  \n\t"
            "movq   %%mm0,  64(%0)  \n\t"
            "movq   %%mm2,  80(%0)  \n\t"
            "movq   %%mm4,  96(%0)  \n\t"
            "movq   %%mm6, 112(%0)  \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    ff_add_pixels_clamped_mmx(b2, dst, stride);
}

#define STORE_DIFF_8P( p, d, t, z )\
    "movq      "#d", "#t" \n"\
    "psraw      $6,  "#p" \n"\
    "punpcklbw "#z", "#t" \n"\
    "paddsw    "#t", "#p" \n"\
    "packuswb  "#p", "#p" \n"\
    "movq      "#p", "#d" \n"

#define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\
    "movdqa     "#c", "#a" \n"\
    "movdqa     "#g", "#e" \n"\
    "psraw       $1,  "#c" \n"\
    "psraw       $1,  "#g" \n"\
    "psubw      "#e", "#c" \n"\
    "paddw      "#a", "#g" \n"\
    "movdqa     "#b", "#e" \n"\
    "psraw       $1,  "#e" \n"\
    "paddw      "#b", "#e" \n"\
    "paddw      "#d", "#e" \n"\
    "paddw      "#f", "#e" \n"\
    "movdqa     "#f", "#a" \n"\
    "psraw       $1,  "#a" \n"\
    "paddw      "#f", "#a" \n"\
    "paddw      "#h", "#a" \n"\
    "psubw      "#b", "#a" \n"\
    "psubw      "#d", "#b" \n"\
    "psubw      "#d", "#f" \n"\
    "paddw      "#h", "#b" \n"\
    "psubw      "#h", "#f" \n"\
    "psraw       $1,  "#d" \n"\
    "psraw       $1,  "#h" \n"\
    "psubw      "#d", "#b" \n"\
    "psubw      "#h", "#f" \n"\
    "movdqa     "#e", "#d" \n"\
    "movdqa     "#a", "#h" \n"\
    "psraw       $2,  "#d" \n"\
    "psraw       $2,  "#h" \n"\
    "paddw      "#f", "#d" \n"\
    "paddw      "#b", "#h" \n"\
    "psraw       $2,  "#f" \n"\
    "psraw       $2,  "#b" \n"\
    "psubw      "#f", "#e" \n"\
    "psubw      "#a", "#b" \n"\
    "movdqa 0x00(%1), "#a" \n"\
    "movdqa 0x40(%1), "#f" \n"\
    SUMSUB_BA(f, a)\
    SUMSUB_BA(g, f)\
    SUMSUB_BA(c, a)\
    SUMSUB_BA(e, g)\
    SUMSUB_BA(b, c)\
    SUMSUB_BA(h, a)\
    SUMSUB_BA(d, f)

static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile(
        "movdqa 0x10(%1), %%xmm1 \n"
        "movdqa 0x20(%1), %%xmm2 \n"
        "movdqa 0x30(%1), %%xmm3 \n"
        "movdqa 0x50(%1), %%xmm5 \n"
        "movdqa 0x60(%1), %%xmm6 \n"
        "movdqa 0x70(%1), %%xmm7 \n"
        H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
        TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1))
        "paddw      %4,     %%xmm4   \n"
        "movdqa     %%xmm4, 0x00(%1) \n"
        "movdqa     %%xmm2, 0x40(%1) \n"
        H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1)
        "movdqa     %%xmm6, 0x60(%1) \n"
        "movdqa     %%xmm7, 0x70(%1) \n"
        "pxor       %%xmm7, %%xmm7   \n"
        STORE_DIFF_8P(%%xmm2, (%0),      %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm0, (%0,%2),   %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm3, (%0,%3),   %%xmm6, %%xmm7)
        "lea (%0,%2,4), %0 \n"
        STORE_DIFF_8P(%%xmm5, (%0),      %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm4, (%0,%2),   %%xmm6, %%xmm7)
        "movdqa 0x60(%1), %%xmm0 \n"
        "movdqa 0x70(%1), %%xmm1 \n"
        STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%3),   %%xmm6, %%xmm7)
        :"+r"(dst)
        :"r"(block), "r"((x86_reg)stride), "r"((x86_reg)3L*stride), "m"(ff_pw_32)
    );
}

static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    __asm__ volatile(
        "movd      %0,     %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor      %%mm1,  %%mm1 \n\t"
        "psubw     %%mm0,  %%mm1 \n\t"
        "packuswb  %%mm0,  %%mm0 \n\t"
        "packuswb  %%mm1,  %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}
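
/* Scalar equivalent of the block above: dst[x] = av_clip_uint8(dst[x] + dc)
 * for all 16 pixels; splitting dc into a +part (mm0) and a -part (mm1) lets
 * paddusb/psubusb perform the clamping for free. */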

static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    __asm__ volatile(
        "movd      %0,     %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor      %%mm1,  %%mm1 \n\t"
        "psubw     %%mm0,  %%mm1 \n\t"
        "packuswb  %%mm0,  %%mm0 \n\t"
        "packuswb  %%mm1,  %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dst+0*stride)),
         "+m"(*(uint64_t*)(dst+1*stride)),
         "+m"(*(uint64_t*)(dst+2*stride)),
         "+m"(*(uint64_t*)(dst+3*stride))
    );
    }
}

//FIXME this table is a duplicate from h264data.h, and will be removed once the tables from h264 have been split
static const uint8_t scan8[16 + 2*4]={
 4+1*8, 5+1*8, 4+2*8, 5+2*8,
 6+1*8, 7+1*8, 6+2*8, 7+2*8,
 4+3*8, 5+3*8, 4+4*8, 5+4*8,
 6+3*8, 7+3*8, 6+4*8, 7+4*8,
 1+1*8, 2+1*8,
 1+2*8, 2+2*8,
 1+4*8, 2+4*8,
 1+5*8, 2+5*8,
};
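/* scan8[] maps a block index (0-15 luma, 16-23 chroma) to its slot in the
 * decoder's 8-wide (6*8 entry) non-zero-count cache indexed below. */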

static void ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        if(nnzc[ scan8[i] ])
            ff_h264_idct8_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}


static void ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_mmx    (dst + block_offset[i], block + i*16, stride);
        }
    }
}
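
/* In the mmx2 dispatchers, a non-zero count of exactly 1 combined with a
 * non-zero DC coefficient marks a DC-only block, which takes the cheaper
 * idct_dc_add path instead of the full inverse transform. */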

static void ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ] || block[i*16])
            ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_mmx    (dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_mmx    (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_sse2   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ] || block[i*16])
            ff_h264_idct_add_mmx    (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct_add8_mmx2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_mmx    (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            ff_h264_idct_dc_add_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

#if CONFIG_GPL && HAVE_YASM
static void ff_h264_idct_dc_add8_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile(
        "movd             %0, %%mm0 \n\t"   //  0 0 X D
        "punpcklwd        %1, %%mm0 \n\t"   //  x X d D
        "paddsw           %2, %%mm0 \n\t"
        "psraw            $6, %%mm0 \n\t"
        "punpcklwd     %%mm0, %%mm0 \n\t"   //  d d D D
        "pxor          %%mm1, %%mm1 \n\t"   //  0 0 0 0
        "psubw         %%mm0, %%mm1 \n\t"   // -d-d-D-D
        "packuswb      %%mm1, %%mm0 \n\t"   // -d-d-D-D d d D D
        "pshufw $0xFA, %%mm0, %%mm1 \n\t"   // -d-d-d-d-D-D-D-D
        "punpcklwd     %%mm0, %%mm0 \n\t"   //  d d d d D D D D
        ::"m"(block[ 0]),
          "m"(block[16]),
          "m"(ff_pw_32)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dst+0*stride)),
         "+m"(*(uint64_t*)(dst+1*stride)),
         "+m"(*(uint64_t*)(dst+2*stride)),
         "+m"(*(uint64_t*)(dst+3*stride))
    );
}

extern void ff_x264_add8x4_idct_sse2(uint8_t *dst, int16_t *block, int stride);

static void ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=2)
        if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
            ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride);
}

static void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=2){
        if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
            ff_x264_add8x4_idct_sse2 (dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]|block[i*16+16])
            ff_h264_idct_dc_add8_mmx2(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i+=2){
        if(nnzc[ scan8[i+0] ]|nnzc[ scan8[i+1] ])
            ff_x264_add8x4_idct_sse2 (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16]|block[i*16+16])
            ff_h264_idct_dc_add8_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}
#endif

/***********************************/
/* deblocking */

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq     "#y", "#t"  \n\t"\
    "movq     "#x", "#o"  \n\t"\
    "psubusb  "#x", "#t"  \n\t"\
    "psubusb  "#y", "#o"  \n\t"\
    "por      "#t", "#o"  \n\t"\
    "psubusb  "#a", "#o"  \n\t"

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq     "#y", "#t"  \n\t"\
    "movq     "#x", "#o"  \n\t"\
    "psubusb  "#x", "#t"  \n\t"\
    "psubusb  "#y", "#o"  \n\t"\
    "psubusb  "#a", "#t"  \n\t"\
    "psubusb  "#a", "#o"  \n\t"\
    "pcmpeqb  "#t", "#o"  \n\t"\

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb  %%mm4, %%mm4      \n\t"\
    "packuswb  %%mm5, %%mm5      \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por       %%mm4, %%mm7      \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por       %%mm4, %%mm7      \n\t"\
    "pxor      %%mm6, %%mm6      \n\t"\
    "pcmpeqb   %%mm6, %%mm7      \n\t"
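
/* After H264_DEBLOCK_MASK, mm7 holds 0xFF exactly where the spec's filter
 * condition is true: |p0-q0| < alpha && |p1-p0| < beta && |q1-q0| < beta. */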

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    "movq    %%mm1              , %%mm5 \n\t"\
    "pxor    %%mm2              , %%mm5 \n\t" /* p0^q0*/\
    "pand    "#pb_01"           , %%mm5 \n\t" /* (p0^q0)&1*/\
    "pcmpeqb %%mm4              , %%mm4 \n\t"\
    "pxor    %%mm4              , %%mm3 \n\t"\
    "pavgb   %%mm0              , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
    "pavgb   "MANGLE(ff_pb_3)"  , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
    "pxor    %%mm1              , %%mm4 \n\t"\
    "pavgb   %%mm2              , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
    "pavgb   %%mm5              , %%mm3 \n\t"\
    "paddusb %%mm4              , %%mm3 \n\t" /* d+128+33*/\
    "movq    "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
    "psubusb %%mm3              , %%mm6 \n\t"\
    "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
    "pminub  %%mm7              , %%mm6 \n\t"\
    "pminub  %%mm7              , %%mm3 \n\t"\
    "psubusb %%mm6              , %%mm1 \n\t"\
    "psubusb %%mm3              , %%mm2 \n\t"\
    "paddusb %%mm3              , %%mm1 \n\t"\
    "paddusb %%mm6              , %%mm2 \n\t"
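
/* H264_DEBLOCK_P0_Q0 realizes the spec's
 *   delta = clip((((q0-p0)<<2) + (p1-q1) + 4) >> 3, -tc, tc)
 * entirely with byte averages (pavgb), so the whole filter stays in
 * unsigned 8-bit arithmetic; ff_pb_A1 removes the accumulated bias. */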

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone
// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq     %%mm1,  "#tmp"   \n\t"\
    "pavgb    %%mm2,  "#tmp"   \n\t"\
    "pavgb    "#tmp", "#q2"    \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor   "q2addr", "#tmp"   \n\t"\
    "pand     %9,     "#tmp"   \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb  "#tmp", "#q2"    \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq     "#p1",  "#tmp"   \n\t"\
    "psubusb  "#tc0", "#tmp"   \n\t"\
    "paddusb  "#p1",  "#tc0"   \n\t"\
    "pmaxub   "#tmp", "#q2"    \n\t"\
    "pminub   "#tc0", "#q2"    \n\t"\
    "movq     "#q2", "q1addr"  \n\t"

static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    DECLARE_ALIGNED(8, uint64_t, tmp0)[2];

    __asm__ volatile(
        "movq    (%2,%4),   %%mm0  \n\t" //p1
        "movq    (%2,%4,2), %%mm1  \n\t" //p0
        "movq    (%3),      %%mm2  \n\t" //q0
        "movq    (%3,%4),   %%mm3  \n\t" //q1
        H264_DEBLOCK_MASK(%7, %8)

        "movd      %6,    %%mm4    \n\t"
        "punpcklbw %%mm4, %%mm4    \n\t"
        "punpcklwd %%mm4, %%mm4    \n\t"
        "pcmpeqb   %%mm3, %%mm3    \n\t"
        "movq      %%mm4, %%mm6    \n\t"
        "pcmpgtb   %%mm3, %%mm4    \n\t"
        "movq      %%mm6, %1       \n\t"
        "pand      %%mm4, %%mm7    \n\t"
        "movq      %%mm7, %0       \n\t"

        /* filter p1 */
        "movq     (%2),   %%mm3    \n\t" //p2
        DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pand     %%mm7,  %%mm6    \n\t" // mask & |p2-p0|<beta
        "pand     %1,     %%mm7    \n\t" // mask & tc0
        "movq     %%mm7,  %%mm4    \n\t"
        "psubb    %%mm6,  %%mm7    \n\t"
        "pand     %%mm4,  %%mm6    \n\t" // mask & |p2-p0|<beta & tc0
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%2)", "(%2,%4)", %%mm6, %%mm4)

        /* filter q1 */
        "movq    (%3,%4,2), %%mm4  \n\t" //q2
        DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pand     %0,     %%mm6    \n\t"
        "movq     %1,     %%mm5    \n\t" // can be merged with the and below but is slower than it
        "pand     %%mm6,  %%mm5    \n\t"
        "psubb    %%mm6,  %%mm7    \n\t"
        "movq    (%3,%4), %%mm3    \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%3,%4,2)", "(%3,%4)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%9, unused)
        "movq      %%mm1, (%2,%4,2) \n\t"
        "movq      %%mm2, (%3)      \n\t"

        : "=m"(tmp0[0]), "=m"(tmp0[1])
        : "r"(pix-3*stride), "r"(pix), "r"((x86_reg)stride),
          "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
          "m"(ff_bone)
    );
}

static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}
static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    DECLARE_ALIGNED(8, uint8_t, trans)[8*8];
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans,       pix-4,          8, stride);
        transpose4x4(trans  +4*8, pix,            8, stride);
        transpose4x4(trans+4,     pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix  +4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2,          trans  +2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}

static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    __asm__ volatile(
        "movq    (%0),    %%mm0     \n\t" //p1
        "movq    (%0,%2), %%mm1     \n\t" //p0
        "movq    (%1),    %%mm2     \n\t" //q0
        "movq    (%1,%2), %%mm3     \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd      %3,    %%mm6     \n\t"
        "punpcklbw %%mm6, %%mm6     \n\t"
        "pand      %%mm6, %%mm7     \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq      %%mm1, (%0,%2)   \n\t"
        "movq      %%mm2, (%1)      \n\t"

        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq    "#p0",  %%mm4  \n\t"\
    "pxor    "#q1",  %%mm4  \n\t"\
    "pand    "#one", %%mm4  \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb   "#q1",  "#p0"  \n\t"\
    "psubusb %%mm4,  "#p0"  \n\t"\
    "pavgb   "#p1",  "#p0"  \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\

static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    __asm__ volatile(
        "movq    (%0),    %%mm0     \n\t"
        "movq    (%0,%2), %%mm1     \n\t"
        "movq    (%1),    %%mm2     \n\t"
        "movq    (%1,%2), %%mm3     \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq    %%mm1,   %%mm5     \n\t"
        "movq    %%mm2,   %%mm6     \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb   %%mm5,   %%mm1     \n\t"
        "psubb   %%mm6,   %%mm2     \n\t"
        "pand    %%mm7,   %%mm1     \n\t"
        "pand    %%mm7,   %%mm2     \n\t"
        "paddb   %%mm5,   %%mm1     \n\t"
        "paddb   %%mm6,   %%mm2     \n\t"
        "movq    %%mm1,   (%0,%2)   \n\t"
        "movq    %%mm2,   (%1)      \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "m"(alpha1), "m"(beta1), "m"(ff_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED(8, uint8_t, trans)[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    int dir;
    __asm__ volatile(
        "movq %0, %%mm7 \n"
        "movq %1, %%mm6 \n"
        ::"m"(ff_pb_1), "m"(ff_pb_3)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm6 \n"
            ::"m"(ff_pb_3_1)
        );
    __asm__ volatile(
        "movq  %%mm6, %%mm5 \n"
        "paddb %%mm5, %%mm5 \n"
    :);

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const x86_reg d_idx = dir ? -8 : -1;
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        DECLARE_ALIGNED(8, const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            __asm__ volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                if(bidir) {
                    __asm__ volatile(
                        "movd          (%1,%0), %%mm2 \n"
                        "punpckldq   40(%1,%0), %%mm2 \n" // { ref0[bn], ref1[bn] }
                        "pshufw $0x44,    (%1), %%mm0 \n" // { ref0[b], ref0[b] }
                        "pshufw $0x44,  40(%1), %%mm1 \n" // { ref1[b], ref1[b] }
                        "pshufw $0x4E,   %%mm2, %%mm3 \n"
                        "psubb           %%mm2, %%mm0 \n" // { ref0[b]!=ref0[bn], ref0[b]!=ref1[bn] }
                        "psubb           %%mm3, %%mm1 \n" // { ref1[b]!=ref1[bn], ref1[b]!=ref0[bn] }
                        "1:                           \n"
                        "por             %%mm1, %%mm0 \n"
                        "movq        (%2,%0,4), %%mm1 \n"
                        "movq       8(%2,%0,4), %%mm2 \n"
                        "movq            %%mm1, %%mm3 \n"
                        "movq            %%mm2, %%mm4 \n"
                        "psubw            (%2), %%mm1 \n"
                        "psubw           8(%2), %%mm2 \n"
                        "psubw          160(%2), %%mm3 \n"
                        "psubw          168(%2), %%mm4 \n"
                        "packsswb        %%mm2, %%mm1 \n"
                        "packsswb        %%mm4, %%mm3 \n"
                        "paddb           %%mm6, %%mm1 \n"
                        "paddb           %%mm6, %%mm3 \n"
                        "psubusb         %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                        "psubusb         %%mm5, %%mm3 \n"
                        "packsswb        %%mm3, %%mm1 \n"
                        "add $40, %0                  \n"
                        "cmp $40, %0                  \n"
                        "jl 1b                        \n"
                        "sub $80, %0                  \n"
                        "pshufw $0x4E,   %%mm1, %%mm1 \n"
                        "por             %%mm1, %%mm0 \n"
                        "pshufw $0x4E,   %%mm0, %%mm1 \n"
                        "pminub          %%mm1, %%mm0 \n"
                        ::"r"(d_idx),
                          "r"(ref[0]+b_idx),
                          "r"(mv[0]+b_idx)
                    );
                } else {
                    __asm__ volatile(
                        "movd        (%1), %%mm0 \n"
                        "psubb    (%1,%0), %%mm0 \n" // ref[b] != ref[bn]
                        "movq        (%2), %%mm1 \n"
                        "movq       8(%2), %%mm2 \n"
                        "psubw  (%2,%0,4), %%mm1 \n"
                        "psubw 8(%2,%0,4), %%mm2 \n"
                        "packsswb   %%mm2, %%mm1 \n"
                        "paddb      %%mm6, %%mm1 \n"
                        "psubusb    %%mm5, %%mm1 \n" // abs(mv[b] - mv[bn]) >= limit
                        "packsswb   %%mm1, %%mm1 \n"
                        "por        %%mm1, %%mm0 \n"
                        ::"r"(d_idx),
                          "r"(ref[0]+b_idx),
                          "r"(mv[0]+b_idx)
                    );
                }
            }
            __asm__ volatile(
                "movd %0, %%mm1 \n"
                "por  %1, %%mm1 \n" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            __asm__ volatile(
                "pminub    %%mm7, %%mm1 \n"
                "pminub    %%mm7, %%mm0 \n"
                "psllw        $1, %%mm1 \n"
                "pxor      %%mm2, %%mm2 \n"
                "pmaxub    %%mm0, %%mm1 \n"
                "punpcklbw %%mm2, %%mm1 \n"
                "movq      %%mm1, %0    \n"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
        edges = 4;
        step = 1;
    }
    __asm__ volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq  %%mm0,   (%0) \n\t"
        "movq  %%mm3,  8(%0) \n\t"
        "movq  %%mm4, 16(%0) \n\t"
        "movq  %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}
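
/* The resulting boundary strengths follow the spec's non-intra cases:
 * bS = 2 where either side of the edge has coded coefficients (nnz), 1 where
 * references or motion vectors differ by at least the limit, 0 otherwise. */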

/***********************************/
/* motion compensation */

#define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\
    "mov"#q" "#C", "#T"         \n\t"\
    "mov"#d" (%0), "#F"         \n\t"\
    "paddw "#D", "#T"           \n\t"\
    "psllw $2, "#T"             \n\t"\
    "psubw "#B", "#T"           \n\t"\
    "psubw "#E", "#T"           \n\t"\
    "punpcklbw "#Z", "#F"       \n\t"\
    "pmullw %4, "#T"            \n\t"\
    "paddw %5, "#A"             \n\t"\
    "add %2, %0                 \n\t"\
    "paddw "#F", "#A"           \n\t"\
    "paddw "#A", "#T"           \n\t"\
    "psraw $5, "#T"             \n\t"\
    "packuswb "#T", "#T"        \n\t"\
    OP(T, (%1), A, d)\
    "add %3, %1                 \n\t"

#define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\
    "mov"#q" "#C", "#T"         \n\t"\
    "mov"#d" (%0), "#F"         \n\t"\
    "paddw "#D", "#T"           \n\t"\
    "psllw $2, "#T"             \n\t"\
    "paddw %4, "#A"             \n\t"\
    "psubw "#B", "#T"           \n\t"\
    "psubw "#E", "#T"           \n\t"\
    "punpcklbw "#Z", "#F"       \n\t"\
    "pmullw %3, "#T"            \n\t"\
    "paddw "#F", "#A"           \n\t"\
    "add %2, %0                 \n\t"\
    "paddw "#A", "#T"           \n\t"\
    "mov"#q" "#T", "#OF"(%1)    \n\t"

#define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q)
#define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q)
#define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa)
#define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa)
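
/* Each QPEL_H264V/QPEL_H264HV step applies the 6-tap (1,-5,20,20,-5,1)
 * half-pel filter to six samples A..F: T = 20*(C+D) - 5*(B+E) + A + F + 16.
 * The V variant shifts by 5, packs and writes pixels; the HV variant stores
 * the 16-bit intermediate to tmp for a second filtering pass. */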


#define QPEL_H264(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=4;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "movq "MANGLE(ff_pw_5) ", %%mm4 \n\t"\
        "movq "MANGLE(ff_pw_16)", %%mm5 \n\t"\
        "1:                             \n\t"\
        "movd  -1(%0), %%mm1            \n\t"\
        "movd    (%0), %%mm2            \n\t"\
        "movd   1(%0), %%mm3            \n\t"\
        "movd   2(%0), %%mm0            \n\t"\
        "punpcklbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpcklbw %%mm7, %%mm3         \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "paddw %%mm0, %%mm1             \n\t"\
        "paddw %%mm3, %%mm2             \n\t"\
        "movd  -2(%0), %%mm0            \n\t"\
        "movd   3(%0), %%mm3            \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpcklbw %%mm7, %%mm3         \n\t"\
        "paddw %%mm3, %%mm0             \n\t"\
        "psllw $2, %%mm2                \n\t"\
        "psubw %%mm1, %%mm2             \n\t"\
        "pmullw %%mm4, %%mm2            \n\t"\
        "paddw %%mm5, %%mm0             \n\t"\
        "paddw %%mm2, %%mm0             \n\t"\
        "psraw $5, %%mm0                \n\t"\
        "packuswb %%mm0, %%mm0          \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %3, %0                     \n\t"\
        "add %4, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=4;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %0, %%mm4             \n\t"\
        "movq %1, %%mm5             \n\t"\
        :: "m"(ff_pw_5), "m"(ff_pw_16)\
    );\
    do{\
    __asm__ volatile(\
        "movd  -1(%0), %%mm1        \n\t"\
        "movd    (%0), %%mm2        \n\t"\
        "movd   1(%0), %%mm3        \n\t"\
        "movd   2(%0), %%mm0        \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "paddw %%mm0, %%mm1         \n\t"\
        "paddw %%mm3, %%mm2         \n\t"\
        "movd  -2(%0), %%mm0        \n\t"\
        "movd   3(%0), %%mm3        \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm3, %%mm0         \n\t"\
        "psllw $2, %%mm2            \n\t"\
        "psubw %%mm1, %%mm2         \n\t"\
        "pmullw %%mm4, %%mm2        \n\t"\
        "paddw %%mm5, %%mm0         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "movd   (%2), %%mm3         \n\t"\
        "psraw $5, %%mm0            \n\t"\
        "packuswb %%mm0, %%mm0      \n\t"\
        PAVGB" %%mm3, %%mm0         \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %4, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "add %3, %2                 \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2)\
        : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
        : "memory"\
    );\
    }while(--h);\
}\
static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    src -= 2*srcStride;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movd (%0), %%mm0           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm1           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm2           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm3           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm4           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=4;\
    int w=3;\
    src -= 2*srcStride+2;\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7      \n\t"\
            "movd (%0), %%mm0       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm1       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm2       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm3       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm4       \n\t"\
            "add %2, %0             \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
            \
            : "+a"(src)\
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 9*srcStride;\
    }\
    tmp -= 3*4;\
    __asm__ volatile(\
        "1:                         \n\t"\
        "movq     (%0), %%mm0       \n\t"\
        "paddw  10(%0), %%mm0       \n\t"\
        "movq    2(%0), %%mm1       \n\t"\
        "paddw   8(%0), %%mm1       \n\t"\
        "movq    4(%0), %%mm2       \n\t"\
        "paddw   6(%0), %%mm2       \n\t"\
        "psubw %%mm1, %%mm0         \n\t"/*a-b   (abccba)*/\
        "psraw $2, %%mm0            \n\t"/*(a-b)/4 */\
        "psubw %%mm1, %%mm0         \n\t"/*(a-b)/4-b */\
        "paddsw %%mm2, %%mm0        \n\t"\
        "psraw $2, %%mm0            \n\t"/*((a-b)/4-b+c)/4 */\
        "paddw %%mm2, %%mm0         \n\t"/*(a-5*b+20*c)/16 */\
        "psraw $6, %%mm0            \n\t"\
        "packuswb %%mm0, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm7, d)\
        "add $24, %0                \n\t"\
        "add %3, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(tmp), "+c"(dst), "+g"(h)\
        : "S"((x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq "MANGLE(ff_pw_5)", %%mm6\n\t"\
        "1:                         \n\t"\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "psllw $2, %%mm0            \n\t"\
        "psllw $2, %%mm1            \n\t"\
        "movq   -1(%0), %%mm2       \n\t"\
        "movq    2(%0), %%mm4       \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movd   -2(%0), %%mm2       \n\t"\
        "movd    7(%0), %%mm5       \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm3, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "movq "MANGLE(ff_pw_16)", %%mm5\n\t"\
        "paddw %%mm5, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm4, %%mm1         \n\t"\
        "psraw $5, %%mm0            \n\t"\
        "psraw $5, %%mm1            \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %0, %%mm6             \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
    __asm__ volatile(\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "psllw $2, %%mm0            \n\t"\
        "psllw $2, %%mm1            \n\t"\
        "movq   -1(%0), %%mm2       \n\t"\
        "movq    2(%0), %%mm4       \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movd   -2(%0), %%mm2       \n\t"\
        "movd    7(%0), %%mm5       \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm3, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "movq %5, %%mm5             \n\t"\
        "paddw %%mm5, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm4, %%mm1         \n\t"\
        "psraw $5, %%mm0            \n\t"\
        "psraw $5, %%mm1            \n\t"\
        "movq   (%2), %%mm4         \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        PAVGB" %%mm4, %%mm0         \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %4, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "add %3, %2                 \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2)\
        : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
          "m"(ff_pw_16)\
        : "memory"\
    );\
    }while(--h);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;\
\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7      \n\t"\
            "movd (%0), %%mm0       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm1       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm2       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm3       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm4       \n\t"\
            "add %2, %0             \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(h==16){\
            __asm__ volatile(\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
                QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
                QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
                QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                \
                : "+a"(src), "+c"(dst)\
                : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7      \n\t"\
            "movd (%0), %%mm0       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm1       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm2       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm3       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm4       \n\t"\
            "add %2, %0             \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
1342 : "+a"(src)\ | |
1343 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
1344 : "memory"\ | |
1345 );\ | |
1346 if(size==16){\ | |
1347 __asm__ volatile(\ | |
1348 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\ | |
1349 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\ | |
1350 QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\ | |
1351 QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\ | |
1352 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\ | |
1353 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\ | |
1354 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\ | |
1355 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\ | |
1356 : "+a"(src)\ | |
1357 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
1358 : "memory"\ | |
1359 );\ | |
1360 }\ | |
1361 tmp += 4;\ | |
1362 src += 4 - (size+5)*srcStride;\ | |
1363 }\ | |
1364 }\ | |
1365 static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\ | |
1366 int w = size>>4;\ | |
1367 do{\ | |
1368 int h = size;\ | |
1369 __asm__ volatile(\ | |
1370 "1: \n\t"\ | |
1371 "movq (%0), %%mm0 \n\t"\ | |
1372 "movq 8(%0), %%mm3 \n\t"\ | |
1373 "movq 2(%0), %%mm1 \n\t"\ | |
1374 "movq 10(%0), %%mm4 \n\t"\ | |
1375 "paddw %%mm4, %%mm0 \n\t"\ | |
1376 "paddw %%mm3, %%mm1 \n\t"\ | |
1377 "paddw 18(%0), %%mm3 \n\t"\ | |
1378 "paddw 16(%0), %%mm4 \n\t"\ | |
1379 "movq 4(%0), %%mm2 \n\t"\ | |
1380 "movq 12(%0), %%mm5 \n\t"\ | |
1381 "paddw 6(%0), %%mm2 \n\t"\ | |
1382 "paddw 14(%0), %%mm5 \n\t"\ | |
1383 "psubw %%mm1, %%mm0 \n\t"\ | |
1384 "psubw %%mm4, %%mm3 \n\t"\ | |
1385 "psraw $2, %%mm0 \n\t"\ | |
1386 "psraw $2, %%mm3 \n\t"\ | |
1387 "psubw %%mm1, %%mm0 \n\t"\ | |
1388 "psubw %%mm4, %%mm3 \n\t"\ | |
1389 "paddsw %%mm2, %%mm0 \n\t"\ | |
1390 "paddsw %%mm5, %%mm3 \n\t"\ | |
1391 "psraw $2, %%mm0 \n\t"\ | |
1392 "psraw $2, %%mm3 \n\t"\ | |
1393 "paddw %%mm2, %%mm0 \n\t"\ | |
1394 "paddw %%mm5, %%mm3 \n\t"\ | |
1395 "psraw $6, %%mm0 \n\t"\ | |
1396 "psraw $6, %%mm3 \n\t"\ | |
1397 "packuswb %%mm3, %%mm0 \n\t"\ | |
1398 OP(%%mm0, (%1),%%mm7, q)\ | |
1399 "add $48, %0 \n\t"\ | |
1400 "add %3, %1 \n\t"\ | |
1401 "decl %2 \n\t"\ | |
1402 " jnz 1b \n\t"\ | |
1403 : "+a"(tmp), "+c"(dst), "+g"(h)\ | |
1404 : "S"((x86_reg)dstStride)\ | |
1405 : "memory"\ | |
1406 );\ | |
1407 tmp += 8 - size*24;\ | |
1408 dst += 8 - size*dstStride;\ | |
1409 }while(w--);\ | |
1410 }\ | |
1411 \ | |
1412 static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
1413 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\ | |
1414 }\ | |
1415 static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
1416 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\ | |
1417 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ | |
1418 }\ | |
1419 \ | |
1420 static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
1421 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ | |
1422 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
1423 src += 8*srcStride;\ | |
1424 dst += 8*dstStride;\ | |
1425 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ | |
1426 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
1427 }\ | |
1428 \ | |
1429 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ | |
1430 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ | |
1431 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ | |
1432 src += 8*dstStride;\ | |
1433 dst += 8*dstStride;\ | |
1434 src2 += 8*src2Stride;\ | |
1435 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ | |
1436 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ | |
1437 }\ | |
1438 \ | |
1439 static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\ | |
1440 put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\ | |
1441 OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\ | |
1442 }\ | |
1443 static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ | |
1444 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\ | |
1445 }\ | |
1446 \ | |
1447 static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ | |
1448 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\ | |
1449 }\ | |
1450 \ | |
1451 static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ | |
1452 {\ | |
1453 __asm__ volatile(\ | |
1454 "movq (%1), %%mm0 \n\t"\ | |
1455 "movq 24(%1), %%mm1 \n\t"\ | |
1456 "psraw $5, %%mm0 \n\t"\ | |
1457 "psraw $5, %%mm1 \n\t"\ | |
1458 "packuswb %%mm0, %%mm0 \n\t"\ | |
1459 "packuswb %%mm1, %%mm1 \n\t"\ | |
1460 PAVGB" (%0), %%mm0 \n\t"\ | |
1461 PAVGB" (%0,%3), %%mm1 \n\t"\ | |
1462 OP(%%mm0, (%2), %%mm4, d)\ | |
1463 OP(%%mm1, (%2,%4), %%mm5, d)\ | |
1464 "lea (%0,%3,2), %0 \n\t"\ | |
1465 "lea (%2,%4,2), %2 \n\t"\ | |
1466 "movq 48(%1), %%mm0 \n\t"\ | |
1467 "movq 72(%1), %%mm1 \n\t"\ | |
1468 "psraw $5, %%mm0 \n\t"\ | |
1469 "psraw $5, %%mm1 \n\t"\ | |
1470 "packuswb %%mm0, %%mm0 \n\t"\ | |
1471 "packuswb %%mm1, %%mm1 \n\t"\ | |
1472 PAVGB" (%0), %%mm0 \n\t"\ | |
1473 PAVGB" (%0,%3), %%mm1 \n\t"\ | |
1474 OP(%%mm0, (%2), %%mm4, d)\ | |
1475 OP(%%mm1, (%2,%4), %%mm5, d)\ | |
1476 :"+a"(src8), "+c"(src16), "+d"(dst)\ | |
1477 :"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\ | |
1478 :"memory");\ | |
1479 }\ | |
1480 static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ | |
1481 {\ | |
1482 do{\ | |
1483 __asm__ volatile(\ | |
1484 "movq (%1), %%mm0 \n\t"\ | |
1485 "movq 8(%1), %%mm1 \n\t"\ | |
1486 "movq 48(%1), %%mm2 \n\t"\ | |
1487 "movq 8+48(%1), %%mm3 \n\t"\ | |
1488 "psraw $5, %%mm0 \n\t"\ | |
1489 "psraw $5, %%mm1 \n\t"\ | |
1490 "psraw $5, %%mm2 \n\t"\ | |
1491 "psraw $5, %%mm3 \n\t"\ | |
1492 "packuswb %%mm1, %%mm0 \n\t"\ | |
1493 "packuswb %%mm3, %%mm2 \n\t"\ | |
1494 PAVGB" (%0), %%mm0 \n\t"\ | |
1495 PAVGB" (%0,%3), %%mm2 \n\t"\ | |
1496 OP(%%mm0, (%2), %%mm5, q)\ | |
1497 OP(%%mm2, (%2,%4), %%mm5, q)\ | |
1498 ::"a"(src8), "c"(src16), "d"(dst),\ | |
1499 "r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\ | |
1500 :"memory");\ | |
1501 src8 += 2L*src8Stride;\ | |
1502 src16 += 48;\ | |
1503 dst += 2L*dstStride;\ | |
1504 }while(h-=2);\ | |
1505 }\ | |
1506 static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ | |
1507 {\ | |
1508 OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\ | |
1509 OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\ | |
1510 }\ | |
1511 | |
1512 | |
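/* Reference sketch (not part of the original code): the scalar filter the
 * MMX/MMX2 h_lowpass loops above implement.  Each output pixel is
 * clip((20*(c+d) - 5*(b+e) + (a+f) + 16) >> 5) over the six horizontal
 * neighbours a..f of the half-pel position; the _l2 variants additionally
 * average the result with a second source via PAVGB.  The function name is
 * illustrative only. */
static av_unused void put_h264_qpel8_h_lowpass_c_sketch(uint8_t *dst, uint8_t *src,
                                                        int dstStride, int srcStride)
{
    int x, y;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int v = 20*(src[x] + src[x+1]) - 5*(src[x-1] + src[x+2])
                     + (src[x-2] + src[x+3]) + 16;
            v >>= 5;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v; /* the packuswb clamp */
        }
        src += srcStride;
        dst += dstStride;
    }
}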
8590 | 1513 #if ARCH_X86_64 |
8430 | 1514 #define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ |
1515 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ | |
1516 int h=16;\ | |
1517 __asm__ volatile(\ | |
1518 "pxor %%xmm15, %%xmm15 \n\t"\ | |
1519 "movdqa %6, %%xmm14 \n\t"\ | |
1520 "movdqa %7, %%xmm13 \n\t"\ | |
1521 "1: \n\t"\ | |
9739 | 1522 "lddqu 6(%0), %%xmm1 \n\t"\ |
1523 "lddqu -2(%0), %%xmm7 \n\t"\ | |
8430 | 1524 "movdqa %%xmm1, %%xmm0 \n\t"\ |
1525 "punpckhbw %%xmm15, %%xmm1 \n\t"\ | |
1526 "punpcklbw %%xmm15, %%xmm0 \n\t"\ | |
1527 "punpcklbw %%xmm15, %%xmm7 \n\t"\ | |
1528 "movdqa %%xmm1, %%xmm2 \n\t"\ | |
1529 "movdqa %%xmm0, %%xmm6 \n\t"\ | |
1530 "movdqa %%xmm1, %%xmm3 \n\t"\ | |
1531 "movdqa %%xmm0, %%xmm8 \n\t"\ | |
1532 "movdqa %%xmm1, %%xmm4 \n\t"\ | |
1533 "movdqa %%xmm0, %%xmm9 \n\t"\ | |
9739 | 1534 "movdqa %%xmm0, %%xmm12 \n\t"\ |
1535 "movdqa %%xmm1, %%xmm11 \n\t"\ | |
1536 "palignr $10,%%xmm0, %%xmm11\n\t"\ | |
1537 "palignr $10,%%xmm7, %%xmm12\n\t"\ | |
1538 "palignr $2, %%xmm0, %%xmm4 \n\t"\ | |
1539 "palignr $2, %%xmm7, %%xmm9 \n\t"\ | |
1540 "palignr $4, %%xmm0, %%xmm3 \n\t"\ | |
1541 "palignr $4, %%xmm7, %%xmm8 \n\t"\ | |
1542 "palignr $6, %%xmm0, %%xmm2 \n\t"\ | |
1543 "palignr $6, %%xmm7, %%xmm6 \n\t"\ | |
1544 "paddw %%xmm0 ,%%xmm11 \n\t"\ | |
1545 "palignr $8, %%xmm0, %%xmm1 \n\t"\ | |
1546 "palignr $8, %%xmm7, %%xmm0 \n\t"\ | |
1547 "paddw %%xmm12,%%xmm7 \n\t"\ | |
8430 | 1548 "paddw %%xmm3, %%xmm2 \n\t"\ |
1549 "paddw %%xmm8, %%xmm6 \n\t"\ | |
1550 "paddw %%xmm4, %%xmm1 \n\t"\ | |
1551 "paddw %%xmm9, %%xmm0 \n\t"\ | |
1552 "psllw $2, %%xmm2 \n\t"\ | |
1553 "psllw $2, %%xmm6 \n\t"\ | |
1554 "psubw %%xmm1, %%xmm2 \n\t"\ | |
1555 "psubw %%xmm0, %%xmm6 \n\t"\ | |
9739 | 1556 "paddw %%xmm13,%%xmm11 \n\t"\ |
1557 "paddw %%xmm13,%%xmm7 \n\t"\ | |
8430 | 1558 "pmullw %%xmm14,%%xmm2 \n\t"\ |
1559 "pmullw %%xmm14,%%xmm6 \n\t"\ | |
1560 "lddqu (%2), %%xmm3 \n\t"\ | |
9739 | 1561 "paddw %%xmm11,%%xmm2 \n\t"\ |
1562 "paddw %%xmm7, %%xmm6 \n\t"\ | |
8430 | 1563 "psraw $5, %%xmm2 \n\t"\ |
1564 "psraw $5, %%xmm6 \n\t"\ | |
1565 "packuswb %%xmm2,%%xmm6 \n\t"\ | |
1566 "pavgb %%xmm3, %%xmm6 \n\t"\ | |
1567 OP(%%xmm6, (%1), %%xmm4, dqa)\ | |
1568 "add %5, %0 \n\t"\ | |
1569 "add %5, %1 \n\t"\ | |
1570 "add %4, %2 \n\t"\ | |
1571 "decl %3 \n\t"\ | |
1572 "jg 1b \n\t"\ | |
1573 : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\ | |
1574 : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\ | |
1575 "m"(ff_pw_5), "m"(ff_pw_16)\ | |
1576 : "memory"\ | |
1577 );\ | |
1578 } | |
1579 #else // ARCH_X86_64 | |
1580 #define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ | |
1581 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ | |
1582 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ | |
1583 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ | |
1584 src += 8*dstStride;\ | |
1585 dst += 8*dstStride;\ | |
1586 src2 += 8*src2Stride;\ | |
1587 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ | |
1588 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ | |
1589 } | |
1590 #endif // ARCH_X86_64 | |
1591 | |
1592 #define QPEL_H264_H_XMM(OPNAME, OP, MMX)\ | |
1593 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ | |
1594 int h=8;\ | |
1595 __asm__ volatile(\ | |
1596 "pxor %%xmm7, %%xmm7 \n\t"\ | |
1597 "movdqa %0, %%xmm6 \n\t"\ | |
1598 :: "m"(ff_pw_5)\ | |
1599 );\ | |
1600 do{\ | |
1601 __asm__ volatile(\ | |
9739 | 1602 "lddqu -2(%0), %%xmm1 \n\t"\ |
8430 | 1603 "movdqa %%xmm1, %%xmm0 \n\t"\ |
1604 "punpckhbw %%xmm7, %%xmm1 \n\t"\ | |
1605 "punpcklbw %%xmm7, %%xmm0 \n\t"\ | |
1606 "movdqa %%xmm1, %%xmm2 \n\t"\ | |
1607 "movdqa %%xmm1, %%xmm3 \n\t"\ | |
1608 "movdqa %%xmm1, %%xmm4 \n\t"\ | |
1609 "movdqa %%xmm1, %%xmm5 \n\t"\ | |
9739 | 1610 "palignr $2, %%xmm0, %%xmm4 \n\t"\ |
1611 "palignr $4, %%xmm0, %%xmm3 \n\t"\ | |
1612 "palignr $6, %%xmm0, %%xmm2 \n\t"\ | |
1613 "palignr $8, %%xmm0, %%xmm1 \n\t"\ | |
1614 "palignr $10,%%xmm0, %%xmm5 \n\t"\ | |
1615 "paddw %%xmm5, %%xmm0 \n\t"\ | |
8430 | 1616 "paddw %%xmm3, %%xmm2 \n\t"\ |
1617 "paddw %%xmm4, %%xmm1 \n\t"\ | |
1618 "psllw $2, %%xmm2 \n\t"\ | |
1619 "movq (%2), %%xmm3 \n\t"\ | |
1620 "psubw %%xmm1, %%xmm2 \n\t"\ | |
9739 | 1621 "paddw %5, %%xmm0 \n\t"\ |
8430 | 1622 "pmullw %%xmm6, %%xmm2 \n\t"\ |
9739 | 1623 "paddw %%xmm0, %%xmm2 \n\t"\ |
8430 | 1624 "psraw $5, %%xmm2 \n\t"\ |
1625 "packuswb %%xmm2, %%xmm2 \n\t"\ | |
1626 "pavgb %%xmm3, %%xmm2 \n\t"\ | |
1627 OP(%%xmm2, (%1), %%xmm4, q)\ | |
1628 "add %4, %0 \n\t"\ | |
1629 "add %4, %1 \n\t"\ | |
1630 "add %3, %2 \n\t"\ | |
1631 : "+a"(src), "+c"(dst), "+d"(src2)\ | |
1632 : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\ | |
1633 "m"(ff_pw_16)\ | |
1634 : "memory"\ | |
1635 );\ | |
1636 }while(--h);\ | |
1637 }\ | |
1638 QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ | |
1639 \ | |
1640 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
1641 int h=8;\ | |
1642 __asm__ volatile(\ | |
1643 "pxor %%xmm7, %%xmm7 \n\t"\ | |
11701 | 1644 "movdqa "MANGLE(ff_pw_5)", %%xmm6\n\t"\ |
8430 | 1645 "1: \n\t"\ |
9739 | 1646 "lddqu -2(%0), %%xmm1 \n\t"\ |
8430 | 1647 "movdqa %%xmm1, %%xmm0 \n\t"\ |
1648 "punpckhbw %%xmm7, %%xmm1 \n\t"\ | |
1649 "punpcklbw %%xmm7, %%xmm0 \n\t"\ | |
1650 "movdqa %%xmm1, %%xmm2 \n\t"\ | |
1651 "movdqa %%xmm1, %%xmm3 \n\t"\ | |
1652 "movdqa %%xmm1, %%xmm4 \n\t"\ | |
1653 "movdqa %%xmm1, %%xmm5 \n\t"\ | |
9739 | 1654 "palignr $2, %%xmm0, %%xmm4 \n\t"\ |
1655 "palignr $4, %%xmm0, %%xmm3 \n\t"\ | |
1656 "palignr $6, %%xmm0, %%xmm2 \n\t"\ | |
1657 "palignr $8, %%xmm0, %%xmm1 \n\t"\ | |
1658 "palignr $10,%%xmm0, %%xmm5 \n\t"\ | |
1659 "paddw %%xmm5, %%xmm0 \n\t"\ | |
8430 | 1660 "paddw %%xmm3, %%xmm2 \n\t"\ |
1661 "paddw %%xmm4, %%xmm1 \n\t"\ | |
1662 "psllw $2, %%xmm2 \n\t"\ | |
1663 "psubw %%xmm1, %%xmm2 \n\t"\ | |
11701 | 1664 "paddw "MANGLE(ff_pw_16)", %%xmm0\n\t"\ |
8430 | 1665 "pmullw %%xmm6, %%xmm2 \n\t"\ |
9739 | 1666 "paddw %%xmm0, %%xmm2 \n\t"\ |
8430 | 1667 "psraw $5, %%xmm2 \n\t"\ |
1668 "packuswb %%xmm2, %%xmm2 \n\t"\ | |
1669 OP(%%xmm2, (%1), %%xmm4, q)\ | |
1670 "add %3, %0 \n\t"\ | |
1671 "add %4, %1 \n\t"\ | |
1672 "decl %2 \n\t"\ | |
1673 " jnz 1b \n\t"\ | |
1674 : "+a"(src), "+c"(dst), "+g"(h)\ | |
11701 | 1675 : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride)\ |
8430 | 1676 : "memory"\ |
1677 );\ | |
1678 }\ | |
1679 static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
1680 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ | |
1681 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
1682 src += 8*srcStride;\ | |
1683 dst += 8*dstStride;\ | |
1684 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ | |
1685 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
1686 }\ | |
1687 | |
1688 #define QPEL_H264_V_XMM(OPNAME, OP, MMX)\ | |
1689 static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ | |
1690 src -= 2*srcStride;\ | |
1691 \ | |
1692 __asm__ volatile(\ | |
1693 "pxor %%xmm7, %%xmm7 \n\t"\ | |
1694 "movq (%0), %%xmm0 \n\t"\ | |
1695 "add %2, %0 \n\t"\ | |
1696 "movq (%0), %%xmm1 \n\t"\ | |
1697 "add %2, %0 \n\t"\ | |
1698 "movq (%0), %%xmm2 \n\t"\ | |
1699 "add %2, %0 \n\t"\ | |
1700 "movq (%0), %%xmm3 \n\t"\ | |
1701 "add %2, %0 \n\t"\ | |
1702 "movq (%0), %%xmm4 \n\t"\ | |
1703 "add %2, %0 \n\t"\ | |
1704 "punpcklbw %%xmm7, %%xmm0 \n\t"\ | |
1705 "punpcklbw %%xmm7, %%xmm1 \n\t"\ | |
1706 "punpcklbw %%xmm7, %%xmm2 \n\t"\ | |
1707 "punpcklbw %%xmm7, %%xmm3 \n\t"\ | |
1708 "punpcklbw %%xmm7, %%xmm4 \n\t"\ | |
1709 QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\ | |
1710 QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\ | |
1711 QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\ | |
1712 QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\ | |
1713 QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\ | |
1714 QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\ | |
1715 QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\ | |
1716 QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\ | |
1717 \ | |
1718 : "+a"(src), "+c"(dst)\ | |
1719 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
1720 : "memory"\ | |
1721 );\ | |
1722 if(h==16){\ | |
1723 __asm__ volatile(\ | |
1724 QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\ | |
1725 QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\ | |
1726 QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\ | |
1727 QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\ | |
1728 QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\ | |
1729 QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\ | |
1730 QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\ | |
1731 QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\ | |
1732 \ | |
1733 : "+a"(src), "+c"(dst)\ | |
1734 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
1735 : "memory"\ | |
1736 );\ | |
1737 }\ | |
1738 }\ | |
1739 static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
1740 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\ | |
1741 }\ | |
1742 static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
1743 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\ | |
1744 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ | |
1745 } | |
1746 | |
1747 static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){ | |
1748 int w = (size+8)>>3; | |
1749 src -= 2*srcStride+2; | |
1750 while(w--){ | |
1751 __asm__ volatile( | |
1752 "pxor %%xmm7, %%xmm7 \n\t" | |
1753 "movq (%0), %%xmm0 \n\t" | |
1754 "add %2, %0 \n\t" | |
1755 "movq (%0), %%xmm1 \n\t" | |
1756 "add %2, %0 \n\t" | |
1757 "movq (%0), %%xmm2 \n\t" | |
1758 "add %2, %0 \n\t" | |
1759 "movq (%0), %%xmm3 \n\t" | |
1760 "add %2, %0 \n\t" | |
1761 "movq (%0), %%xmm4 \n\t" | |
1762 "add %2, %0 \n\t" | |
1763 "punpcklbw %%xmm7, %%xmm0 \n\t" | |
1764 "punpcklbw %%xmm7, %%xmm1 \n\t" | |
1765 "punpcklbw %%xmm7, %%xmm2 \n\t" | |
1766 "punpcklbw %%xmm7, %%xmm3 \n\t" | |
1767 "punpcklbw %%xmm7, %%xmm4 \n\t" | |
1768 QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 0*48) | |
1769 QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 1*48) | |
1770 QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 2*48) | |
1771 QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 3*48) | |
1772 QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 4*48) | |
1773 QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 5*48) | |
1774 QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 6*48) | |
1775 QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 7*48) | |
1776 : "+a"(src) | |
1777 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16) | |
1778 : "memory" | |
1779 ); | |
1780 if(size==16){ | |
1781 __asm__ volatile( | |
1782 QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 8*48) | |
1783 QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 9*48) | |
1784 QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48) | |
1785 QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48) | |
1786 QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48) | |
1787 QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48) | |
1788 QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48) | |
1789 QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48) | |
1790 : "+a"(src) | |
1791 : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16) | |
1792 : "memory" | |
1793 ); | |
1794 } | |
1795 tmp += 8; | |
1796 src += 8 - (size+5)*srcStride; | |
1797 } | |
1798 } | |
1799 | |
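/* Reference sketch (not part of the original code) of the two-pass 2D
 * filter: the hv1 pass above runs the 6-tap filter vertically into a
 * 16-bit tmp buffer whose rows are 24 int16_t (48 bytes) apart; the hv2
 * pass filters tmp horizontally and normalizes with (v + 512) >> 10.
 * The asm keeps the second pass within 16 bits via a shift/add
 * reformulation; this sketch uses plain int and a local buffer and is
 * illustrative only. */
static av_unused void h264_qpel8_hv_lowpass_c_sketch(uint8_t *dst, uint8_t *src,
                                                     int dstStride, int srcStride)
{
    int16_t tmp[8][13]; /* 8 rows, columns x = -2..10 stored at x+2 */
    int x, y;
    for (y = 0; y < 8; y++)         /* pass 1: vertical 6-tap */
        for (x = -2; x < 11; x++) {
            const uint8_t *s = src + x + y*srcStride;
            tmp[y][x+2] = 20*(s[0]             + s[srcStride])
                         - 5*(s[-srcStride]    + s[2*srcStride])
                         +   (s[-2*srcStride]  + s[3*srcStride]);
        }
    for (y = 0; y < 8; y++)         /* pass 2: horizontal 6-tap + >> 10 */
        for (x = 0; x < 8; x++) {
            const int16_t *t = tmp[y] + x + 2;
            int v = 20*(t[0] + t[1]) - 5*(t[-1] + t[2]) + (t[-2] + t[3]) + 512;
            v >>= 10;
            dst[x + y*dstStride] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}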
1800 #define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\ | |
1801 static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\ | |
1802 int h = size;\ | |
1803 if(size == 16){\ | |
1804 __asm__ volatile(\ | |
1805 "1: \n\t"\ | |
1806 "movdqa 32(%0), %%xmm4 \n\t"\ | |
1807 "movdqa 16(%0), %%xmm5 \n\t"\ | |
1808 "movdqa (%0), %%xmm7 \n\t"\ | |
1809 "movdqa %%xmm4, %%xmm3 \n\t"\ | |
1810 "movdqa %%xmm4, %%xmm2 \n\t"\ | |
1811 "movdqa %%xmm4, %%xmm1 \n\t"\ | |
1812 "movdqa %%xmm4, %%xmm0 \n\t"\ | |
1813 "palignr $10, %%xmm5, %%xmm0 \n\t"\ | |
1814 "palignr $8, %%xmm5, %%xmm1 \n\t"\ | |
1815 "palignr $6, %%xmm5, %%xmm2 \n\t"\ | |
1816 "palignr $4, %%xmm5, %%xmm3 \n\t"\ | |
1817 "palignr $2, %%xmm5, %%xmm4 \n\t"\ | |
1818 "paddw %%xmm5, %%xmm0 \n\t"\ | |
1819 "paddw %%xmm4, %%xmm1 \n\t"\ | |
1820 "paddw %%xmm3, %%xmm2 \n\t"\ | |
1821 "movdqa %%xmm5, %%xmm6 \n\t"\ | |
1822 "movdqa %%xmm5, %%xmm4 \n\t"\ | |
1823 "movdqa %%xmm5, %%xmm3 \n\t"\ | |
1824 "palignr $8, %%xmm7, %%xmm4 \n\t"\ | |
1825 "palignr $2, %%xmm7, %%xmm6 \n\t"\ | |
1826 "palignr $10, %%xmm7, %%xmm3 \n\t"\ | |
1827 "paddw %%xmm6, %%xmm4 \n\t"\ | |
1828 "movdqa %%xmm5, %%xmm6 \n\t"\ | |
1829 "palignr $6, %%xmm7, %%xmm5 \n\t"\ | |
1830 "palignr $4, %%xmm7, %%xmm6 \n\t"\ | |
1831 "paddw %%xmm7, %%xmm3 \n\t"\ | |
1832 "paddw %%xmm6, %%xmm5 \n\t"\ | |
1833 \ | |
1834 "psubw %%xmm1, %%xmm0 \n\t"\ | |
1835 "psubw %%xmm4, %%xmm3 \n\t"\ | |
1836 "psraw $2, %%xmm0 \n\t"\ | |
1837 "psraw $2, %%xmm3 \n\t"\ | |
1838 "psubw %%xmm1, %%xmm0 \n\t"\ | |
1839 "psubw %%xmm4, %%xmm3 \n\t"\ | |
1840 "paddw %%xmm2, %%xmm0 \n\t"\ | |
1841 "paddw %%xmm5, %%xmm3 \n\t"\ | |
1842 "psraw $2, %%xmm0 \n\t"\ | |
1843 "psraw $2, %%xmm3 \n\t"\ | |
1844 "paddw %%xmm2, %%xmm0 \n\t"\ | |
1845 "paddw %%xmm5, %%xmm3 \n\t"\ | |
1846 "psraw $6, %%xmm0 \n\t"\ | |
1847 "psraw $6, %%xmm3 \n\t"\ | |
1848 "packuswb %%xmm0, %%xmm3 \n\t"\ | |
1849 OP(%%xmm3, (%1), %%xmm7, dqa)\ | |
1850 "add $48, %0 \n\t"\ | |
1851 "add %3, %1 \n\t"\ | |
1852 "decl %2 \n\t"\ | |
1853 " jnz 1b \n\t"\ | |
1854 : "+a"(tmp), "+c"(dst), "+g"(h)\ | |
1855 : "S"((x86_reg)dstStride)\ | |
1856 : "memory"\ | |
1857 );\ | |
1858 }else{\ | |
1859 __asm__ volatile(\ | |
1860 "1: \n\t"\ | |
1861 "movdqa 16(%0), %%xmm1 \n\t"\ | |
1862 "movdqa (%0), %%xmm0 \n\t"\ | |
1863 "movdqa %%xmm1, %%xmm2 \n\t"\ | |
1864 "movdqa %%xmm1, %%xmm3 \n\t"\ | |
1865 "movdqa %%xmm1, %%xmm4 \n\t"\ | |
1866 "movdqa %%xmm1, %%xmm5 \n\t"\ | |
1867 "palignr $10, %%xmm0, %%xmm5 \n\t"\ | |
1868 "palignr $8, %%xmm0, %%xmm4 \n\t"\ | |
1869 "palignr $6, %%xmm0, %%xmm3 \n\t"\ | |
1870 "palignr $4, %%xmm0, %%xmm2 \n\t"\ | |
1871 "palignr $2, %%xmm0, %%xmm1 \n\t"\ | |
1872 "paddw %%xmm5, %%xmm0 \n\t"\ | |
1873 "paddw %%xmm4, %%xmm1 \n\t"\ | |
1874 "paddw %%xmm3, %%xmm2 \n\t"\ | |
1875 "psubw %%xmm1, %%xmm0 \n\t"\ | |
1876 "psraw $2, %%xmm0 \n\t"\ | |
1877 "psubw %%xmm1, %%xmm0 \n\t"\ | |
1878 "paddw %%xmm2, %%xmm0 \n\t"\ | |
1879 "psraw $2, %%xmm0 \n\t"\ | |
1880 "paddw %%xmm2, %%xmm0 \n\t"\ | |
1881 "psraw $6, %%xmm0 \n\t"\ | |
1882 "packuswb %%xmm0, %%xmm0 \n\t"\ | |
1883 OP(%%xmm0, (%1), %%xmm7, q)\ | |
1884 "add $48, %0 \n\t"\ | |
1885 "add %3, %1 \n\t"\ | |
1886 "decl %2 \n\t"\ | |
1887 " jnz 1b \n\t"\ | |
1888 : "+a"(tmp), "+c"(dst), "+g"(h)\ | |
1889 : "S"((x86_reg)dstStride)\ | |
1890 : "memory"\ | |
1891 );\ | |
1892 }\ | |
1893 } | |
1894 | |
1895 #define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\ | |
1896 static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\ | |
1897 put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\ | |
1898 OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\ | |
1899 }\ | |
1900 static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ | |
1901 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\ | |
1902 }\ | |
1903 static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ | |
1904 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\ | |
1905 }\ | |
1906 | |
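/* No dedicated SSE2/SSSE3 implementations exist for these small l2 and
 * shift5 helpers, so the aliases below let the sse2/ssse3 function tables
 * reuse the MMX2 code. */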
1907 #define put_pixels8_l2_sse2 put_pixels8_l2_mmx2 | |
1908 #define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2 | |
1909 #define put_pixels16_l2_sse2 put_pixels16_l2_mmx2 | |
1910 #define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2 | |
1911 #define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2 | |
1912 #define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2 | |
1913 #define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2 | |
1914 #define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2 | |
1915 | |
1916 #define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2 | |
1917 #define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2 | |
1918 #define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2 | |
1919 #define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2 | |
1920 #define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2 | |
1921 #define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2 | |
1922 #define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2 | |
1923 #define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2 | |
1924 | |
1925 #define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2 | |
1926 #define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2 | |
1927 #define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2 | |
1928 #define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2 | |
1929 | |
1930 #define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2 | |
1931 #define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2 | |
1932 #define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2 | |
1933 #define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2 | |
1934 | |
1935 #define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2 | |
1936 #define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2 | |
1937 | |
1938 #define H264_MC(OPNAME, SIZE, MMX, ALIGN) \ | |
1939 H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\ | |
1940 H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\ | |
1941 H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\ | |
1942 H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\ | |
1943 | |
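/* The mcXY suffixes below follow the H.264 quarter-pel convention: X and Y
 * are the quarter-sample offsets (0..3) of the motion vector.  mc00 is the
 * full-pel copy, mc20/mc02 the horizontal/vertical half-pel filters, mc22
 * the centre position; the remaining cases average a filtered plane with a
 * (possibly shifted) source or with another filtered plane. */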
1944 static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){ | |
1945 put_pixels16_sse2(dst, src, stride, 16); | |
1946 } | |
1947 static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){ | |
1948 avg_pixels16_sse2(dst, src, stride, 16); | |
1949 } | |
1950 #define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2 | |
1951 #define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2 | |
1952 | |
1953 #define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \ | |
1954 static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ | |
1955 OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\ | |
1956 }\ | |
1957 | |
1958 #define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \ | |
1959 static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1960 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\ | |
1961 }\ | |
1962 \ | |
1963 static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1964 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\ | |
1965 }\ | |
1966 \ | |
1967 static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1968 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\ | |
1969 }\ | |
1970 | |
1971 #define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \ | |
1972 static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 1973 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\ |
8430 | 1974 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\ |
1975 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\ | |
1976 }\ | |
1977 \ | |
1978 static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1979 OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\ | |
1980 }\ | |
1981 \ | |
1982 static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 1983 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\ |
8430 | 1984 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\ |
1985 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\ | |
1986 }\ | |
1987 | |
1988 #define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \ | |
1989 static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 1990 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\ |
8430 | 1991 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\ |
1992 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\ | |
1993 }\ | |
1994 \ | |
1995 static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 1996 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\ |
8430 | 1997 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\ |
1998 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\ | |
1999 }\ | |
2000 \ | |
2001 static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 2002 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\ |
8430 | 2003 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\ |
2004 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\ | |
2005 }\ | |
2006 \ | |
2007 static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 2008 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\ |
8430 | 2009 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\ |
2010 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\ | |
2011 }\ | |
2012 \ | |
2013 static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 2014 DECLARE_ALIGNED(ALIGN, uint16_t, temp)[SIZE*(SIZE<8?12:24)];\ |
8430 | 2015 OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\ |
2016 }\ | |
2017 \ | |
2018 static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 2019 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\ |
8430 | 2020 uint8_t * const halfHV= temp;\ |
2021 int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\ | |
2022 assert(((int)temp & 7) == 0);\ | |
2023 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ | |
2024 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\ | |
2025 }\ | |
2026 \ | |
2027 static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 2028 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\ |
8430 | 2029 uint8_t * const halfHV= temp;\ |
2030 int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\ | |
2031 assert(((int)temp & 7) == 0);\ | |
2032 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ | |
2033 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\ | |
2034 }\ | |
2035 \ | |
2036 static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 2037 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\ |
8430 | 2038 uint8_t * const halfHV= temp;\ |
2039 int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\ | |
2040 assert(((int)temp & 7) == 0);\ | |
2041 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ | |
2042 OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\ | |
2043 }\ | |
2044 \ | |
2045 static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
10961 | 2046 DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\ |
8430 | 2047 uint8_t * const halfHV= temp;\ |
2048 int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\ | |
2049 assert(((int)temp & 7) == 0);\ | |
2050 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ | |
2051 OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\ | |
2052 }\ | |
2053 | |
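/* mc12/mc32 above hand pixels*_l2_shift5 the raw 16-bit output of the
 * vertical pass (halfV; +2/+3 select the column at x=0 or x=1 of the
 * padded tmp rows): the helper applies >> 5 with unsigned saturation and
 * averages the result with the finished halfHV plane via PAVGB. */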
2054 #define H264_MC_4816(MMX)\ | |
2055 H264_MC(put_, 4, MMX, 8)\ | |
2056 H264_MC(put_, 8, MMX, 8)\ | |
2057 H264_MC(put_, 16,MMX, 8)\ | |
2058 H264_MC(avg_, 4, MMX, 8)\ | |
2059 H264_MC(avg_, 8, MMX, 8)\ | |
2060 H264_MC(avg_, 16,MMX, 8)\ | |
2061 | |
2062 #define H264_MC_816(QPEL, XMM)\ | |
2063 QPEL(put_, 8, XMM, 16)\ | |
2064 QPEL(put_, 16,XMM, 16)\ | |
2065 QPEL(avg_, 8, XMM, 16)\ | |
2066 QPEL(avg_, 16,XMM, 16)\ | |
2067 | |
2068 | |
2069 #define AVG_3DNOW_OP(a,b,temp, size) \ | |
2070 "mov" #size " " #b ", " #temp " \n\t"\ | |
2071 "pavgusb " #temp ", " #a " \n\t"\ | |
2072 "mov" #size " " #a ", " #b " \n\t" | |
2073 #define AVG_MMX2_OP(a,b,temp, size) \ | |
2074 "mov" #size " " #b ", " #temp " \n\t"\ | |
2075 "pavgb " #temp ", " #a " \n\t"\ | |
2076 "mov" #size " " #a ", " #b " \n\t" | |
2077 | |
2078 #define PAVGB "pavgusb" | |
2079 QPEL_H264(put_, PUT_OP, 3dnow) | |
2080 QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow) | |
2081 #undef PAVGB | |
2082 #define PAVGB "pavgb" | |
2083 QPEL_H264(put_, PUT_OP, mmx2) | |
2084 QPEL_H264(avg_, AVG_MMX2_OP, mmx2) | |
2085 QPEL_H264_V_XMM(put_, PUT_OP, sse2) | |
2086 QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2) | |
2087 QPEL_H264_HV_XMM(put_, PUT_OP, sse2) | |
2088 QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2) | |
8590 | 2089 #if HAVE_SSSE3 |
8430 | 2090 QPEL_H264_H_XMM(put_, PUT_OP, ssse3) |
2091 QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3) | |
2092 QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3) | |
2093 QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3) | |
2094 QPEL_H264_HV_XMM(put_, PUT_OP, ssse3) | |
2095 QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3) | |
2096 #endif | |
2097 #undef PAVGB | |
2098 | |
2099 H264_MC_4816(3dnow) | |
2100 H264_MC_4816(mmx2) | |
2101 H264_MC_816(H264_MC_V, sse2) | |
2102 H264_MC_816(H264_MC_HV, sse2) | |
8590 | 2103 #if HAVE_SSSE3 |
8430 | 2104 H264_MC_816(H264_MC_H, ssse3) |
2105 H264_MC_816(H264_MC_HV, ssse3) | |
2106 #endif | |
2107 | |
8519 | 2108 /* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */
11369 | 2109 DECLARE_ALIGNED(8, static const uint64_t, h264_rnd_reg)[4] = { |
8519 | 2110 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL
2111 };
8430 | 2112 |
2113 #define H264_CHROMA_OP(S,D) | |
2114 #define H264_CHROMA_OP4(S,D,T) | |
8519 | 2115 #define H264_CHROMA_MC8_TMPL put_h264_chroma_generic_mc8_mmx
2116 #define H264_CHROMA_MC4_TMPL put_h264_chroma_generic_mc4_mmx
8430 | 2117 #define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2 |
2118 #define H264_CHROMA_MC8_MV0 put_pixels8_mmx | |
2119 #include "dsputil_h264_template_mmx.c" | |
2120 | |
2121 static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) | |
2122 { | |
8519 | 2123 put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
8430 | 2124 } |
9439 | 2125 static void put_vc1_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
8430 | 2126 { |
8519 | 2127 put_h264_chroma_generic_mc8_mmx(dst, src, stride, h, x, y, h264_rnd_reg+2);
2128 }
2129 static void put_h264_chroma_mc4_mmx(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2130 {
2131 put_h264_chroma_generic_mc4_mmx(dst, src, stride, h, x, y, h264_rnd_reg);
8430 | 2132 } |
2133 | |
2134 #undef H264_CHROMA_OP | |
2135 #undef H264_CHROMA_OP4 | |
2136 #undef H264_CHROMA_MC8_TMPL | |
2137 #undef H264_CHROMA_MC4_TMPL | |
2138 #undef H264_CHROMA_MC2_TMPL | |
2139 #undef H264_CHROMA_MC8_MV0 | |
2140 | |
2141 #define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t" | |
2142 #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\ | |
2143 "pavgb " #T ", " #D " \n\t" | |
8519 | 2144 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_mmx2
2145 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_mmx2
8430 | 2146 #define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2 |
2147 #define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2 | |
2148 #include "dsputil_h264_template_mmx.c" | |
2149 static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) | |
2150 { | |
8519 | 2151 avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
2152 }
9440 | 2153 static void avg_vc1_chroma_mc8_mmx2_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) |
2154 { | |
2155 avg_h264_chroma_generic_mc8_mmx2(dst, src, stride, h, x, y, h264_rnd_reg+2); | |
2156 } | |
8519 | 2157 static void avg_h264_chroma_mc4_mmx2(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2158 {
2159 avg_h264_chroma_generic_mc4_mmx2(dst, src, stride, h, x, y, h264_rnd_reg);
8430 | 2160 } |
2161 #undef H264_CHROMA_OP | |
2162 #undef H264_CHROMA_OP4 | |
2163 #undef H264_CHROMA_MC8_TMPL | |
2164 #undef H264_CHROMA_MC4_TMPL | |
2165 #undef H264_CHROMA_MC2_TMPL | |
2166 #undef H264_CHROMA_MC8_MV0 | |
2167 | |
2168 #define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t" | |
2169 #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\ | |
2170 "pavgusb " #T ", " #D " \n\t" | |
8519 | 2171 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_generic_mc8_3dnow
2172 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_generic_mc4_3dnow
8430 | 2173 #define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow |
2174 #include "dsputil_h264_template_mmx.c" | |
2175 static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) | |
2176 { | |
8519 | 2177 avg_h264_chroma_generic_mc8_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
2178 }
2179 static void avg_h264_chroma_mc4_3dnow(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
2180 {
2181 avg_h264_chroma_generic_mc4_3dnow(dst, src, stride, h, x, y, h264_rnd_reg);
8430 | 2182 } |
2183 #undef H264_CHROMA_OP | |
2184 #undef H264_CHROMA_OP4 | |
2185 #undef H264_CHROMA_MC8_TMPL | |
2186 #undef H264_CHROMA_MC4_TMPL | |
2187 #undef H264_CHROMA_MC8_MV0 | |
2188 | |
8590 | 2189 #if HAVE_SSSE3 |
8430 | 2190 #define AVG_OP(X) |
2191 #undef H264_CHROMA_MC8_TMPL | |
2192 #undef H264_CHROMA_MC4_TMPL | |
2193 #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3 | |
2194 #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3 | |
2195 #define H264_CHROMA_MC8_MV0 put_pixels8_mmx | |
2196 #include "dsputil_h264_template_ssse3.c" | |
2197 static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) | |
2198 { | |
2199 put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1); | |
2200 } | |
9439 | 2201 static void put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
8430 | 2202 { |
2203 put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0); | |
2204 } | |
2205 | |
2206 #undef AVG_OP | |
2207 #undef H264_CHROMA_MC8_TMPL | |
2208 #undef H264_CHROMA_MC4_TMPL | |
2209 #undef H264_CHROMA_MC8_MV0 | |
2210 #define AVG_OP(X) X | |
2211 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3 | |
2212 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3 | |
2213 #define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2 | |
2214 #include "dsputil_h264_template_ssse3.c" | |
2215 static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) | |
2216 { | |
2217 avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1); | |
2218 } | |
9440 | 2219 static void avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) |
2220 { | |
2221 avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0); | |
2222 } | |
8430 | 2223 #undef AVG_OP |
2224 #undef H264_CHROMA_MC8_TMPL | |
2225 #undef H264_CHROMA_MC4_TMPL | |
2226 #undef H264_CHROMA_MC8_MV0 | |
2227 #endif | |
2228 | |
2229 /***********************************/ | |
2230 /* weighted prediction */ | |
2231 | |
2232 static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h) | |
2233 { | |
2234 int x, y; | |
2235 offset <<= log2_denom; | |
2236 offset += (1 << log2_denom) >> 1; | |
2237 __asm__ volatile( | |
2238 "movd %0, %%mm4 \n\t" | |
2239 "movd %1, %%mm5 \n\t" | |
2240 "movd %2, %%mm6 \n\t" | |
2241 "pshufw $0, %%mm4, %%mm4 \n\t" | |
2242 "pshufw $0, %%mm5, %%mm5 \n\t" | |
2243 "pxor %%mm7, %%mm7 \n\t" | |
2244 :: "g"(weight), "g"(offset), "g"(log2_denom) | |
2245 ); | |
2246 for(y=0; y<h; y+=2){ | |
2247 for(x=0; x<w; x+=4){ | |
2248 __asm__ volatile( | |
2249 "movd %0, %%mm0 \n\t" | |
2250 "movd %1, %%mm1 \n\t" | |
2251 "punpcklbw %%mm7, %%mm0 \n\t" | |
2252 "punpcklbw %%mm7, %%mm1 \n\t" | |
2253 "pmullw %%mm4, %%mm0 \n\t" | |
2254 "pmullw %%mm4, %%mm1 \n\t" | |
2255 "paddsw %%mm5, %%mm0 \n\t" | |
2256 "paddsw %%mm5, %%mm1 \n\t" | |
2257 "psraw %%mm6, %%mm0 \n\t" | |
2258 "psraw %%mm6, %%mm1 \n\t" | |
2259 "packuswb %%mm7, %%mm0 \n\t" | |
2260 "packuswb %%mm7, %%mm1 \n\t" | |
2261 "movd %%mm0, %0 \n\t" | |
2262 "movd %%mm1, %1 \n\t" | |
2263 : "+m"(*(uint32_t*)(dst+x)), | |
2264 "+m"(*(uint32_t*)(dst+x+stride)) | |
2265 ); | |
2266 } | |
2267 dst += 2*stride; | |
2268 } | |
2269 } | |
2270 | |
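/* Scalar sketch (not part of the original code) of the per-pixel operation
 * above, with the rounder and offset pre-folded exactly as in the asm:
 * dst = clip((dst*weight + (offset << log2_denom) + (1 << log2_denom >> 1))
 *            >> log2_denom). */
static av_unused uint8_t h264_weight_pixel_sketch(uint8_t p, int log2_denom,
                                                  int weight, int offset)
{
    int v = (p*weight + (offset << log2_denom) + ((1 << log2_denom) >> 1))
            >> log2_denom;
    return v < 0 ? 0 : v > 255 ? 255 : v;
}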
2271 static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h) | |
2272 { | |
2273 int x, y; | |
2274 offset = ((offset + 1) | 1) << log2_denom; | |
2275 __asm__ volatile( | |
2276 "movd %0, %%mm3 \n\t" | |
2277 "movd %1, %%mm4 \n\t" | |
2278 "movd %2, %%mm5 \n\t" | |
2279 "movd %3, %%mm6 \n\t" | |
2280 "pshufw $0, %%mm3, %%mm3 \n\t" | |
2281 "pshufw $0, %%mm4, %%mm4 \n\t" | |
2282 "pshufw $0, %%mm5, %%mm5 \n\t" | |
2283 "pxor %%mm7, %%mm7 \n\t" | |
2284 :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1) | |
2285 ); | |
2286 for(y=0; y<h; y++){ | |
2287 for(x=0; x<w; x+=4){ | |
2288 __asm__ volatile( | |
2289 "movd %0, %%mm0 \n\t" | |
2290 "movd %1, %%mm1 \n\t" | |
2291 "punpcklbw %%mm7, %%mm0 \n\t" | |
2292 "punpcklbw %%mm7, %%mm1 \n\t" | |
2293 "pmullw %%mm3, %%mm0 \n\t" | |
2294 "pmullw %%mm4, %%mm1 \n\t" | |
2295 "paddsw %%mm1, %%mm0 \n\t" | |
2296 "paddsw %%mm5, %%mm0 \n\t" | |
2297 "psraw %%mm6, %%mm0 \n\t" | |
2298 "packuswb %%mm0, %%mm0 \n\t" | |
2299 "movd %%mm0, %0 \n\t" | |
2300 : "+m"(*(uint32_t*)(dst+x)) | |
2301 : "m"(*(uint32_t*)(src+x)) | |
2302 ); | |
2303 } | |
2304 src += stride; | |
2305 dst += stride; | |
2306 } | |
2307 } | |
2308 | |
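/* Scalar sketch (not part of the original code) of the bidirectional case:
 * dst = clip((dst*weightd + src*weights + (((offset+1)|1) << log2_denom))
 *            >> (log2_denom + 1)), i.e. the H.264 biweight formula with the
 * rounding term folded into the (always odd) offset. */
static av_unused uint8_t h264_biweight_pixel_sketch(uint8_t d, uint8_t s,
                                                    int log2_denom, int weightd,
                                                    int weights, int offset)
{
    int v = (d*weightd + s*weights + (((offset + 1) | 1) << log2_denom))
            >> (log2_denom + 1);
    return v < 0 ? 0 : v > 255 ? 255 : v;
}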
2309 #define H264_WEIGHT(W,H) \ | |
2310 static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \ | |
2311 ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \ | |
2312 } \ | |
2313 static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \ | |
2314 ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \ | |
2315 } | |
2316 | |
2317 H264_WEIGHT(16,16) | |
2318 H264_WEIGHT(16, 8) | |
2319 H264_WEIGHT( 8,16) | |
2320 H264_WEIGHT( 8, 8) | |
2321 H264_WEIGHT( 8, 4) | |
2322 H264_WEIGHT( 4, 8) | |
2323 H264_WEIGHT( 4, 4) | |
2324 H264_WEIGHT( 4, 2) | |
2325 | |
12366 | 2326 void ff_h264_biweight_8x8_sse2(uint8_t *dst, uint8_t *src, int stride, |
2327 int log2_denom, int weightd, int weights, | |
2328 int offset); | |
2329 | |
2330 void ff_h264_biweight_16x16_sse2(uint8_t *dst, uint8_t *src, int stride, | |
2331 int log2_denom, int weightd, int weights, | |
2332 int offset); | |
2333 | |
2334 void ff_h264_biweight_8x8_ssse3(uint8_t *dst, uint8_t *src, int stride, | |
2335 int log2_denom, int weightd, int weights, | |
2336 int offset); | |
2337 | |
2338 void ff_h264_biweight_16x16_ssse3(uint8_t *dst, uint8_t *src, int stride, | |
2339 int log2_denom, int weightd, int weights, | |
2340 int offset); | |
2341 | |
11951 | 2342 void ff_pred16x16_vertical_mmx (uint8_t *src, int stride);
2343 void ff_pred16x16_vertical_sse (uint8_t *src, int stride);
2344 void ff_pred16x16_horizontal_mmx (uint8_t *src, int stride);
2345 void ff_pred16x16_horizontal_mmxext(uint8_t *src, int stride);
2346 void ff_pred16x16_horizontal_ssse3 (uint8_t *src, int stride);
2347 void ff_pred16x16_dc_mmxext (uint8_t *src, int stride);
2348 void ff_pred16x16_dc_sse2 (uint8_t *src, int stride);
2349 void ff_pred16x16_dc_ssse3 (uint8_t *src, int stride);
2350 void ff_pred16x16_tm_vp8_mmx (uint8_t *src, int stride);
2351 void ff_pred16x16_tm_vp8_mmxext (uint8_t *src, int stride);
2352 void ff_pred16x16_tm_vp8_sse2 (uint8_t *src, int stride);
2353 void ff_pred8x8_dc_rv40_mmxext (uint8_t *src, int stride);
2354 void ff_pred8x8_vertical_mmx (uint8_t *src, int stride);
2355 void ff_pred8x8_horizontal_mmx (uint8_t *src, int stride);
2356 void ff_pred8x8_horizontal_mmxext (uint8_t *src, int stride);
2357 void ff_pred8x8_horizontal_ssse3 (uint8_t *src, int stride);
2358 void ff_pred8x8_tm_vp8_mmx (uint8_t *src, int stride);
afee30fe8c26
16x16 and 8x8c x86 SIMD intra pred functions for VP8 and H.264
darkshikari
parents:
11701
diff
changeset
|
2359 void ff_pred8x8_tm_vp8_mmxext (uint8_t *src, int stride); |
afee30fe8c26
16x16 and 8x8c x86 SIMD intra pred functions for VP8 and H.264
darkshikari
parents:
11701
diff
changeset
|
2360 void ff_pred8x8_tm_vp8_sse2 (uint8_t *src, int stride); |
11953 | 2361 void ff_pred8x8_tm_vp8_ssse3 (uint8_t *src, int stride); |
12001
953a0949c789
Fix some intra pred MMX functions that used MMXEXT instructions
darkshikari
parents:
11993
diff
changeset
|
2362 void ff_pred4x4_dc_mmxext (uint8_t *src, const uint8_t *topright, int stride); |
12003
3b761226ea35
Add mmx/mmxext/ssse3 4x4 TM intra pred functions for vp8
darkshikari
parents:
12001
diff
changeset
|
2363 void ff_pred4x4_tm_vp8_mmx (uint8_t *src, const uint8_t *topright, int stride); |
3b761226ea35
Add mmx/mmxext/ssse3 4x4 TM intra pred functions for vp8
darkshikari
parents:
12001
diff
changeset
|
2364 void ff_pred4x4_tm_vp8_mmxext (uint8_t *src, const uint8_t *topright, int stride); |
3b761226ea35
Add mmx/mmxext/ssse3 4x4 TM intra pred functions for vp8
darkshikari
parents:
12001
diff
changeset
|
2365 void ff_pred4x4_tm_vp8_ssse3 (uint8_t *src, const uint8_t *topright, int stride); |
12004 | 2366 void ff_pred4x4_vertical_vp8_mmxext(uint8_t *src, const uint8_t *topright, int stride); |
11951
afee30fe8c26
16x16 and 8x8c x86 SIMD intra pred functions for VP8 and H.264
darkshikari
parents:
11701
diff
changeset
|
2367 |
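/* ff_h264_pred_init_x86() overrides the C prediction function pointers
 * with the fastest SIMD version the host CPU supports.  A minimal usage
 * sketch, assuming the generic initializer ff_h264_pred_init() in
 * libavcodec/h264pred.c is the caller:
 *
 *     H264PredContext h;
 *     ff_h264_pred_init(&h, codec_id);     // install C fallbacks first
 *     ff_h264_pred_init_x86(&h, codec_id); // then the SIMD overrides
 */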
#if CONFIG_H264PRED
void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
{
    int mm_flags = mm_support();
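    /* mm_support() returns a bitmask of the SIMD extensions available at
     * runtime (FF_MM_MMX, FF_MM_MMX2, FF_MM_SSE, ...).  The blocks below
     * run from oldest to newest instruction set, so each tier can
     * overwrite the previous one's pointers with a faster version. */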

#if HAVE_YASM
    if (mm_flags & FF_MM_MMX) {
        h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_mmx;
        h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmx;
        h->pred8x8  [VERT_PRED8x8] = ff_pred8x8_vertical_mmx;
        h->pred8x8  [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmx;
        if (codec_id == CODEC_ID_VP8) {
            h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmx;
            h->pred8x8  [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_mmx;
            h->pred4x4  [TM_VP8_PRED  ] = ff_pred4x4_tm_vp8_mmx;
        }
    }

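    /* The flag checks are cumulative, not mutually exclusive: a CPU that
     * reports MMX2 has already taken the MMX block above, and the
     * assignments here simply override those pointers. */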
    if (mm_flags & FF_MM_MMX2) {
        h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext;
        h->pred16x16[DC_PRED8x8  ] = ff_pred16x16_dc_mmxext;
        h->pred8x8  [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext;
        h->pred4x4  [DC_PRED     ] = ff_pred4x4_dc_mmxext;
        if (codec_id == CODEC_ID_VP8) {
            h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmxext;
            h->pred8x8  [DC_PRED8x8   ] = ff_pred8x8_dc_rv40_mmxext;
            h->pred8x8  [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_mmxext;
            h->pred4x4  [TM_VP8_PRED  ] = ff_pred4x4_tm_vp8_mmxext;
            h->pred4x4  [VERT_PRED    ] = ff_pred4x4_vertical_vp8_mmxext;
        }
    }

    if (mm_flags & FF_MM_SSE) {
        h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_sse;
    }

    if (mm_flags & FF_MM_SSE2) {
        h->pred16x16[DC_PRED8x8  ] = ff_pred16x16_dc_sse2;
        if (codec_id == CODEC_ID_VP8) {
            h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_sse2;
            h->pred8x8  [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_sse2;
        }
    }

    if (mm_flags & FF_MM_SSSE3) {
        h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_ssse3;
        h->pred16x16[DC_PRED8x8  ] = ff_pred16x16_dc_ssse3;
        h->pred8x8  [HOR_PRED8x8 ] = ff_pred8x8_horizontal_ssse3;
        if (codec_id == CODEC_ID_VP8) {
            h->pred8x8  [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_ssse3;
            h->pred4x4  [TM_VP8_PRED  ] = ff_pred4x4_tm_vp8_ssse3;
        }
    }
#endif /* HAVE_YASM */
}
#endif /* CONFIG_H264PRED */