/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil_mmx.h"

DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_3_1  ) = 0x0103010301030103ULL;
DECLARE_ALIGNED_8 (static const uint64_t, ff_pb_7_3  ) = 0x0307030703070307ULL;

/***********************************/
/* IDCT */

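// SUMSUB_BADC(a,b,c,d): two packed-word butterflies in place; with A,B,C,D the
// incoming values it leaves a = A+B, b = B-A, c = C+D, d = D-C.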
#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

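// SUMSUBD2_AB(a,b,t): with A,B the incoming values (t is scratch) it leaves
// b = A + (B>>1) and a = (A>>1) - B, the odd-part rotation of the 4x4 transform.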
#define SUMSUBD2_AB( a, b, t ) \
    "movq "#b", "#t" \n\t"\
    "psraw $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA  ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )

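/* STORE_DIFF_4P(p, t, z): adds one row of >>6-scaled residuals to 4 pixels at (%0)
 * with unsigned saturation. A scalar sketch of what it computes:
 *     for (x = 0; x < 4; x++)
 *         dst[x] = av_clip_uint8(dst[x] + (row[x] >> 6));
 */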
#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t"\
    "movd (%0), "#t" \n\t"\
    "punpcklbw "#z", "#t" \n\t"\
    "paddsw "#t", "#p" \n\t"\
    "packuswb "#z", "#p" \n\t"\
    "movd "#p", (%0) \n\t"

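// ff_h264_idct_add_mmx: inverse 4x4 transform of block[] added to a 4x4 area of dst.
// Two 1-D passes with a transpose in between; the +32 rounding bias (ff_pw_32) is
// folded into one row before the second pass and STORE_DIFF_4P does the final >>6.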
static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        :: "r"(block) );

    __asm__ volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )

        "paddw %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7 \n\t"
        :: "m"(ff_pw_32));

    __asm__ volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((x86_reg)stride)
    );
}

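// h264_idct8_1d: one 1-D pass of the 8x8 transform over a 4-coefficient-wide slice
// of block[] (rows are loaded at 16-byte offsets); the eight results are left in mm0-mm7.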
static inline void h264_idct8_1d(int16_t *block)
{
    __asm__ volatile(
        "movq 112(%0), %%mm7 \n\t"
        "movq 80(%0), %%mm0 \n\t"
        "movq 48(%0), %%mm3 \n\t"
        "movq 16(%0), %%mm5 \n\t"

        "movq %%mm0, %%mm4 \n\t"
        "movq %%mm5, %%mm1 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm1 \n\t"
        "paddw %%mm0, %%mm4 \n\t"
        "paddw %%mm5, %%mm1 \n\t"
        "paddw %%mm7, %%mm4 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psubw %%mm5, %%mm4 \n\t"
        "paddw %%mm3, %%mm1 \n\t"

        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm3, %%mm0 \n\t"
        "paddw %%mm7, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "psraw $1, %%mm3 \n\t"
        "psraw $1, %%mm7 \n\t"
        "psubw %%mm3, %%mm5 \n\t"
        "psubw %%mm7, %%mm0 \n\t"

        "movq %%mm4, %%mm3 \n\t"
        "movq %%mm1, %%mm7 \n\t"
        "psraw $2, %%mm1 \n\t"
        "psraw $2, %%mm3 \n\t"
        "paddw %%mm5, %%mm3 \n\t"
        "psraw $2, %%mm5 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "psraw $2, %%mm0 \n\t"
        "psubw %%mm4, %%mm5 \n\t"
        "psubw %%mm0, %%mm7 \n\t"

        "movq 32(%0), %%mm2 \n\t"
        "movq 96(%0), %%mm6 \n\t"
        "movq %%mm2, %%mm4 \n\t"
        "movq %%mm6, %%mm0 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "psubw %%mm0, %%mm4 \n\t"
        "paddw %%mm2, %%mm6 \n\t"

        "movq (%0), %%mm2 \n\t"
        "movq 64(%0), %%mm0 \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}

static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t __attribute__ ((aligned(8))) b2[64];

    block[0] += 32;

    for(i=0; i<2; i++){
        DECLARE_ALIGNED_8(uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        __asm__ volatile(
            "movq %%mm7, %0 \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0, 8(%1) \n\t"
            "movq %%mm6, 24(%1) \n\t"
            "movq %%mm7, 40(%1) \n\t"
            "movq %%mm4, 56(%1) \n\t"
            "movq %0, %%mm7 \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7, (%1) \n\t"
            "movq %%mm1, 16(%1) \n\t"
            "movq %%mm0, 32(%1) \n\t"
            "movq %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        __asm__ volatile(
            "psraw $6, %%mm7 \n\t"
            "psraw $6, %%mm6 \n\t"
            "psraw $6, %%mm5 \n\t"
            "psraw $6, %%mm4 \n\t"
            "psraw $6, %%mm3 \n\t"
            "psraw $6, %%mm2 \n\t"
            "psraw $6, %%mm1 \n\t"
            "psraw $6, %%mm0 \n\t"

            "movq %%mm7, (%0) \n\t"
            "movq %%mm5, 16(%0) \n\t"
            "movq %%mm3, 32(%0) \n\t"
            "movq %%mm1, 48(%0) \n\t"
            "movq %%mm0, 64(%0) \n\t"
            "movq %%mm2, 80(%0) \n\t"
            "movq %%mm4, 96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);
}

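// STORE_DIFF_8P: SSE2 counterpart of STORE_DIFF_4P; adds eight >>6-scaled residual
// words to eight destination pixels with unsigned saturation, using z as a zero register.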
#define STORE_DIFF_8P( p, d, t, z )\
    "movq "#d", "#t" \n"\
    "psraw $6, "#p" \n"\
    "punpcklbw "#z", "#t" \n"\
    "paddsw "#t", "#p" \n"\
    "packuswb "#p", "#p" \n"\
    "movq "#p", "#d" \n"

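// H264_IDCT8_1D_SSE2: one 1-D pass of the 8x8 transform on eight xmm rows. Rows 0 and 4
// are reloaded from 0x00(%1)/0x40(%1) inside the macro, so the caller must store them
// there before the second pass (see ff_h264_idct8_add_sse2 below).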
#define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\
    "movdqa "#c", "#a" \n"\
    "movdqa "#g", "#e" \n"\
    "psraw $1, "#c" \n"\
    "psraw $1, "#g" \n"\
    "psubw "#e", "#c" \n"\
    "paddw "#a", "#g" \n"\
    "movdqa "#b", "#e" \n"\
    "psraw $1, "#e" \n"\
    "paddw "#b", "#e" \n"\
    "paddw "#d", "#e" \n"\
    "paddw "#f", "#e" \n"\
    "movdqa "#f", "#a" \n"\
    "psraw $1, "#a" \n"\
    "paddw "#f", "#a" \n"\
    "paddw "#h", "#a" \n"\
    "psubw "#b", "#a" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#d", "#f" \n"\
    "paddw "#h", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "psraw $1, "#d" \n"\
    "psraw $1, "#h" \n"\
    "psubw "#d", "#b" \n"\
    "psubw "#h", "#f" \n"\
    "movdqa "#e", "#d" \n"\
    "movdqa "#a", "#h" \n"\
    "psraw $2, "#d" \n"\
    "psraw $2, "#h" \n"\
    "paddw "#f", "#d" \n"\
    "paddw "#b", "#h" \n"\
    "psraw $2, "#f" \n"\
    "psraw $2, "#b" \n"\
    "psubw "#f", "#e" \n"\
    "psubw "#a", "#b" \n"\
    "movdqa 0x00(%1), "#a" \n"\
    "movdqa 0x40(%1), "#f" \n"\
    SUMSUB_BA(f, a)\
    SUMSUB_BA(g, f)\
    SUMSUB_BA(c, a)\
    SUMSUB_BA(e, g)\
    SUMSUB_BA(b, c)\
    SUMSUB_BA(h, a)\
    SUMSUB_BA(d, f)

static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
{
    __asm__ volatile(
        "movdqa 0x10(%1), %%xmm1 \n"
        "movdqa 0x20(%1), %%xmm2 \n"
        "movdqa 0x30(%1), %%xmm3 \n"
        "movdqa 0x50(%1), %%xmm5 \n"
        "movdqa 0x60(%1), %%xmm6 \n"
        "movdqa 0x70(%1), %%xmm7 \n"
        H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7)
        TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1))
        "paddw %4, %%xmm4 \n"
        "movdqa %%xmm4, 0x00(%1) \n"
        "movdqa %%xmm2, 0x40(%1) \n"
        H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1)
        "movdqa %%xmm6, 0x60(%1) \n"
        "movdqa %%xmm7, 0x70(%1) \n"
        "pxor %%xmm7, %%xmm7 \n"
        STORE_DIFF_8P(%%xmm2, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm0, (%0,%2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm3, (%0,%3), %%xmm6, %%xmm7)
        "lea (%0,%2,4), %0 \n"
        STORE_DIFF_8P(%%xmm5, (%0), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm4, (%0,%2), %%xmm6, %%xmm7)
        "movdqa 0x60(%1), %%xmm0 \n"
        "movdqa 0x70(%1), %%xmm1 \n"
        STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7)
        STORE_DIFF_8P(%%xmm1, (%0,%3), %%xmm6, %%xmm7)
        :"+r"(dst)
        :"r"(block), "r"((x86_reg)stride), "r"((x86_reg)3L*stride), "m"(ff_pw_32)
    );
}

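/* DC-only IDCT + add. A scalar sketch of the operation (shown for the 4x4 case):
 *     int dc = (block[0] + 32) >> 6;
 *     for (y = 0; y < 4; y++)
 *         for (x = 0; x < 4; x++)
 *             dst[y*stride + x] = av_clip_uint8(dst[y*stride + x] + dc);
 * The MMX2 code splits dc into a saturating byte add of max(dc,0) (mm0) and a
 * saturating byte subtract of max(-dc,0) (mm1) so it can stay in the 8-bit domain.
 */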
static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}

static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    __asm__ volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
        __asm__ volatile(
            "movq %0, %%mm2 \n\t"
            "movq %1, %%mm3 \n\t"
            "movq %2, %%mm4 \n\t"
            "movq %3, %%mm5 \n\t"
            "paddusb %%mm0, %%mm2 \n\t"
            "paddusb %%mm0, %%mm3 \n\t"
            "paddusb %%mm0, %%mm4 \n\t"
            "paddusb %%mm0, %%mm5 \n\t"
            "psubusb %%mm1, %%mm2 \n\t"
            "psubusb %%mm1, %%mm3 \n\t"
            "psubusb %%mm1, %%mm4 \n\t"
            "psubusb %%mm1, %%mm5 \n\t"
            "movq %%mm2, %0 \n\t"
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %3 \n\t"
            :"+m"(*(uint64_t*)(dst+0*stride)),
             "+m"(*(uint64_t*)(dst+1*stride)),
             "+m"(*(uint64_t*)(dst+2*stride)),
             "+m"(*(uint64_t*)(dst+3*stride))
        );
    }
}

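// scan8[] maps a block index (0..15 luma, 16..23 chroma) to its slot in the
// 8-entry-wide non_zero_count cache layout used by the nnzc[6*8] argument below.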
//FIXME this table is a duplicate from h264data.h, and will be removed once the tables from h264 have been split
static const uint8_t scan8[16 + 2*4]={
 4+1*8, 5+1*8, 4+2*8, 5+2*8,
 6+1*8, 7+1*8, 6+2*8, 7+2*8,
 4+3*8, 5+3*8, 4+4*8, 5+4*8,
 6+3*8, 7+3*8, 6+4*8, 7+4*8,
 1+1*8, 2+1*8,
 1+2*8, 2+2*8,
 1+4*8, 2+4*8,
 1+5*8, 2+5*8,
};

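// The idct_add16/add16intra/add8/add4 wrappers iterate over the sub-blocks of a
// macroblock and, in the MMX2/SSE2 variants, fall back to the cheap DC-only add
// when only the DC coefficient is non-zero.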
static void ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        if(nnzc[ scan8[i] ])
            ff_h264_idct8_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}


static void ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_mmx    (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ] || block[i*16])
            ff_h264_idct_add_mmx(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_mmx    (dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) ff_h264_idct_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_mmx2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_mmx    (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_mmx2(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_sse2   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ] || block[i*16])
            ff_h264_idct_add_mmx    (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct_add8_mmx2(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_mmx    (dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            ff_h264_idct_dc_add_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

/***********************************/
/* deblocking */

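// The deblocking helpers below use a bytewise mask convention: 0xFF in a lane means
// "filter this pixel", 0x00 means leave it alone.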
// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "psubusb "#a", "#t" \n\t"\
    "psubusb "#a", "#o" \n\t"\
    "pcmpeqb "#t", "#o" \n\t"\

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"

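// The pavgb sequence in H264_DEBLOCK_P0_Q0 corresponds, roughly, to the standard
// p0/q0 update: delta = clip(((q0-p0)*4 + (p1-q1) + 4) >> 3, -tc, tc); p0 += delta;
// q0 -= delta; everything kept in unsigned bytes via averaging tricks.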
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    "movq %%mm1 , %%mm5 \n\t"\
    "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\
    "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\
    "pcmpeqb %%mm4 , %%mm4 \n\t"\
    "pxor %%mm4 , %%mm3 \n\t"\
    "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
    "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
    "pxor %%mm1 , %%mm4 \n\t"\
    "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
    "pavgb %%mm5 , %%mm3 \n\t"\
    "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\
    "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
    "psubusb %%mm3 , %%mm6 \n\t"\
    "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
    "pminub %%mm7 , %%mm6 \n\t"\
    "pminub %%mm7 , %%mm3 \n\t"\
    "psubusb %%mm6 , %%mm1 \n\t"\
    "psubusb %%mm3 , %%mm2 \n\t"\
    "paddusb %%mm3 , %%mm1 \n\t"\
    "paddusb %%mm6 , %%mm2 \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone
// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq %%mm1, "#tmp" \n\t"\
    "pavgb %%mm2, "#tmp" \n\t"\
    "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor "q2addr", "#tmp" \n\t"\
    "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq "#p1", "#tmp" \n\t"\
    "psubusb "#tc0", "#tmp" \n\t"\
    "paddusb "#p1", "#tc0" \n\t"\
    "pmaxub "#tmp", "#q2" \n\t"\
    "pminub "#tc0", "#q2" \n\t"\
    "movq "#q2", "q1addr" \n\t"

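// In h264_loop_filter_luma_mmx2 below, tmp0[0] holds the filter mask (restricted to
// edges with tc0 >= 0) and tmp0[1] holds the byte-broadcast tc0 values.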
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    DECLARE_ALIGNED_8(uint64_t, tmp0[2]);

    __asm__ volatile(
        "movq (%1,%3), %%mm0 \n\t" //p1
        "movq (%1,%3,2), %%mm1 \n\t" //p0
        "movq (%2), %%mm2 \n\t" //q0
        "movq (%2,%3), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)

        "movd %5, %%mm4 \n\t"
        "punpcklbw %%mm4, %%mm4 \n\t"
        "punpcklwd %%mm4, %%mm4 \n\t"
        "pcmpeqb %%mm3, %%mm3 \n\t"
        "movq %%mm4, %%mm6 \n\t"
        "pcmpgtb %%mm3, %%mm4 \n\t"
        "movq %%mm6, 8+%0 \n\t"
        "pand %%mm4, %%mm7 \n\t"
        "movq %%mm7, %0 \n\t"

        /* filter p1 */
        "movq (%1), %%mm3 \n\t" //p2
        DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pand 8+%0, %%mm7 \n\t" // mask & tc0
        "movq %%mm7, %%mm4 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)

        /* filter q1 */
        "movq (%2,%3,2), %%mm4 \n\t" //q2
        DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pand %0, %%mm6 \n\t"
        "movq 8+%0, %%mm5 \n\t" // could be merged with the and below, but that is slower
        "pand %%mm6, %%mm5 \n\t"
        "psubb %%mm6, %%mm7 \n\t"
        "movq (%2,%3), %%mm3 \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%8, unused)
        "movq %%mm1, (%1,%3,2) \n\t"
        "movq %%mm2, (%2) \n\t"

        : "=m"(*tmp0)
        : "r"(pix-3*stride), "r"(pix), "r"((x86_reg)stride),
          "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
          "m"(ff_bone)
    );
}

static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}
static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    DECLARE_ALIGNED_8(uint8_t, trans[8*8]);
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans,       pix-4,          8, stride);
        transpose4x4(trans  +4*8, pix,            8, stride);
        transpose4x4(trans+4,     pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix  +4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2,          trans  +2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}

static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t" //p1
        "movq (%0,%2), %%mm1 \n\t" //p0
        "movq (%1), %%mm2 \n\t" //q0
        "movq (%1,%2), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd %3, %%mm6 \n\t"
        "punpcklbw %%mm6, %%mm6 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"

        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq "#p0", %%mm4 \n\t"\
    "pxor "#q1", %%mm4 \n\t"\
    "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0" \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\

static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t"
        "movq (%0,%2), %%mm1 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq (%1,%2), %%mm3 \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq %%mm1, %%mm5 \n\t"
        "movq %%mm2, %%mm6 \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb %%mm5, %%mm1 \n\t"
        "psubb %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "paddb %%mm5, %%mm1 \n\t"
        "paddb %%mm6, %%mm2 \n\t"
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((x86_reg)stride),
           "m"(alpha1), "m"(beta1), "m"(ff_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field ) {
    int dir;
    __asm__ volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movq %0, %%mm6 \n\t"
        "movq %1, %%mm5 \n\t"
        "movq %2, %%mm4 \n\t"
        ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7)
    );
    if(field)
        __asm__ volatile(
            "movq %0, %%mm5 \n\t"
            "movq %1, %%mm4 \n\t"
            ::"m"(ff_pb_3_1), "m"(ff_pb_7_3)
        );

    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const int d_idx = dir ? -8 : -1;
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge, l;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            __asm__ volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                __asm__ volatile("pxor %%mm0, %%mm0 \n\t":);
                for( l = bidir; l >= 0; l-- ) {
                    __asm__ volatile(
                        "movd %0, %%mm1 \n\t"
                        "punpckldq %1, %%mm1 \n\t"
                        "movq %%mm1, %%mm2 \n\t"
                        "psrlw $7, %%mm2 \n\t"
                        "pand %%mm6, %%mm2 \n\t"
                        "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1
                        "punpckldq %%mm1, %%mm2 \n\t"
                        "pcmpeqb %%mm2, %%mm1 \n\t"
                        "paddb %%mm6, %%mm1 \n\t"
                        "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn]
                        "por %%mm1, %%mm0 \n\t"

                        "movq %2, %%mm1 \n\t"
                        "movq %3, %%mm2 \n\t"
                        "psubw %4, %%mm1 \n\t"
                        "psubw %5, %%mm2 \n\t"
                        "packsswb %%mm2, %%mm1 \n\t"
                        "paddb %%mm5, %%mm1 \n\t"
                        "pminub %%mm4, %%mm1 \n\t"
                        "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit
                        "por %%mm1, %%mm0 \n\t"
                        ::"m"(ref[l][b_idx]),
                          "m"(ref[l][b_idx+d_idx]),
                          "m"(mv[l][b_idx][0]),
                          "m"(mv[l][b_idx+2][0]),
                          "m"(mv[l][b_idx+d_idx][0]),
                          "m"(mv[l][b_idx+d_idx+2][0])
                    );
                }
            }
            __asm__ volatile(
                "movd %0, %%mm1 \n\t"
                "por %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            __asm__ volatile(
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "psrlw $15, %%mm0 \n\t" // nonzero -> 1
                "psrlw $14, %%mm1 \n\t"
                "movq %%mm0, %%mm2 \n\t"
                "por %%mm1, %%mm2 \n\t"
                "psrlw $1, %%mm1 \n\t"
                "pandn %%mm2, %%mm1 \n\t"
                "movq %%mm1, %0 \n\t"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
        edges = 4;
        step = 1;
    }
    __asm__ volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0, (%0) \n\t"
        "movq %%mm3, 8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}

/***********************************/
/* motion compensation */

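// H.264 half-pel interpolation uses the 6-tap filter (1,-5,20,20,-5,1).
// QPEL_H264V_MM computes one vertical output row as (A - 5*B + 20*C + 20*D - 5*E + F + 16) >> 5;
// QPEL_H264HV_MM stores the corresponding intermediate at word precision for a second pass.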
#define QPEL_H264V_MM(A,B,C,D,E,F,OP,T,Z,d,q)\
    "mov"#q" "#C", "#T" \n\t"\
    "mov"#d" (%0), "#F" \n\t"\
    "paddw "#D", "#T" \n\t"\
    "psllw $2, "#T" \n\t"\
    "psubw "#B", "#T" \n\t"\
    "psubw "#E", "#T" \n\t"\
    "punpcklbw "#Z", "#F" \n\t"\
    "pmullw %4, "#T" \n\t"\
    "paddw %5, "#A" \n\t"\
    "add %2, %0 \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", "#T" \n\t"\
    "psraw $5, "#T" \n\t"\
    "packuswb "#T", "#T" \n\t"\
    OP(T, (%1), A, d)\
    "add %3, %1 \n\t"

#define QPEL_H264HV_MM(A,B,C,D,E,F,OF,T,Z,d,q)\
    "mov"#q" "#C", "#T" \n\t"\
    "mov"#d" (%0), "#F" \n\t"\
    "paddw "#D", "#T" \n\t"\
    "psllw $2, "#T" \n\t"\
    "paddw %4, "#A" \n\t"\
    "psubw "#B", "#T" \n\t"\
    "psubw "#E", "#T" \n\t"\
    "punpcklbw "#Z", "#F" \n\t"\
    "pmullw %3, "#T" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "add %2, %0 \n\t"\
    "paddw "#A", "#T" \n\t"\
    "mov"#q" "#T", "#OF"(%1) \n\t"

#define QPEL_H264V(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%mm6,%%mm7,d,q)
#define QPEL_H264HV(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%mm6,%%mm7,d,q)
#define QPEL_H264V_XMM(A,B,C,D,E,F,OP) QPEL_H264V_MM(A,B,C,D,E,F,OP,%%xmm6,%%xmm7,q,dqa)
#define QPEL_H264HV_XMM(A,B,C,D,E,F,OF) QPEL_H264HV_MM(A,B,C,D,E,F,OF,%%xmm6,%%xmm7,q,dqa)


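// QPEL_H264(OPNAME, OP, MMX) instantiates the whole family of 4/8/16-wide horizontal,
// vertical and HV lowpass primitives (plus the pixels*_l2_shift5 helpers) for one
// OPNAME/OP pair (e.g. put or avg) and one instruction-set suffix.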
#define QPEL_H264(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=4;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "1: \n\t"\
        "movd -1(%0), %%mm1 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "movd 1(%0), %%mm3 \n\t"\
        "movd 2(%0), %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "paddw %%mm0, %%mm1 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "movd -2(%0), %%mm0 \n\t"\
        "movd 3(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm3, %%mm0 \n\t"\
        "psllw $2, %%mm2 \n\t"\
        "psubw %%mm1, %%mm2 \n\t"\
        "pmullw %%mm4, %%mm2 \n\t"\
        "paddw %%mm5, %%mm0 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=4;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm4 \n\t"\
        "movq %1, %%mm5 \n\t"\
        :: "m"(ff_pw_5), "m"(ff_pw_16)\
    );\
    do{\
        __asm__ volatile(\
            "movd -1(%0), %%mm1 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "movd 1(%0), %%mm3 \n\t"\
            "movd 2(%0), %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "paddw %%mm0, %%mm1 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "movd -2(%0), %%mm0 \n\t"\
            "movd 3(%0), %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm3, %%mm0 \n\t"\
            "psllw $2, %%mm2 \n\t"\
            "psubw %%mm1, %%mm2 \n\t"\
            "pmullw %%mm4, %%mm2 \n\t"\
            "paddw %%mm5, %%mm0 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "movd (%2), %%mm3 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "packuswb %%mm0, %%mm0 \n\t"\
            PAVGB" %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm6, d)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }while(--h);\
}\
static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    src -= 2*srcStride;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movd (%0), %%mm0 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm1 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm3 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=4;\
    int w=3;\
    src -= 2*srcStride+2;\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
\
            : "+a"(src)\
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 9*srcStride;\
    }\
    tmp -= 3*4;\
    __asm__ volatile(\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "paddw 10(%0), %%mm0 \n\t"\
        "movq 2(%0), %%mm1 \n\t"\
        "paddw 8(%0), %%mm1 \n\t"\
        "movq 4(%0), %%mm2 \n\t"\
        "paddw 6(%0), %%mm2 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
        "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
        "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
        "paddsw %%mm2, %%mm0 \n\t"\
        "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
        "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 */\
        "psraw $6, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm7, d)\
        "add $24, %0 \n\t"\
        "add %3, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(tmp), "+c"(dst), "+g"(h)\
        : "S"((x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 1(%0), %%mm2 \n\t"\
        "movq %%mm0, %%mm1 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "psllw $2, %%mm0 \n\t"\
        "psllw $2, %%mm1 \n\t"\
        "movq -1(%0), %%mm2 \n\t"\
        "movq 2(%0), %%mm4 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "movq %%mm4, %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "punpckhbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm4, %%mm2 \n\t"\
        "paddw %%mm3, %%mm5 \n\t"\
        "psubw %%mm2, %%mm0 \n\t"\
        "psubw %%mm5, %%mm1 \n\t"\
        "pmullw %%mm6, %%mm0 \n\t"\
        "pmullw %%mm6, %%mm1 \n\t"\
        "movd -2(%0), %%mm2 \n\t"\
        "movd 7(%0), %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "paddw %%mm5, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm4, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm1, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm6 \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
        __asm__ volatile(\
            "movq (%0), %%mm0 \n\t"\
            "movq 1(%0), %%mm2 \n\t"\
            "movq %%mm0, %%mm1 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpckhbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "psllw $2, %%mm0 \n\t"\
            "psllw $2, %%mm1 \n\t"\
            "movq -1(%0), %%mm2 \n\t"\
            "movq 2(%0), %%mm4 \n\t"\
            "movq %%mm2, %%mm3 \n\t"\
            "movq %%mm4, %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpckhbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            "punpckhbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm4, %%mm2 \n\t"\
            "paddw %%mm3, %%mm5 \n\t"\
            "psubw %%mm2, %%mm0 \n\t"\
            "psubw %%mm5, %%mm1 \n\t"\
            "pmullw %%mm6, %%mm0 \n\t"\
            "pmullw %%mm6, %%mm1 \n\t"\
            "movd -2(%0), %%mm2 \n\t"\
            "movd 7(%0), %%mm5 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm5 \n\t"\
            "paddw %%mm3, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "movq %5, %%mm5 \n\t"\
            "paddw %%mm5, %%mm2 \n\t"\
            "paddw %%mm5, %%mm4 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm4, %%mm1 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "movq (%2), %%mm4 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            PAVGB" %%mm4, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm5, q)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
              "m"(ff_pw_16)\
            : "memory"\
        );\
    }while(--h);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;\
\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
\
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(h==16){\
            __asm__ volatile(\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
                QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
                QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
                QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
                QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
                QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
                QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
                : "+a"(src), "+c"(dst)\
                : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        if(size==16){\
            __asm__ volatile(\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1,  8*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2,  9*48)\
                QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
                QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
                QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
                QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
                : "+a"(src)\
                : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)\
                : "memory"\
            );\
        }\
        tmp += 4;\
        src += 4 - (size+5)*srcStride;\
    }\
}\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int w = size>>4;\
    do{\
        int h = size;\
        __asm__ volatile(\
            "1: \n\t"\
            "movq (%0), %%mm0 \n\t"\
            "movq 8(%0), %%mm3 \n\t"\
            "movq 2(%0), %%mm1 \n\t"\
            "movq 10(%0), %%mm4 \n\t"\
            "paddw %%mm4, %%mm0 \n\t"\
            "paddw %%mm3, %%mm1 \n\t"\
            "paddw 18(%0), %%mm3 \n\t"\
            "paddw 16(%0), %%mm4 \n\t"\
            "movq 4(%0), %%mm2 \n\t"\
            "movq 12(%0), %%mm5 \n\t"\
            "paddw 6(%0), %%mm2 \n\t"\
            "paddw 14(%0), %%mm5 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "psubw %%mm1, %%mm0 \n\t"\
            "psubw %%mm4, %%mm3 \n\t"\
            "paddsw %%mm2, %%mm0 \n\t"\
            "paddsw %%mm5, %%mm3 \n\t"\
            "psraw $2, %%mm0 \n\t"\
            "psraw $2, %%mm3 \n\t"\
            "paddw %%mm2, %%mm0 \n\t"\
            "paddw %%mm5, %%mm3 \n\t"\
            "psraw $6, %%mm0 \n\t"\
            "psraw $6, %%mm3 \n\t"\
            "packuswb %%mm3, %%mm0 \n\t"\
            OP(%%mm0, (%1),%%mm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
        tmp += 8 - size*24;\
        dst += 8 - size*dstStride;\
    }while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\
\
static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    __asm__ volatile(\
        "movq (%1), %%mm0 \n\t"\
        "movq 24(%1), %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea (%0,%3,2), %0 \n\t"\
        "lea (%2,%4,2), %2 \n\t"\
        "movq 48(%1), %%mm0 \n\t"\
        "movq 72(%1), %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((x86_reg)src8Stride), "D"((x86_reg)dstStride)\
        :"memory");\
}\
static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    do{\
        __asm__ volatile(\
            "movq (%1), %%mm0 \n\t"\
            "movq 8(%1), %%mm1 \n\t"\
            "movq 48(%1), %%mm2 \n\t"\
            "movq 8+48(%1), %%mm3 \n\t"\
            "psraw $5, %%mm0 \n\t"\
            "psraw $5, %%mm1 \n\t"\
            "psraw $5, %%mm2 \n\t"\
            "psraw $5, %%mm3 \n\t"\
            "packuswb %%mm1, %%mm0 \n\t"\
            "packuswb %%mm3, %%mm2 \n\t"\
            PAVGB" (%0), %%mm0 \n\t"\
            PAVGB" (%0,%3), %%mm2 \n\t"\
            OP(%%mm0, (%2), %%mm5, q)\
            OP(%%mm2, (%2,%4), %%mm5, q)\
            ::"a"(src8), "c"(src16), "d"(dst),\
              "r"((x86_reg)src8Stride), "r"((x86_reg)dstStride)\
            :"memory");\
        src8 += 2L*src8Stride;\
        src16 += 48;\
        dst += 2L*dstStride;\
    }while(h-=2);\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst  , src16  , src8  , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\


#ifdef ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=16;\
    __asm__ volatile(\
        "pxor %%xmm15, %%xmm15 \n\t"\
        "movdqa %6, %%xmm14 \n\t"\
        "movdqa %7, %%xmm13 \n\t"\
        "1: \n\t"\
        "lddqu 3(%0), %%xmm1 \n\t"\
        "lddqu -5(%0), %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm0 \n\t"\
        "punpckhbw %%xmm15, %%xmm1 \n\t"\
        "punpcklbw %%xmm15, %%xmm0 \n\t"\
        "punpcklbw %%xmm15, %%xmm7 \n\t"\
        "movdqa %%xmm1, %%xmm2 \n\t"\
        "movdqa %%xmm0, %%xmm6 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "movdqa %%xmm0, %%xmm8 \n\t"\
        "movdqa %%xmm1, %%xmm4 \n\t"\
        "movdqa %%xmm0, %%xmm9 \n\t"\
        "movdqa %%xmm1, %%xmm5 \n\t"\
        "movdqa %%xmm0, %%xmm10 \n\t"\
        "palignr $6, %%xmm0, %%xmm5 \n\t"\
        "palignr $6, %%xmm7, %%xmm10\n\t"\
        "palignr $8, %%xmm0, %%xmm4 \n\t"\
        "palignr $8, %%xmm7, %%xmm9 \n\t"\
        "palignr $10,%%xmm0, %%xmm3 \n\t"\
        "palignr $10,%%xmm7, %%xmm8 \n\t"\
        "paddw %%xmm1, %%xmm5 \n\t"\
        "paddw %%xmm0, %%xmm10 \n\t"\
        "palignr $12,%%xmm0, %%xmm2 \n\t"\
        "palignr $12,%%xmm7, %%xmm6 \n\t"\
        "palignr $14,%%xmm0, %%xmm1 \n\t"\
        "palignr $14,%%xmm7, %%xmm0 \n\t"\
        "paddw %%xmm3, %%xmm2 \n\t"\
        "paddw %%xmm8, %%xmm6 \n\t"\
        "paddw %%xmm4, %%xmm1 \n\t"\
        "paddw %%xmm9, %%xmm0 \n\t"\
        "psllw $2, %%xmm2 \n\t"\
        "psllw $2, %%xmm6 \n\t"\
        "psubw %%xmm1, %%xmm2 \n\t"\
        "psubw %%xmm0, %%xmm6 \n\t"\
        "paddw %%xmm13,%%xmm5 \n\t"\
        "paddw %%xmm13,%%xmm10 \n\t"\
        "pmullw %%xmm14,%%xmm2 \n\t"\
        "pmullw %%xmm14,%%xmm6 \n\t"\
        "lddqu (%2), %%xmm3 \n\t"\
        "paddw %%xmm5, %%xmm2 \n\t"\
        "paddw %%xmm10,%%xmm6 \n\t"\
        "psraw $5, %%xmm2 \n\t"\
        "psraw $5, %%xmm6 \n\t"\
        "packuswb %%xmm2,%%xmm6 \n\t"\
        "pavgb %%xmm3, %%xmm6 \n\t"\
        OP(%%xmm6, (%1), %%xmm4, dqa)\
        "add %5, %0 \n\t"\
        "add %5, %1 \n\t"\
        "add %4, %2 \n\t"\
        "decl %3 \n\t"\
        "jg 1b \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2), "+g"(h)\
        : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
          "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}
#else // ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
#endif // ARCH_X86_64

1497
#define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7 \n\t"\
        "movdqa %0, %%xmm6 \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
        __asm__ volatile(\
            "lddqu -5(%0), %%xmm1 \n\t"\
            "movdqa %%xmm1, %%xmm0 \n\t"\
            "punpckhbw %%xmm7, %%xmm1 \n\t"\
            "punpcklbw %%xmm7, %%xmm0 \n\t"\
            "movdqa %%xmm1, %%xmm2 \n\t"\
            "movdqa %%xmm1, %%xmm3 \n\t"\
            "movdqa %%xmm1, %%xmm4 \n\t"\
            "movdqa %%xmm1, %%xmm5 \n\t"\
            "palignr $6, %%xmm0, %%xmm5 \n\t"\
            "palignr $8, %%xmm0, %%xmm4 \n\t"\
            "palignr $10, %%xmm0, %%xmm3 \n\t"\
            "paddw %%xmm1, %%xmm5 \n\t"\
            "palignr $12, %%xmm0, %%xmm2 \n\t"\
            "palignr $14, %%xmm0, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "psllw $2, %%xmm2 \n\t"\
            "movq (%2), %%xmm3 \n\t"\
            "psubw %%xmm1, %%xmm2 \n\t"\
            "paddw %5, %%xmm5 \n\t"\
            "pmullw %%xmm6, %%xmm2 \n\t"\
            "paddw %%xmm5, %%xmm2 \n\t"\
            "psraw $5, %%xmm2 \n\t"\
            "packuswb %%xmm2, %%xmm2 \n\t"\
            "pavgb %%xmm3, %%xmm2 \n\t"\
            OP(%%xmm2, (%1), %%xmm4, q)\
            "add %4, %0 \n\t"\
            "add %4, %1 \n\t"\
            "add %3, %2 \n\t"\
            : "+a"(src), "+c"(dst), "+d"(src2)\
            : "D"((x86_reg)src2Stride), "S"((x86_reg)dstStride),\
              "m"(ff_pw_16)\
            : "memory"\
        );\
    }while(--h);\
}\
QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7 \n\t"\
        "movdqa %5, %%xmm6 \n\t"\
        "1: \n\t"\
        "lddqu -5(%0), %%xmm1 \n\t"\
        "movdqa %%xmm1, %%xmm0 \n\t"\
        "punpckhbw %%xmm7, %%xmm1 \n\t"\
        "punpcklbw %%xmm7, %%xmm0 \n\t"\
        "movdqa %%xmm1, %%xmm2 \n\t"\
        "movdqa %%xmm1, %%xmm3 \n\t"\
        "movdqa %%xmm1, %%xmm4 \n\t"\
        "movdqa %%xmm1, %%xmm5 \n\t"\
        "palignr $6, %%xmm0, %%xmm5 \n\t"\
        "palignr $8, %%xmm0, %%xmm4 \n\t"\
        "palignr $10, %%xmm0, %%xmm3 \n\t"\
        "paddw %%xmm1, %%xmm5 \n\t"\
        "palignr $12, %%xmm0, %%xmm2 \n\t"\
        "palignr $14, %%xmm0, %%xmm1 \n\t"\
        "paddw %%xmm3, %%xmm2 \n\t"\
        "paddw %%xmm4, %%xmm1 \n\t"\
        "psllw $2, %%xmm2 \n\t"\
        "psubw %%xmm1, %%xmm2 \n\t"\
        "paddw %6, %%xmm5 \n\t"\
        "pmullw %%xmm6, %%xmm2 \n\t"\
        "paddw %%xmm5, %%xmm2 \n\t"\
        "psraw $5, %%xmm2 \n\t"\
        "packuswb %%xmm2, %%xmm2 \n\t"\
        OP(%%xmm2, (%1), %%xmm4, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+g"(h)\
        : "D"((x86_reg)srcStride), "S"((x86_reg)dstStride),\
          "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\

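/*
 * QPEL_H264_V_XMM: vertical 6-tap filter.  Five source rows are preloaded and
 * unpacked to words; each QPEL_H264V_XMM step then brings in one further row,
 * emits one output row and rotates the register assignment, so 8 (or 16, when
 * h == 16) rows are produced without reloading earlier lines.
 */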
#define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    src -= 2*srcStride;\
    \
    __asm__ volatile(\
        "pxor %%xmm7, %%xmm7 \n\t"\
        "movq (%0), %%xmm0 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm1 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm2 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm3 \n\t"\
        "add %2, %0 \n\t"\
        "movq (%0), %%xmm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%xmm7, %%xmm0 \n\t"\
        "punpcklbw %%xmm7, %%xmm1 \n\t"\
        "punpcklbw %%xmm7, %%xmm2 \n\t"\
        "punpcklbw %%xmm7, %%xmm3 \n\t"\
        "punpcklbw %%xmm7, %%xmm4 \n\t"\
        QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
        QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
        QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
        QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
        QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
        QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
        QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
        QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
    if(h==16){\
        __asm__ volatile(\
            QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
            QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
            QPEL_H264V_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, OP)\
            QPEL_H264V_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, OP)\
            QPEL_H264V_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, OP)\
            QPEL_H264V_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, OP)\
            QPEL_H264V_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, OP)\
            QPEL_H264V_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, OP)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
    }\
}\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst, src, dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}

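/*
 * The centre ("hv") positions are computed in two passes: hv1 filters the
 * source vertically into a 16-bit temporary buffer, and hv2 then filters that
 * buffer horizontally and scales the combined result back to 8 bits.  Keeping
 * the intermediate at full precision matches the H.264 definition of the
 * centre half-pel sample.
 */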
static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp, uint8_t *src, int tmpStride, int srcStride, int size){
    int w = (size+8)>>3;
    src -= 2*srcStride+2;
    while(w--){
        __asm__ volatile(
            "pxor %%xmm7, %%xmm7 \n\t"
            "movq (%0), %%xmm0 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm1 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm2 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm3 \n\t"
            "add %2, %0 \n\t"
            "movq (%0), %%xmm4 \n\t"
            "add %2, %0 \n\t"
            "punpcklbw %%xmm7, %%xmm0 \n\t"
            "punpcklbw %%xmm7, %%xmm1 \n\t"
            "punpcklbw %%xmm7, %%xmm2 \n\t"
            "punpcklbw %%xmm7, %%xmm3 \n\t"
            "punpcklbw %%xmm7, %%xmm4 \n\t"
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5,  0*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0,  1*48)
            QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1,  2*48)
            QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2,  3*48)
            QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3,  4*48)
            QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4,  5*48)
            QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5,  6*48)
            QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0,  7*48)
            : "+a"(src)
            : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
            : "memory"
        );
        if(size==16){
            __asm__ volatile(
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1,  8*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2,  9*48)
                QPEL_H264HV_XMM(%%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, 10*48)
                QPEL_H264HV_XMM(%%xmm5, %%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, 11*48)
                QPEL_H264HV_XMM(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, 12*48)
                QPEL_H264HV_XMM(%%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, 13*48)
                QPEL_H264HV_XMM(%%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, 14*48)
                QPEL_H264HV_XMM(%%xmm3, %%xmm4, %%xmm5, %%xmm0, %%xmm1, %%xmm2, 15*48)
                : "+a"(src)
                : "c"(tmp), "S"((x86_reg)srcStride), "m"(ff_pw_5), "m"(ff_pw_16)
                : "memory"
            );
        }
        tmp += 8;
        src += 8 - (size+5)*srcStride;
    }
}

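/*
 * Temporary layout shared by hv1/hv2: each intermediate row occupies 48 bytes
 * (24 int16_t), i.e. the size+5 filtered columns needed by the horizontal
 * pass, rounded up to a multiple of 8.  hv1 fills the buffer in 8-column
 * stripes ((size+8)>>3 of them), which is why it steps src back by
 * (size+5)*srcStride after finishing each stripe.
 */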
#define QPEL_H264_HV2_XMM(OPNAME, OP, MMX)\
static av_always_inline void OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
    int h = size;\
    if(size == 16){\
        __asm__ volatile(\
            "1: \n\t"\
            "movdqa 32(%0), %%xmm4 \n\t"\
            "movdqa 16(%0), %%xmm5 \n\t"\
            "movdqa (%0), %%xmm7 \n\t"\
            "movdqa %%xmm4, %%xmm3 \n\t"\
            "movdqa %%xmm4, %%xmm2 \n\t"\
            "movdqa %%xmm4, %%xmm1 \n\t"\
            "movdqa %%xmm4, %%xmm0 \n\t"\
            "palignr $10, %%xmm5, %%xmm0 \n\t"\
            "palignr $8, %%xmm5, %%xmm1 \n\t"\
            "palignr $6, %%xmm5, %%xmm2 \n\t"\
            "palignr $4, %%xmm5, %%xmm3 \n\t"\
            "palignr $2, %%xmm5, %%xmm4 \n\t"\
            "paddw %%xmm5, %%xmm0 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "movdqa %%xmm5, %%xmm6 \n\t"\
            "movdqa %%xmm5, %%xmm4 \n\t"\
            "movdqa %%xmm5, %%xmm3 \n\t"\
            "palignr $8, %%xmm7, %%xmm4 \n\t"\
            "palignr $2, %%xmm7, %%xmm6 \n\t"\
            "palignr $10, %%xmm7, %%xmm3 \n\t"\
            "paddw %%xmm6, %%xmm4 \n\t"\
            "movdqa %%xmm5, %%xmm6 \n\t"\
            "palignr $6, %%xmm7, %%xmm5 \n\t"\
            "palignr $4, %%xmm7, %%xmm6 \n\t"\
            "paddw %%xmm7, %%xmm3 \n\t"\
            "paddw %%xmm6, %%xmm5 \n\t"\
            \
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psubw %%xmm4, %%xmm3 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psraw $2, %%xmm3 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psubw %%xmm4, %%xmm3 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "paddw %%xmm5, %%xmm3 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psraw $2, %%xmm3 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "paddw %%xmm5, %%xmm3 \n\t"\
            "psraw $6, %%xmm0 \n\t"\
            "psraw $6, %%xmm3 \n\t"\
            "packuswb %%xmm0, %%xmm3 \n\t"\
            OP(%%xmm3, (%1), %%xmm7, dqa)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }else{\
        __asm__ volatile(\
            "1: \n\t"\
            "movdqa 16(%0), %%xmm1 \n\t"\
            "movdqa (%0), %%xmm0 \n\t"\
            "movdqa %%xmm1, %%xmm2 \n\t"\
            "movdqa %%xmm1, %%xmm3 \n\t"\
            "movdqa %%xmm1, %%xmm4 \n\t"\
            "movdqa %%xmm1, %%xmm5 \n\t"\
            "palignr $10, %%xmm0, %%xmm5 \n\t"\
            "palignr $8, %%xmm0, %%xmm4 \n\t"\
            "palignr $6, %%xmm0, %%xmm3 \n\t"\
            "palignr $4, %%xmm0, %%xmm2 \n\t"\
            "palignr $2, %%xmm0, %%xmm1 \n\t"\
            "paddw %%xmm5, %%xmm0 \n\t"\
            "paddw %%xmm4, %%xmm1 \n\t"\
            "paddw %%xmm3, %%xmm2 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "psubw %%xmm1, %%xmm0 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "psraw $2, %%xmm0 \n\t"\
            "paddw %%xmm2, %%xmm0 \n\t"\
            "psraw $6, %%xmm0 \n\t"\
            "packuswb %%xmm0, %%xmm0 \n\t"\
            OP(%%xmm0, (%1), %%xmm7, q)\
            "add $48, %0 \n\t"\
            "add %3, %1 \n\t"\
            "decl %2 \n\t"\
            " jnz 1b \n\t"\
            : "+a"(tmp), "+c"(dst), "+g"(h)\
            : "S"((x86_reg)dstStride)\
            : "memory"\
        );\
    }\
}

#define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
    OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\

#define put_pixels8_l2_sse2 put_pixels8_l2_mmx2
#define avg_pixels8_l2_sse2 avg_pixels8_l2_mmx2
#define put_pixels16_l2_sse2 put_pixels16_l2_mmx2
#define avg_pixels16_l2_sse2 avg_pixels16_l2_mmx2
#define put_pixels8_l2_ssse3 put_pixels8_l2_mmx2
#define avg_pixels8_l2_ssse3 avg_pixels8_l2_mmx2
#define put_pixels16_l2_ssse3 put_pixels16_l2_mmx2
#define avg_pixels16_l2_ssse3 avg_pixels16_l2_mmx2

#define put_pixels8_l2_shift5_sse2 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_sse2 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_sse2 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_sse2 avg_pixels16_l2_shift5_mmx2
#define put_pixels8_l2_shift5_ssse3 put_pixels8_l2_shift5_mmx2
#define avg_pixels8_l2_shift5_ssse3 avg_pixels8_l2_shift5_mmx2
#define put_pixels16_l2_shift5_ssse3 put_pixels16_l2_shift5_mmx2
#define avg_pixels16_l2_shift5_ssse3 avg_pixels16_l2_shift5_mmx2

#define put_h264_qpel8_h_lowpass_l2_sse2 put_h264_qpel8_h_lowpass_l2_mmx2
#define avg_h264_qpel8_h_lowpass_l2_sse2 avg_h264_qpel8_h_lowpass_l2_mmx2
#define put_h264_qpel16_h_lowpass_l2_sse2 put_h264_qpel16_h_lowpass_l2_mmx2
#define avg_h264_qpel16_h_lowpass_l2_sse2 avg_h264_qpel16_h_lowpass_l2_mmx2

#define put_h264_qpel8_v_lowpass_ssse3 put_h264_qpel8_v_lowpass_sse2
#define avg_h264_qpel8_v_lowpass_ssse3 avg_h264_qpel8_v_lowpass_sse2
#define put_h264_qpel16_v_lowpass_ssse3 put_h264_qpel16_v_lowpass_sse2
#define avg_h264_qpel16_v_lowpass_ssse3 avg_h264_qpel16_v_lowpass_sse2

#define put_h264_qpel8or16_hv2_lowpass_sse2 put_h264_qpel8or16_hv2_lowpass_mmx2
#define avg_h264_qpel8or16_hv2_lowpass_sse2 avg_h264_qpel8or16_hv2_lowpass_mmx2

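/*
 * The aliases above reuse the existing MMX2 (or SSE2) routines for the cases
 * where no wider implementation exists, so the H264_MC_* macros below can be
 * instantiated uniformly for each instruction set.
 */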
#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\

static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, uint8_t *src, int stride){
    avg_pixels16_sse2(dst, src, stride, 16);
}
#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmx2
#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmx2

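/*
 * H264_MC_C/H/V/HV generate the 16 quarter-pel motion compensation functions
 * per block size.  In the _mcXY names, X is the horizontal and Y the vertical
 * quarter-pel offset (0..3): mc00 is a plain copy, mc20 and mc02 are the pure
 * half-pel filters, and the remaining positions are obtained by averaging two
 * of the filtered planes as prescribed by H.264.
 */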
#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
}\

#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\

#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}\

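/*
 * H264_MC_HV covers the diagonal and centre positions.  The plain diagonals
 * (mc11/mc31/mc13/mc33) average a vertical half-pel plane, computed into a
 * temporary, with the horizontal filter through the _l2 helpers.  mc22 is the
 * pure centre sample; mc21/mc23 average the centre plane (halfHV) with the
 * horizontal filter, while mc12/mc32 average it with the 16-bit vertical
 * intermediate (halfV) via the _l2_shift5 helpers, so no precision is lost
 * before the final rounding.
 */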
#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint16_t, temp[SIZE*(SIZE<8?12:24)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
    uint8_t * const halfHV= temp;\
    int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

#define H264_MC_4816(MMX)\
H264_MC(put_, 4, MMX, 8)\
H264_MC(put_, 8, MMX, 8)\
H264_MC(put_, 16,MMX, 8)\
H264_MC(avg_, 4, MMX, 8)\
H264_MC(avg_, 8, MMX, 8)\
H264_MC(avg_, 16,MMX, 8)\

#define H264_MC_816(QPEL, XMM)\
QPEL(put_, 8, XMM, 16)\
QPEL(put_, 16,XMM, 16)\
QPEL(avg_, 8, XMM, 16)\
QPEL(avg_, 16,XMM, 16)\


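/*
 * PUT_OP and the AVG_*_OP macros are the write-back operators substituted for
 * OP in the filter macros above: "put" simply stores the result, while "avg"
 * first averages it with the bytes already present in dst (pavgusb on 3DNow!,
 * pavgb on MMX2/SSE2), i.e. the rounded (a + b + 1) >> 1.
 */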
#define AVG_3DNOW_OP(a,b,temp, size) \
        "mov" #size " " #b ", " #temp " \n\t"\
        "pavgusb " #temp ", " #a " \n\t"\
        "mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
        "mov" #size " " #b ", " #temp " \n\t"\
        "pavgb " #temp ", " #a " \n\t"\
        "mov" #size " " #a ", " #b " \n\t"

#define PAVGB "pavgusb"
QPEL_H264(put_, PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_, PUT_OP, mmx2)
QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
QPEL_H264_V_XMM(put_, PUT_OP, sse2)
QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
#ifdef HAVE_SSSE3
QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3)
#endif
#undef PAVGB

H264_MC_4816(3dnow)
H264_MC_4816(mmx2)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
#ifdef HAVE_SSSE3
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif


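/*
 * Chroma motion compensation: H.264 chroma uses 1/8-pel bilinear
 * interpolation, i.e. each output pixel is a weighted average of its 2x2
 * source neighbourhood, ((8-x)*(8-y)*A + x*(8-y)*B + (8-x)*y*C + x*y*D + 32) >> 6
 * in the fully rounded case.  The included templates generate put/avg
 * versions for 8-, 4- and (for MMX2) 2-pixel-wide blocks; the _rnd/_nornd
 * wrappers merely fix the rounding flag that the templates take as their
 * final argument.
 */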
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"

static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 1);
}
static void put_h264_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 0);
}

#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"
static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_mmx2(dst, src, stride, h, x, y, 1);
}
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"
static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_3dnow(dst, src, stride, h, x, y, 1);
}
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

#ifdef HAVE_SSSE3
#define AVG_OP(X)
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_ssse3.c"
static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
static void put_h264_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
}

#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#define AVG_OP(X) X
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_ssse3.c"
static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
}
#undef AVG_OP
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
#endif

/***********************************/
/* weighted prediction */

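/*
 * Explicit (unidirectional) weighted prediction.  Scalar equivalent of what
 * the MMX2 loop below computes per pixel, as a sketch:
 *
 *     int off = (offset << log2_denom) + ((1 << log2_denom) >> 1);
 *     dst[x] = av_clip_uint8((dst[x] * weight + off) >> log2_denom);
 *
 * Folding the offset and the rounding term into one constant lets the inner
 * loop run on just pmullw/paddsw/psraw, with packuswb providing the final
 * clip to [0,255].
 */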
static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    __asm__ volatile(
        "movd %0, %%mm4 \n\t"
        "movd %1, %%mm5 \n\t"
        "movd %2, %%mm6 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            __asm__ volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm4, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "paddsw %%mm5, %%mm1 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "psraw %%mm6, %%mm1 \n\t"
                "packuswb %%mm7, %%mm0 \n\t"
                "packuswb %%mm7, %%mm1 \n\t"
                "movd %%mm0, %0 \n\t"
                "movd %%mm1, %1 \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}

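/*
 * Bidirectional weighted prediction.  Per pixel the loop below computes
 *
 *     dst[x] = av_clip_uint8((dst[x]*weightd + src[x]*weights + off) >> (log2_denom + 1));
 *
 * where off = ((offset + 1) | 1) << log2_denom pre-combines the offset with
 * the rounding term, so a single paddsw per group of four pixels suffices
 * before the shift by log2_denom + 1.
 */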
static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    __asm__ volatile(
        "movd %0, %%mm3 \n\t"
        "movd %1, %%mm4 \n\t"
        "movd %2, %%mm5 \n\t"
        "movd %3, %%mm6 \n\t"
        "pshufw $0, %%mm3, %%mm3 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            __asm__ volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm3, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm1, %%mm0 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"
                "psraw %%mm6, %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd %%mm0, %0 \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                : "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}

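/*
 * H264_WEIGHT below instantiates the weight/biweight entry points for every
 * block size the H.264 code needs; these are the functions the DSP
 * initialisation code installs in its weight tables.
 */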
#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)
