/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/***********************************/
/* IDCT */

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq "#b", "#t" \n\t"\
    "psraw $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA  ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )

#define SBUTTERFLY(a,b,t,n)\
    "movq " #a ", " #t " \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t"\
    "movd (%0), "#t" \n\t"\
    "punpcklbw "#z", "#t" \n\t"\
    "paddsw "#t", "#p" \n\t"\
    "packuswb "#z", "#p" \n\t"\
    "movd "#p", (%0) \n\t"

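
/* For reference, a scalar sketch of the 1-D transform that IDCT4_1D performs
 * on each row/column.  Illustrative only; the name and the function below are
 * assumptions of this sketch and are not used by the MMX code. */
static inline void h264_idct4_1d_ref(int16_t out[4], const int16_t z[4])
{
    const int e0 =  z[0]       +  z[2];
    const int e1 =  z[0]       -  z[2];
    const int e2 = (z[1] >> 1) -  z[3];
    const int e3 =  z[1]       + (z[3] >> 1);
    out[0] = e0 + e3;
    out[1] = e1 + e2;
    out[2] = e1 - e2;
    out[3] = e0 - e3;
}
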
void ff_h264_idct_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )

        "paddw %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7 \n\t"
        :: "m"(ff_pw_32));

    asm volatile(
        STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
        STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((long)stride)
    );
}

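
/* Scalar sketch of the routine above, illustrative only: the +32 rounding is
 * folded into the DC coefficient, the first 1-D pass runs on columns, the
 * second on rows, and the >>6 plus clipped add happens on store, mirroring
 * STORE_DIFF_4P.  The name is an assumption of this sketch. */
static inline void h264_idct_add_ref(uint8_t *dst, int16_t *block, int stride)
{
    int i, j;
    block[0] += 32;
    for (i = 0; i < 4; i++) {                       /* vertical pass   */
        const int z0 =  block[i + 4*0]       +  block[i + 4*2];
        const int z1 =  block[i + 4*0]       -  block[i + 4*2];
        const int z2 = (block[i + 4*1] >> 1) -  block[i + 4*3];
        const int z3 =  block[i + 4*1]       + (block[i + 4*3] >> 1);
        block[i + 4*0] = z0 + z3;
        block[i + 4*1] = z1 + z2;
        block[i + 4*2] = z1 - z2;
        block[i + 4*3] = z0 - z3;
    }
    for (j = 0; j < 4; j++) {                       /* horizontal pass */
        const int z0 =  block[4*j + 0]       +  block[4*j + 2];
        const int z1 =  block[4*j + 0]       -  block[4*j + 2];
        const int z2 = (block[4*j + 1] >> 1) -  block[4*j + 3];
        const int z3 =  block[4*j + 1]       + (block[4*j + 3] >> 1);
        const int o[4] = { z0 + z3, z1 + z2, z1 - z2, z0 - z3 };
        for (i = 0; i < 4; i++) {
            const int v = dst[j*stride + i] + (o[i] >> 6);
            dst[j*stride + i] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
    }
}
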
/***********************************/
/* deblocking */

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t"\
    "psubusb "#y", "#o" \n\t"\
    "por "#t", "#o" \n\t"\
    "psubusb "#a", "#o" \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"

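
/* Scalar sketch of the condition H264_DEBLOCK_MASK evaluates per pixel
 * (illustrative only): a position is filtered iff |p0-q0| < alpha,
 * |p1-p0| < beta and |q1-q0| < beta.  The MMX code turns ">= alpha" into
 * "> alpha-1" via unsigned saturating subtraction, which is why the callers
 * below pass alpha-1 and beta-1.  The function name is an assumption of
 * this sketch. */
static inline int h264_deblock_mask_ref(int p1, int p0, int q0, int q1,
                                        int alpha, int beta)
{
    int d0 = p0 - q0, d1 = p1 - p0, d2 = q1 - q0;
    if (d0 < 0) d0 = -d0;
    if (d1 < 0) d1 = -d1;
    if (d2 < 0) d2 = -d2;
    return d0 < alpha && d1 < beta && d2 < beta;
}
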
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    /* a = q0^p0^((p1-q1)>>2) */\
    "movq %%mm0, %%mm4 \n\t"\
    "psubb %%mm3, %%mm4 \n\t"\
    "psrlw $2, %%mm4 \n\t"\
    "pxor %%mm1, %%mm4 \n\t"\
    "pxor %%mm2, %%mm4 \n\t"\
    /* b = p0^(q1>>2) */\
    "psrlw $2, %%mm3 \n\t"\
    "pand "#pb_3f", %%mm3 \n\t"\
    "movq %%mm1, %%mm5 \n\t"\
    "pxor %%mm3, %%mm5 \n\t"\
    /* c = q0^(p1>>2) */\
    "psrlw $2, %%mm0 \n\t"\
    "pand "#pb_3f", %%mm0 \n\t"\
    "movq %%mm2, %%mm6 \n\t"\
    "pxor %%mm0, %%mm6 \n\t"\
    /* d = (c^b) & ~(b^a) & 1 */\
    "pxor %%mm5, %%mm6 \n\t"\
    "pxor %%mm4, %%mm5 \n\t"\
    "pandn %%mm6, %%mm5 \n\t"\
    "pand "#pb_01", %%mm5 \n\t"\
    /* delta = (avg(q0, p1>>2) + (d&a))\
     *       - (avg(p0, q1>>2) + (d&~a)) */\
    "pavgb %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "pand %%mm4, %%mm6 \n\t"\
    "paddusb %%mm6, %%mm0 \n\t"\
    "pavgb %%mm1, %%mm3 \n\t"\
    "pandn %%mm5, %%mm4 \n\t"\
    "paddusb %%mm4, %%mm3 \n\t"\
    /* p0 += clip(delta, -tc0, tc0)\
     * q0 -= clip(delta, -tc0, tc0) */\
    "movq %%mm0, %%mm4 \n\t"\
    "psubusb %%mm3, %%mm0 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "pminub %%mm7, %%mm0 \n\t"\
    "pminub %%mm7, %%mm3 \n\t"\
    "paddusb %%mm0, %%mm1 \n\t"\
    "paddusb %%mm3, %%mm2 \n\t"\
    "psubusb %%mm3, %%mm1 \n\t"\
    "psubusb %%mm0, %%mm2 \n\t"

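
/* Scalar sketch of the p0/q0 update that the byte-wise avg/xor trick above
 * implements (illustrative only; the packed version handles rounding with
 * the "d" correction bit rather than the +4 below, and the function name is
 * an assumption of this sketch):
 *   delta = clip3(-tc, tc, ((q0 - p0)*4 + (p1 - q1) + 4) >> 3)
 *   p0'   = clip_uint8(p0 + delta),  q0' = clip_uint8(q0 - delta) */
static inline void h264_deblock_p0_q0_ref(uint8_t *p0, uint8_t *q0,
                                          int p1, int q1, int tc)
{
    int delta = ((*q0 - *p0) * 4 + (p1 - q1) + 4) >> 3;
    int np0, nq0;
    if (delta >  tc) delta =  tc;
    if (delta < -tc) delta = -tc;
    np0 = *p0 + delta;
    nq0 = *q0 - delta;
    *p0 = np0 < 0 ? 0 : np0 > 255 ? 255 : np0;
    *q0 = nq0 < 0 ? 0 : nq0 > 255 ? 255 : nq0;
}
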
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
// out: (q1addr) = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq %%mm1, "#tmp" \n\t"\
    "pavgb %%mm2, "#tmp" \n\t"\
    "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor "q2addr", "#tmp" \n\t"\
    "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq "#p1", "#tmp" \n\t"\
    "psubusb "#tc0", "#tmp" \n\t"\
    "paddusb "#p1", "#tc0" \n\t"\
    "pmaxub "#tmp", "#q2" \n\t"\
    "pminub "#tc0", "#q2" \n\t"\
    "movq "#q2", "q1addr" \n\t"

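
/* Scalar sketch of the update above (illustrative only), taken directly from
 * the "out:" comment: the new outer sample is
 *   clip((q2 + ((p0+q0+1)>>1)) >> 1, q1-tc0, q1+tc0)
 * and the pavgb/pxor/psubusb sequence is just a way to get the un-rounded
 * (x+y)>>1 in packed bytes.  The same formula is used for the p side with
 * p2/p1 in place of q2/q1.  The name is an assumption of this sketch. */
static inline int h264_deblock_q1_ref(int q0, int q1, int q2, int p0, int tc0)
{
    int v = (q2 + ((p0 + q0 + 1) >> 1)) >> 1;
    if (v < q1 - tc0) v = q1 - tc0;
    if (v > q1 + tc0) v = q1 + tc0;
    return v;
}
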
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    uint64_t tmp0;
    uint64_t tc = (uint8_t)tc0[1]*0x01010000 | (uint8_t)tc0[0]*0x0101;
    // with luma, tc0=0 doesn't mean no filtering, so we need a separate input mask
    uint32_t mask[2] = { (tc0[0]>=0)*0xffffffff, (tc0[1]>=0)*0xffffffff };

    asm volatile(
        "movq (%1,%3), %%mm0 \n\t" //p1
        "movq (%1,%3,2), %%mm1 \n\t" //p0
        "movq (%2), %%mm2 \n\t" //q0
        "movq (%2,%3), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)
        "pand %5, %%mm7 \n\t"
        "movq %%mm7, %0 \n\t"

        /* filter p1 */
        "movq (%1), %%mm3 \n\t" //p2
        DIFF_GT_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pandn %%mm7, %%mm6 \n\t"
        "pcmpeqb %%mm7, %%mm6 \n\t"
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pshufw $80, %4, %%mm4 \n\t"
        "pand %%mm7, %%mm4 \n\t" // mask & tc0
        "movq %8, %%mm7 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mask & |p2-p0|<beta & 1
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        "paddb %%mm4, %%mm7 \n\t" // tc++
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)

        /* filter q1 */
        "movq (%2,%3,2), %%mm4 \n\t" //q2
        DIFF_GT_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pandn %0, %%mm6 \n\t"
        "pcmpeqb %0, %%mm6 \n\t"
        "pand %0, %%mm6 \n\t"
        "pshufw $80, %4, %%mm5 \n\t"
        "pand %%mm6, %%mm5 \n\t"
        "pand %8, %%mm6 \n\t"
        "paddb %%mm6, %%mm7 \n\t"
        "movq (%2,%3), %%mm3 \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%8, %9)
        "movq %%mm1, (%1,%3,2) \n\t"
        "movq %%mm2, (%2) \n\t"

        : "=m"(tmp0)
        : "r"(pix-3*stride), "r"(pix), "r"((long)stride),
          "m"(tc), "m"(*(uint64_t*)mask), "m"(alpha1), "m"(beta1),
          "m"(mm_bone), "m"(ff_pb_3F)
    );
}

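
/* Scalar sketch of the per-column work done by the routine above, using the
 * *_ref helpers sketched earlier in this file (illustrative only; the MMX
 * version processes 8 columns at once and carries tc and the input mask as
 * packed bytes).  xstride is the distance between p0 and q0; the name is an
 * assumption of this sketch. */
static inline void h264_loop_filter_luma_ref(uint8_t *pix, int xstride,
                                             int alpha, int beta, int tc0)
{
    const int p2 = pix[-3*xstride], p1 = pix[-2*xstride], p0 = pix[-1*xstride];
    const int q0 = pix[ 0*xstride], q1 = pix[ 1*xstride], q2 = pix[ 2*xstride];
    int ap = p2 - p0, aq = q2 - q0, tc = tc0;

    if (tc0 < 0 || !h264_deblock_mask_ref(p1, p0, q0, q1, alpha, beta))
        return;
    if (ap < 0) ap = -ap;
    if (aq < 0) aq = -aq;
    if (ap < beta) {                                 /* filter p1, tc++ */
        pix[-2*xstride] = h264_deblock_q1_ref(p0, p1, p2, q0, tc0);
        tc++;
    }
    if (aq < beta) {                                 /* filter q1, tc++ */
        pix[ 1*xstride] = h264_deblock_q1_ref(q0, q1, q2, p0, tc0);
        tc++;
    }
    /* p0/q0 update uses the original (unfiltered) p1 and q1 values */
    h264_deblock_p0_q0_ref(&pix[-1*xstride], &pix[0], p1, q1, tc);
}
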
static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}
static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    uint8_t trans[8*8];
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans, pix-4, 8, stride);
        transpose4x4(trans+4*8, pix, 8, stride);
        transpose4x4(trans+4, pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix+4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2, trans+2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}

static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t" //p1
        "movq (%0,%2), %%mm1 \n\t" //p0
        "movq (%1), %%mm2 \n\t" //q0
        "movq (%1,%2), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd %3, %%mm6 \n\t"
        "punpcklbw %%mm6, %%mm6 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"

        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(mm_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    uint8_t trans[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq "#p0", %%mm4 \n\t"\
    "pxor "#q1", %%mm4 \n\t"\
    "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0" \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\

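
/* Scalar form of the value computed by H264_FILTER_CHROMA4 (see the comment
 * above): the pavgb/psubusb sequence is an exact byte-wise way of computing
 * it without widening to 16 bits.  Illustrative only; the name is an
 * assumption of this sketch. */
static inline int h264_filter_chroma_intra_ref(int p0, int p1, int q1)
{
    return (p0 + q1 + 2*p1 + 2) >> 2;
}
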
static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq (%0,%2), %%mm1 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq (%1,%2), %%mm3 \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq %%mm1, %%mm5 \n\t"
        "movq %%mm2, %%mm6 \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb %%mm5, %%mm1 \n\t"
        "psubb %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "paddb %%mm5, %%mm1 \n\t"
        "paddb %%mm6, %%mm2 \n\t"
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "m"(alpha1), "m"(beta1), "m"(mm_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    uint8_t trans[8*4];
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}


/***********************************/
/* motion compensation */

#define QPEL_H264V(A,B,C,D,E,F,OP)\
    "movd (%0), "#F" \n\t"\
    "movq "#C", %%mm6 \n\t"\
    "paddw "#D", %%mm6 \n\t"\
    "psllw $2, %%mm6 \n\t"\
    "psubw "#B", %%mm6 \n\t"\
    "psubw "#E", %%mm6 \n\t"\
    "pmullw %4, %%mm6 \n\t"\
    "add %2, %0 \n\t"\
    "punpcklbw %%mm7, "#F" \n\t"\
    "paddw %5, "#A" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", %%mm6 \n\t"\
    "psraw $5, %%mm6 \n\t"\
    "packuswb %%mm6, %%mm6 \n\t"\
    OP(%%mm6, (%1), A, d)\
    "add %3, %1 \n\t"

#define QPEL_H264HV(A,B,C,D,E,F,OF)\
    "movd (%0), "#F" \n\t"\
    "movq "#C", %%mm6 \n\t"\
    "paddw "#D", %%mm6 \n\t"\
    "psllw $2, %%mm6 \n\t"\
    "psubw "#B", %%mm6 \n\t"\
    "psubw "#E", %%mm6 \n\t"\
    "pmullw %3, %%mm6 \n\t"\
    "add %2, %0 \n\t"\
    "punpcklbw %%mm7, "#F" \n\t"\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", %%mm6 \n\t"\
    "movq %%mm6, "#OF"(%1) \n\t"

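
/* Scalar sketch of the 6-tap filter step that QPEL_H264V computes (and that
 * QPEL_H264HV computes without the +16 rounding, the >>5 and the clamp,
 * storing the raw 16-bit sum instead): A..F are six consecutive samples
 * along the filtered direction.  Illustrative only; the name is an
 * assumption of this sketch. */
static inline int h264_qpel_filter_ref(int A, int B, int C, int D, int E, int F)
{
    const int v = (A - 5*B + 20*C + 20*D - 5*E + F + 16) >> 5;
    return v < 0 ? 0 : v > 255 ? 255 : v;
}
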
#define QPEL_H264(OPNAME, OP, MMX)\
static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=4;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "1: \n\t"\
        "movd -1(%0), %%mm1 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "movd 1(%0), %%mm3 \n\t"\
        "movd 2(%0), %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "paddw %%mm0, %%mm1 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "movd -2(%0), %%mm0 \n\t"\
        "movd 3(%0), %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm3, %%mm0 \n\t"\
        "psllw $2, %%mm2 \n\t"\
        "psubw %%mm1, %%mm2 \n\t"\
        "pmullw %%mm4, %%mm2 \n\t"\
        "paddw %%mm5, %%mm0 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    src -= 2*srcStride;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movd (%0), %%mm0 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm1 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm3 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
\
        : "+a"(src), "+c"(dst)\
        : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=4;\
    int w=3;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 9*srcStride;\
    }\
    tmp -= 3*4;\
    asm volatile(\
        "movq %4, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "paddw 10(%0), %%mm0 \n\t"\
        "movq 2(%0), %%mm1 \n\t"\
        "paddw 8(%0), %%mm1 \n\t"\
        "movq 4(%0), %%mm2 \n\t"\
        "paddw 6(%0), %%mm2 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
        "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
        "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
        "paddsw %%mm2, %%mm0 \n\t"\
        "psraw $2, %%mm0 \n\t"/*((a-b)/4-b)/4 */\
        "paddw %%mm6, %%mm2 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "psraw $6, %%mm0 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm7, d)\
        "add $24, %0 \n\t"\
        "add %3, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(tmp), "+c"(dst), "+m"(h)\
        : "S"((long)dstStride), "m"(ff_pw_32)\
        : "memory"\
    );\
}\
\
static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %5, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 1(%0), %%mm2 \n\t"\
        "movq %%mm0, %%mm1 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "psllw $2, %%mm0 \n\t"\
        "psllw $2, %%mm1 \n\t"\
        "movq -1(%0), %%mm2 \n\t"\
        "movq 2(%0), %%mm4 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "movq %%mm4, %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "punpckhbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm4, %%mm2 \n\t"\
        "paddw %%mm3, %%mm5 \n\t"\
        "psubw %%mm2, %%mm0 \n\t"\
        "psubw %%mm5, %%mm1 \n\t"\
        "pmullw %%mm6, %%mm0 \n\t"\
        "pmullw %%mm6, %%mm1 \n\t"\
        "movd -2(%0), %%mm2 \n\t"\
        "movd 7(%0), %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "movq %6, %%mm5 \n\t"\
        "paddw %%mm5, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm4, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm1, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0 \n\t"\
        "add %4, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h= 2;\
    src -= 2*srcStride;\
\
    while(h--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
\
            : "+a"(src), "+c"(dst)\
            : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
        );\
        src += 4-13*srcStride;\
        dst += 4-8*dstStride;\
    }\
}\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=8;\
    int w=4;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7 \n\t"\
            "movd (%0), %%mm0 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm1 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm2 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm3 \n\t"\
            "add %2, %0 \n\t"\
            "movd (%0), %%mm4 \n\t"\
            "add %2, %0 \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*4)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*4)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*4)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*4)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*8*4)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*8*4)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*8*4)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*8*4)\
\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 13*srcStride;\
    }\
    tmp -= 4*4;\
    asm volatile(\
        "movq %4, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "movq 2(%0), %%mm1 \n\t"\
        "movq 10(%0), %%mm4 \n\t"\
        "paddw %%mm4, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "paddw 18(%0), %%mm3 \n\t"\
        "paddw 16(%0), %%mm4 \n\t"\
        "movq 4(%0), %%mm2 \n\t"\
        "movq 12(%0), %%mm5 \n\t"\
        "paddw 6(%0), %%mm2 \n\t"\
        "paddw 14(%0), %%mm5 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"\
        "psubw %%mm4, %%mm3 \n\t"\
        "psraw $2, %%mm0 \n\t"\
        "psraw $2, %%mm3 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"\
        "psubw %%mm4, %%mm3 \n\t"\
        "paddsw %%mm2, %%mm0 \n\t"\
        "paddsw %%mm5, %%mm3 \n\t"\
        "psraw $2, %%mm0 \n\t"\
        "psraw $2, %%mm3 \n\t"\
        "paddw %%mm6, %%mm2 \n\t"\
        "paddw %%mm6, %%mm5 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm5, %%mm3 \n\t"\
        "psraw $6, %%mm0 \n\t"\
        "psraw $6, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm7, q)\
        "add $32, %0 \n\t"\
        "add %3, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(tmp), "+c"(dst), "+m"(h)\
        : "S"((long)dstStride), "m"(ff_pw_32)\
        : "memory"\
    );\
}\
static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst  , tmp  , src  , dstStride, tmpStride, srcStride);\
    OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp  , src+8, dstStride, tmpStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst  , tmp  , src  , dstStride, tmpStride, srcStride);\
    OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp  , src+8, dstStride, tmpStride, srcStride);\
}\

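
/* The second pass of the *_hv_lowpass functions above applies the same 6-tap
 * filter horizontally to the 16-bit intermediate plane.  With
 * a = t[-2]+t[3], b = t[-1]+t[2], c = t[0]+t[1], the full-precision result
 * is (a - 5*b + 20*c + 512) >> 10, clipped to 0..255; the asm instead stages
 * the shifts (>>2, >>2, >>6) so intermediates stay within signed 16-bit
 * words.  Scalar sketch of the full-precision form (illustrative only; the
 * name is an assumption of this sketch): */
static inline int h264_qpel_hv_round_ref(int a, int b, int c)
{
    const int v = (a - 5*b + 20*c + 512) >> 10;
    return v < 0 ? 0 : v > 255 ? 255 : v;
}
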
#define H264_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE+8)/4];\
    int16_t * const tmp= (int16_t*)temp;\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
    uint8_t * const halfH= (uint8_t*)temp;\
    uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
    uint8_t * const halfV= (uint8_t*)temp;\
    uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\
    uint8_t * const halfV= (uint8_t*)temp;\
    uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgusb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgb " #temp ", " #a " \n\t"\
    "mov" #size " " #a ", " #b " \n\t"

QPEL_H264(put_, PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
QPEL_H264(put_, PUT_OP, mmx2)
QPEL_H264(avg_, AVG_MMX2_OP, mmx2)

H264_MC(put_, 4, 3dnow)
H264_MC(put_, 8, 3dnow)
H264_MC(put_, 16,3dnow)
H264_MC(avg_, 4, 3dnow)
H264_MC(avg_, 8, 3dnow)
H264_MC(avg_, 16,3dnow)
H264_MC(put_, 4, mmx2)
H264_MC(put_, 8, mmx2)
H264_MC(put_, 16,mmx2)
H264_MC(avg_, 4, mmx2)
H264_MC(avg_, 8, mmx2)
H264_MC(avg_, 16,mmx2)


#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_MC8_TMPL

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_MC8_TMPL

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_MC8_TMPL
