1
|
1 /*
|
|
2 * motion_comp_mmx.c
|
9852
|
3 * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
|
|
4 * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
|
1
|
5 *
|
|
6 * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
|
9852
|
7 * See http://libmpeg2.sourceforge.net/ for updates.
|
1
|
8 *
|
|
9 * mpeg2dec is free software; you can redistribute it and/or modify
|
|
10 * it under the terms of the GNU General Public License as published by
|
|
11 * the Free Software Foundation; either version 2 of the License, or
|
|
12 * (at your option) any later version.
|
|
13 *
|
|
14 * mpeg2dec is distributed in the hope that it will be useful,
|
|
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
17 * GNU General Public License for more details.
|
|
18 *
|
|
19 * You should have received a copy of the GNU General Public License
|
|
20 * along with this program; if not, write to the Free Software
|
|
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
22 */
|
|
23
|
|
24 #include "config.h"
|
|
25
|
|
26 #ifdef ARCH_X86
|
|
27
|
|
28 #include <inttypes.h>
|
|
29
|
9852
|
30 #include "mpeg2.h"
|
1
|
31 #include "mpeg2_internal.h"
|
|
32 #include "attributes.h"
|
|
33 #include "mmx.h"
|
|
34
|
|
35 #define CPU_MMXEXT 0
|
|
36 #define CPU_3DNOW 1
|
|
37
|
|
38
|
36
|
39 /* MMX code - needs a rewrite */
|
1
|
40
|
9852
|
41 /*
|
|
42 * Motion Compensation frequently needs to average values using the
|
|
43 * formula (x+y+1)>>1. Both MMXEXT and 3Dnow include one instruction
|
|
44 * to compute this, but it's been left out of classic MMX.
|
|
45 *
|
|
46 * We need to be careful of overflows when doing this computation.
|
|
47 * Rather than unpacking data to 16-bits, which reduces parallelism,
|
|
48 * we use the following formulas:
|
|
49 *
|
|
50 * (x+y)>>1 == (x&y)+((x^y)>>1)
|
|
51 * (x+y+1)>>1 == (x|y)-((x^y)>>1)
|
|
52 */
|
1
|
53
|
36
|
54 /* some rounding constants */
|
9852
|
55 static mmx_t mask1 = {0xfefefefefefefefeLL};
|
|
56 static mmx_t round4 = {0x0002000200020002LL};
|
1
|
57
|
|
58 /*
|
|
59 * This code should probably be compiled with loop unrolling
|
|
60  * (ie, -funroll-loops in gcc) because some of the loops
|
|
61 * use a small static number of iterations. This was written
|
|
62 * with the assumption the compiler knows best about when
|
|
63 * unrolling will help
|
|
64 */
|
|
65
|
|
66 static inline void mmx_zero_reg ()
|
|
67 {
|
36
|
68 /* load 0 into mm0 */
|
1
|
69 pxor_r2r (mm0, mm0);
|
|
70 }
|
|
71
|
9852
|
/* Byte-wise rounded average of two 8-byte rows, kept in 8-bit lanes:
 * dest[i] = (src1[i] + src2[i] + 1) >> 1, using the overflow-free
 * identity (x+y+1)>>1 == (x|y) - ((x^y)>>1) described above. */
static inline void mmx_average_2_U8 (uint8_t * dest, const uint8_t * src1,
				     const uint8_t * src2)
{
    /* *dest = (*src1 + *src2 + 1)/ 2; */

    movq_m2r (*src1, mm1);	/* load 8 src1 bytes */
    movq_r2r (mm1, mm2);	/* copy 8 src1 bytes */

    movq_m2r (*src2, mm3);	/* load 8 src2 bytes */
    movq_r2r (mm3, mm4);	/* copy 8 src2 bytes */

    pxor_r2r (mm1, mm3);	/* xor src1 and src2 */
    pand_m2r (mask1, mm3);	/* mask lower bits */
    psrlq_i2r (1, mm3);		/* /2 */
    por_r2r (mm2, mm4);		/* or src1 and src2 */
    psubb_r2r (mm3, mm4);	/* subtract subresults */
    movq_r2m (mm4, *dest);	/* store result in dest */
}
|
|
90
|
|
/* Average the rounded-up average of src1/src2 into dest:
 * dest[i] = (dest[i] + ((src1[i]+src2[i]+1)>>1) + 1) >> 1.
 * Same (x|y) - ((x^y)>>1) trick as mmx_average_2_U8, applied twice. */
static inline void mmx_interp_average_2_U8 (uint8_t * dest,
					    const uint8_t * src1,
					    const uint8_t * src2)
{
    /* *dest = (*dest + (*src1 + *src2 + 1)/ 2 + 1)/ 2; */

    movq_m2r (*dest, mm1);	/* load 8 dest bytes */
    movq_r2r (mm1, mm2);	/* copy 8 dest bytes */

    movq_m2r (*src1, mm3);	/* load 8 src1 bytes */
    movq_r2r (mm3, mm4);	/* copy 8 src1 bytes */

    movq_m2r (*src2, mm5);	/* load 8 src2 bytes */
    movq_r2r (mm5, mm6);	/* copy 8 src2 bytes */

    pxor_r2r (mm3, mm5);	/* xor src1 and src2 */
    pand_m2r (mask1, mm5);	/* mask lower bits */
    psrlq_i2r (1, mm5);		/* /2 */
    por_r2r (mm4, mm6);		/* or src1 and src2 */
    psubb_r2r (mm5, mm6);	/* subtract subresults */
    movq_r2r (mm6, mm5);	/* copy subresult */

    pxor_r2r (mm1, mm5);	/* xor srcavg and dest */
    pand_m2r (mask1, mm5);	/* mask lower bits */
    psrlq_i2r (1, mm5);		/* /2 */
    por_r2r (mm2, mm6);		/* or srcavg and dest */
    psubb_r2r (mm5, mm6);	/* subtract subresults */
    movq_r2m (mm6, *dest);	/* store result in dest */
}
|
|
120
|
9852
|
/* Four-way rounded byte average:
 * dest[i] = (src1[i] + src2[i] + src3[i] + src4[i] + 2) >> 2.
 * Unpacks to 16-bit lanes so the four-way sum cannot overflow.
 * Precondition: mm0 == 0 (caller must have run mmx_zero_reg). */
static inline void mmx_average_4_U8 (uint8_t * dest, const uint8_t * src1,
				     const uint8_t * src2,
				     const uint8_t * src3,
				     const uint8_t * src4)
{
    /* *dest = (*src1 + *src2 + *src3 + *src4 + 2)/ 4; */

    movq_m2r (*src1, mm1);	/* load 8 src1 bytes */
    movq_r2r (mm1, mm2);	/* copy 8 src1 bytes */

    punpcklbw_r2r (mm0, mm1);	/* unpack low src1 bytes */
    punpckhbw_r2r (mm0, mm2);	/* unpack high src1 bytes */

    movq_m2r (*src2, mm3);	/* load 8 src2 bytes */
    movq_r2r (mm3, mm4);	/* copy 8 src2 bytes */

    punpcklbw_r2r (mm0, mm3);	/* unpack low src2 bytes */
    punpckhbw_r2r (mm0, mm4);	/* unpack high src2 bytes */

    paddw_r2r (mm3, mm1);	/* add lows */
    paddw_r2r (mm4, mm2);	/* add highs */

    /* now have partials in mm1 and mm2 */

    movq_m2r (*src3, mm3);	/* load 8 src3 bytes */
    movq_r2r (mm3, mm4);	/* copy 8 src3 bytes */

    punpcklbw_r2r (mm0, mm3);	/* unpack low src3 bytes */
    punpckhbw_r2r (mm0, mm4);	/* unpack high src3 bytes */

    paddw_r2r (mm3, mm1);	/* add lows */
    paddw_r2r (mm4, mm2);	/* add highs */

    movq_m2r (*src4, mm5);	/* load 8 src4 bytes */
    movq_r2r (mm5, mm6);	/* copy 8 src4 bytes */

    punpcklbw_r2r (mm0, mm5);	/* unpack low src4 bytes */
    punpckhbw_r2r (mm0, mm6);	/* unpack high src4 bytes */

    paddw_r2r (mm5, mm1);	/* add lows */
    paddw_r2r (mm6, mm2);	/* add highs */

    /* now have subtotal in mm1 and mm2 */

    paddw_m2r (round4, mm1);	/* +2 per lane for rounding */
    psraw_i2r (2, mm1);		/* /4 */
    paddw_m2r (round4, mm2);	/* +2 per lane for rounding */
    psraw_i2r (2, mm2);		/* /4 */

    packuswb_r2r (mm2, mm1);	/* pack (w/ saturation) */
    movq_r2m (mm1, *dest);	/* store result in dest */
}
|
|
173
|
|
/* Four-way rounded average of the sources, then rounded average with the
 * existing dest (the MPEG-2 "avg" op on a bidirectionally interpolated
 * prediction):
 * dest[i] = (dest[i] + ((s1+s2+s3+s4+2)>>2) + 1) >> 1.
 * Precondition: mm0 == 0 (caller must have run mmx_zero_reg). */
static inline void mmx_interp_average_4_U8 (uint8_t * dest,
					    const uint8_t * src1,
					    const uint8_t * src2,
					    const uint8_t * src3,
					    const uint8_t * src4)
{
    /* *dest = (*dest + (*src1 + *src2 + *src3 + *src4 + 2)/ 4 + 1)/ 2; */

    movq_m2r (*src1, mm1);	/* load 8 src1 bytes */
    movq_r2r (mm1, mm2);	/* copy 8 src1 bytes */

    punpcklbw_r2r (mm0, mm1);	/* unpack low src1 bytes */
    punpckhbw_r2r (mm0, mm2);	/* unpack high src1 bytes */

    movq_m2r (*src2, mm3);	/* load 8 src2 bytes */
    movq_r2r (mm3, mm4);	/* copy 8 src2 bytes */

    punpcklbw_r2r (mm0, mm3);	/* unpack low src2 bytes */
    punpckhbw_r2r (mm0, mm4);	/* unpack high src2 bytes */

    paddw_r2r (mm3, mm1);	/* add lows */
    paddw_r2r (mm4, mm2);	/* add highs */

    /* now have partials in mm1 and mm2 */

    movq_m2r (*src3, mm3);	/* load 8 src3 bytes */
    movq_r2r (mm3, mm4);	/* copy 8 src3 bytes */

    punpcklbw_r2r (mm0, mm3);	/* unpack low src3 bytes */
    punpckhbw_r2r (mm0, mm4);	/* unpack high src3 bytes */

    paddw_r2r (mm3, mm1);	/* add lows */
    paddw_r2r (mm4, mm2);	/* add highs */

    movq_m2r (*src4, mm5);	/* load 8 src4 bytes */
    movq_r2r (mm5, mm6);	/* copy 8 src4 bytes */

    punpcklbw_r2r (mm0, mm5);	/* unpack low src4 bytes */
    punpckhbw_r2r (mm0, mm6);	/* unpack high src4 bytes */

    paddw_r2r (mm5, mm1);	/* add lows */
    paddw_r2r (mm6, mm2);	/* add highs */

    paddw_m2r (round4, mm1);	/* +2 per lane for rounding */
    psraw_i2r (2, mm1);		/* /4 */
    paddw_m2r (round4, mm2);	/* +2 per lane for rounding */
    psraw_i2r (2, mm2);		/* /4 */

    /* now have subtotal/4 in mm1 and mm2 */

    movq_m2r (*dest, mm3);	/* load 8 dest bytes */
    movq_r2r (mm3, mm4);	/* copy 8 dest bytes */

    packuswb_r2r (mm2, mm1);	/* pack (w/ saturation) */
    movq_r2r (mm1,mm2);		/* copy subresult */

    /* final rounded average with dest via (x|y) - ((x^y)>>1) */
    pxor_r2r (mm1, mm3);	/* xor srcavg and dest */
    pand_m2r (mask1, mm3);	/* mask lower bits */
    psrlq_i2r (1, mm3);		/* /2 */
    por_r2r (mm2, mm4);		/* or srcavg and dest */
    psubb_r2r (mm3, mm4);	/* subtract subresults */
    movq_r2m (mm4, *dest);	/* store result in dest */
}
|
|
237
|
36
|
238 /*-----------------------------------------------------------------------*/
|
1
|
239
|
9852
|
/* Average-prediction block: dest = avg (dest, ref), one 8- or 16-byte row
 * per iteration.  `width' is a compile-time constant (8 or 16) in all
 * callers, so the `if' folds away after inlining. */
static inline void MC_avg_mmx (const int width, int height, uint8_t * dest,
			       const uint8_t * ref, const int stride)
{
    mmx_zero_reg ();

    do {
	mmx_average_2_U8 (dest, dest, ref);

	if (width == 16)
	    mmx_average_2_U8 (dest+8, dest+8, ref+8);

	dest += stride;
	ref += stride;
    } while (--height);
}
|
|
255
|
9852
|
/* 16- and 8-wide entry points: average with dest, no half-pel offset. */
static void MC_avg_o_16_mmx (uint8_t * dest, const uint8_t * ref,
			     int stride, int height)
{
    MC_avg_mmx (16, height, dest, ref, stride);
}

static void MC_avg_o_8_mmx (uint8_t * dest, const uint8_t * ref,
			    int stride, int height)
{
    MC_avg_mmx (8, height, dest, ref, stride);
}
|
|
267
|
36
|
268 /*-----------------------------------------------------------------------*/
|
1
|
269
|
9852
|
/* Plain copy-prediction block: dest = ref, row by row.  `width' is a
 * compile-time constant (8 or 16) in all callers. */
static inline void MC_put_mmx (const int width, int height, uint8_t * dest,
			       const uint8_t * ref, const int stride)
{
    mmx_zero_reg ();

    do {
	movq_m2r (* ref, mm1);	/* load 8 ref bytes */
	movq_r2m (mm1,* dest);	/* store 8 bytes at curr */

	if (width == 16)
	{
	    movq_m2r (* (ref+8), mm1);	/* load 8 ref bytes */
	    movq_r2m (mm1,* (dest+8));	/* store 8 bytes at curr */
	}

	dest += stride;
	ref += stride;
    } while (--height);
}
|
|
289
|
9852
|
/* 16- and 8-wide entry points: straight copy, no half-pel offset. */
static void MC_put_o_16_mmx (uint8_t * dest, const uint8_t * ref,
			     int stride, int height)
{
    MC_put_mmx (16, height, dest, ref, stride);
}

static void MC_put_o_8_mmx (uint8_t * dest, const uint8_t * ref,
			    int stride, int height)
{
    MC_put_mmx (8, height, dest, ref, stride);
}
|
|
301
|
36
|
302 /*-----------------------------------------------------------------------*/
|
1
|
303
|
36
|
304 /* Half pixel interpolation in the x direction */
|
9852
|
/* Half pixel interpolation in the x direction, averaged with dest:
 * dest = avg (dest, avg (ref, ref+1)). */
static inline void MC_avg_x_mmx (const int width, int height, uint8_t * dest,
				 const uint8_t * ref, const int stride)
{
    mmx_zero_reg ();

    do {
	mmx_interp_average_2_U8 (dest, ref, ref+1);

	if (width == 16)
	    mmx_interp_average_2_U8 (dest+8, ref+8, ref+9);

	dest += stride;
	ref += stride;
    } while (--height);
}
|
|
320
|
9852
|
/* 16- and 8-wide entry points: avg-prediction with half-pel x. */
static void MC_avg_x_16_mmx (uint8_t * dest, const uint8_t * ref,
			     int stride, int height)
{
    MC_avg_x_mmx (16, height, dest, ref, stride);
}

static void MC_avg_x_8_mmx (uint8_t * dest, const uint8_t * ref,
			    int stride, int height)
{
    MC_avg_x_mmx (8, height, dest, ref, stride);
}
|
|
332
|
36
|
333 /*-----------------------------------------------------------------------*/
|
1
|
334
|
9852
|
/* Half pixel interpolation in the x direction: dest = avg (ref, ref+1). */
static inline void MC_put_x_mmx (const int width, int height, uint8_t * dest,
				 const uint8_t * ref, const int stride)
{
    mmx_zero_reg ();

    do {
	mmx_average_2_U8 (dest, ref, ref+1);

	if (width == 16)
	    mmx_average_2_U8 (dest+8, ref+8, ref+9);

	dest += stride;
	ref += stride;
    } while (--height);
}
|
|
350
|
9852
|
/* 16- and 8-wide entry points: copy-prediction with half-pel x. */
static void MC_put_x_16_mmx (uint8_t * dest, const uint8_t * ref,
			     int stride, int height)
{
    MC_put_x_mmx (16, height, dest, ref, stride);
}

static void MC_put_x_8_mmx (uint8_t * dest, const uint8_t * ref,
			    int stride, int height)
{
    MC_put_x_mmx (8, height, dest, ref, stride);
}
|
|
362
|
36
|
363 /*-----------------------------------------------------------------------*/
|
1
|
364
|
9852
|
/* Half pixel interpolation in both x and y, averaged with dest:
 * dest = avg (dest, (ref + ref+1 + ref_next + ref_next+1 + 2) / 4). */
static inline void MC_avg_xy_mmx (const int width, int height, uint8_t * dest,
				  const uint8_t * ref, const int stride)
{
    const uint8_t * ref_next = ref + stride;	/* row below, for y interpolation */

    mmx_zero_reg ();

    do {
	mmx_interp_average_4_U8 (dest, ref, ref+1, ref_next, ref_next+1);

	if (width == 16)
	    mmx_interp_average_4_U8 (dest+8, ref+8, ref+9,
				     ref_next+8, ref_next+9);

	dest += stride;
	ref += stride;
	ref_next += stride;
    } while (--height);
}
|
|
384
|
9852
|
/* 16- and 8-wide entry points: avg-prediction with half-pel x and y. */
static void MC_avg_xy_16_mmx (uint8_t * dest, const uint8_t * ref,
			      int stride, int height)
{
    MC_avg_xy_mmx (16, height, dest, ref, stride);
}

static void MC_avg_xy_8_mmx (uint8_t * dest, const uint8_t * ref,
			     int stride, int height)
{
    MC_avg_xy_mmx (8, height, dest, ref, stride);
}
|
|
396
|
36
|
397 /*-----------------------------------------------------------------------*/
|
1
|
398
|
9852
|
/* Half pixel interpolation in both x and y:
 * dest = (ref + ref+1 + ref_next + ref_next+1 + 2) / 4. */
static inline void MC_put_xy_mmx (const int width, int height, uint8_t * dest,
				  const uint8_t * ref, const int stride)
{
    const uint8_t * ref_next = ref + stride;	/* row below, for y interpolation */

    mmx_zero_reg ();

    do {
	mmx_average_4_U8 (dest, ref, ref+1, ref_next, ref_next+1);

	if (width == 16)
	    mmx_average_4_U8 (dest+8, ref+8, ref+9, ref_next+8, ref_next+9);

	dest += stride;
	ref += stride;
	ref_next += stride;
    } while (--height);
}
|
|
417
|
9852
|
/* 16- and 8-wide entry points: copy-prediction with half-pel x and y. */
static void MC_put_xy_16_mmx (uint8_t * dest, const uint8_t * ref,
			      int stride, int height)
{
    MC_put_xy_mmx (16, height, dest, ref, stride);
}

static void MC_put_xy_8_mmx (uint8_t * dest, const uint8_t * ref,
			     int stride, int height)
{
    MC_put_xy_mmx (8, height, dest, ref, stride);
}
|
|
429
|
36
|
430 /*-----------------------------------------------------------------------*/
|
1
|
431
|
9852
|
/* Half pixel interpolation in the y direction, averaged with dest:
 * dest = avg (dest, avg (ref, ref+stride)). */
static inline void MC_avg_y_mmx (const int width, int height, uint8_t * dest,
				 const uint8_t * ref, const int stride)
{
    const uint8_t * ref_next = ref + stride;	/* row below, for y interpolation */

    mmx_zero_reg ();

    do {
	mmx_interp_average_2_U8 (dest, ref, ref_next);

	if (width == 16)
	    mmx_interp_average_2_U8 (dest+8, ref+8, ref_next+8);

	dest += stride;
	ref += stride;
	ref_next += stride;
    } while (--height);
}
|
|
450
|
9852
|
/* 16- and 8-wide entry points: avg-prediction with half-pel y. */
static void MC_avg_y_16_mmx (uint8_t * dest, const uint8_t * ref,
			     int stride, int height)
{
    MC_avg_y_mmx (16, height, dest, ref, stride);
}

static void MC_avg_y_8_mmx (uint8_t * dest, const uint8_t * ref,
			    int stride, int height)
{
    MC_avg_y_mmx (8, height, dest, ref, stride);
}
|
|
462
|
36
|
463 /*-----------------------------------------------------------------------*/
|
1
|
464
|
9852
|
/* Half pixel interpolation in the y direction: dest = avg (ref, ref+stride). */
static inline void MC_put_y_mmx (const int width, int height, uint8_t * dest,
				 const uint8_t * ref, const int stride)
{
    const uint8_t * ref_next = ref + stride;	/* row below, for y interpolation */

    mmx_zero_reg ();

    do {
	mmx_average_2_U8 (dest, ref, ref_next);

	if (width == 16)
	    mmx_average_2_U8 (dest+8, ref+8, ref_next+8);

	dest += stride;
	ref += stride;
	ref_next += stride;
    } while (--height);
}
|
|
483
|
9852
|
/* 16- and 8-wide entry points: copy-prediction with half-pel y. */
static void MC_put_y_16_mmx (uint8_t * dest, const uint8_t * ref,
			     int stride, int height)
{
    MC_put_y_mmx (16, height, dest, ref, stride);
}

static void MC_put_y_8_mmx (uint8_t * dest, const uint8_t * ref,
			    int stride, int height)
{
    MC_put_y_mmx (8, height, dest, ref, stride);
}
|
|
495
|
|
496
|
9852
|
/* Instantiate the motion-compensation function table for plain MMX
 * (macro from mpeg2_internal.h; binds the 16 MC_*_mmx entry points). */
MPEG2_MC_EXTERN (mmx)
|
1
|
498
|
|
499
|
|
500
|
|
501
|
|
502
|
|
503
|
|
504
|
36
|
505 /* CPU_MMXEXT/CPU_3DNOW adaptation layer */
|
1
|
506
|
|
/* Select the rounded byte-average instruction at compile time:
 * pavgb for MMXEXT, pavgusb for 3DNow! (both compute (a+b+1)>>1 per
 * byte).  `cpu' is a compile-time constant in every caller, so the
 * branch folds away and only one instruction is emitted. */
#define pavg_r2r(src,dest)		\
do {					\
    if (cpu == CPU_MMXEXT)		\
	pavgb_r2r (src, dest);		\
    else				\
	pavgusb_r2r (src, dest);	\
} while (0)

#define pavg_m2r(src,dest)		\
do {					\
    if (cpu == CPU_MMXEXT)		\
	pavgb_m2r (src, dest);		\
    else				\
	pavgusb_m2r (src, dest);	\
} while (0)
|
|
522
|
|
523
|
36
|
524 /* CPU_MMXEXT code */
|
1
|
525
|
|
526
|
9852
|
/* 8-wide straight copy: dest = ref, one 8-byte row per iteration. */
static inline void MC_put1_8 (int height, uint8_t * dest, const uint8_t * ref,
			      const int stride)
{
    do {
	movq_m2r (*ref, mm0);
	movq_r2m (mm0, *dest);
	ref += stride;
	dest += stride;
    } while (--height);
}
|
|
537
|
9852
|
/* 16-wide straight copy: dest = ref, two 8-byte loads/stores per row. */
static inline void MC_put1_16 (int height, uint8_t * dest, const uint8_t * ref,
			       const int stride)
{
    do {
	movq_m2r (*ref, mm0);
	movq_m2r (*(ref+8), mm1);
	ref += stride;
	movq_r2m (mm0, *dest);
	movq_r2m (mm1, *(dest+8));
	dest += stride;
    } while (--height);
}
|
|
550
|
9852
|
/* 8-wide average with existing dest: dest = avg (dest, ref), using the
 * single-instruction pavg selected by `cpu'. */
static inline void MC_avg1_8 (int height, uint8_t * dest, const uint8_t * ref,
			      const int stride, const int cpu)
{
    do {
	movq_m2r (*ref, mm0);
	pavg_m2r (*dest, mm0);
	ref += stride;
	movq_r2m (mm0, *dest);
	dest += stride;
    } while (--height);
}
|
|
562
|
9852
|
/* 16-wide average with existing dest: dest = avg (dest, ref). */
static inline void MC_avg1_16 (int height, uint8_t * dest, const uint8_t * ref,
			       const int stride, const int cpu)
{
    do {
	movq_m2r (*ref, mm0);
	movq_m2r (*(ref+8), mm1);
	pavg_m2r (*dest, mm0);
	pavg_m2r (*(dest+8), mm1);
	movq_r2m (mm0, *dest);
	ref += stride;
	movq_r2m (mm1, *(dest+8));
	dest += stride;
    } while (--height);
}
|
|
577
|
9852
|
/* 8-wide two-point interpolation: dest = avg (ref, ref+offset).
 * offset is 1 for half-pel x, stride for half-pel y. */
static inline void MC_put2_8 (int height, uint8_t * dest, const uint8_t * ref,
			      const int stride, const int offset,
			      const int cpu)
{
    do {
	movq_m2r (*ref, mm0);
	pavg_m2r (*(ref+offset), mm0);
	ref += stride;
	movq_r2m (mm0, *dest);
	dest += stride;
    } while (--height);
}
|
|
590
|
9852
|
/* 16-wide two-point interpolation: dest = avg (ref, ref+offset). */
static inline void MC_put2_16 (int height, uint8_t * dest, const uint8_t * ref,
			       const int stride, const int offset,
			       const int cpu)
{
    do {
	movq_m2r (*ref, mm0);
	movq_m2r (*(ref+8), mm1);
	pavg_m2r (*(ref+offset), mm0);
	pavg_m2r (*(ref+offset+8), mm1);
	movq_r2m (mm0, *dest);
	ref += stride;
	movq_r2m (mm1, *(dest+8));
	dest += stride;
    } while (--height);
}
|
|
606
|
9852
|
/* 8-wide two-point interpolation averaged with dest:
 * dest = avg (dest, avg (ref, ref+offset)). */
static inline void MC_avg2_8 (int height, uint8_t * dest, const uint8_t * ref,
			      const int stride, const int offset,
			      const int cpu)
{
    do {
	movq_m2r (*ref, mm0);
	pavg_m2r (*(ref+offset), mm0);
	pavg_m2r (*dest, mm0);
	ref += stride;
	movq_r2m (mm0, *dest);
	dest += stride;
    } while (--height);
}
|
|
620
|
9852
|
/* 16-wide two-point interpolation averaged with dest:
 * dest = avg (dest, avg (ref, ref+offset)). */
static inline void MC_avg2_16 (int height, uint8_t * dest, const uint8_t * ref,
			       const int stride, const int offset,
			       const int cpu)
{
    do {
	movq_m2r (*ref, mm0);
	movq_m2r (*(ref+8), mm1);
	pavg_m2r (*(ref+offset), mm0);
	pavg_m2r (*(ref+offset+8), mm1);
	pavg_m2r (*dest, mm0);
	pavg_m2r (*(dest+8), mm1);
	ref += stride;
	movq_r2m (mm0, *dest);
	movq_r2m (mm1, *(dest+8));
	dest += stride;
    } while (--height);
}
|
|
638
|
|
/* 0x01 in every byte lane: the per-byte rounding-correction bit
 * subtracted in the MC_put4_*/MC_avg4_* kernels below. */
static mmx_t mask_one = {0x0101010101010101LL};
|
|
640
|
9852
|
/* 8-wide four-point (half-pel x and y) interpolation using only pavg.
 * pavg rounds up, so pavg of two pavg results can exceed the exact
 * (a+b+c+d+2)>>2 by one; mm7 tracks, per byte, whether that happened
 * (from the xor terms of both averaging stages) and psubusb removes the
 * bias.  The previous row's horizontal average is carried across loop
 * iterations in mm0/mm7, so each ref row is loaded only once.
 * NOTE(review): correction term believed to reproduce the exact rounded
 * result — confirm against the plain-C motion compensation reference. */
static inline void MC_put4_8 (int height, uint8_t * dest, const uint8_t * ref,
			      const int stride, const int cpu)
{
    movq_m2r (*ref, mm0);
    movq_m2r (*(ref+1), mm1);
    movq_r2r (mm0, mm7);
    pxor_r2r (mm1, mm7);	/* xor of first row's two taps */
    pavg_r2r (mm1, mm0);	/* horizontal average of first row */
    ref += stride;

    do {
	movq_m2r (*ref, mm2);
	movq_r2r (mm0, mm5);

	movq_m2r (*(ref+1), mm3);
	movq_r2r (mm2, mm6);

	pxor_r2r (mm3, mm6);	/* xor of this row's two taps */
	pavg_r2r (mm3, mm2);	/* horizontal average of this row */

	por_r2r (mm6, mm7);	/* combine xor info from both rows */
	pxor_r2r (mm2, mm5);	/* xor of the two row averages */

	pand_r2r (mm5, mm7);	/* rounding error occurred where set */
	pavg_r2r (mm2, mm0);	/* vertical average of the row averages */

	pand_m2r (mask_one, mm7);	/* keep only the low bit per byte */

	psubusb_r2r (mm7, mm0);	/* remove the upward rounding bias */

	ref += stride;
	movq_r2m (mm0, *dest);
	dest += stride;

	movq_r2r (mm6, mm7);	/* unroll ! */
	movq_r2r (mm2, mm0);	/* unroll ! */
    } while (--height);
}
|
|
679
|
9852
|
/* 16-wide four-point (half-pel x and y) interpolation, same pavg +
 * rounding-correction scheme as MC_put4_8 but reloading both rows each
 * iteration (no cross-iteration register carry: the second 8-byte half
 * needs the registers).  First half handles bytes 0-7, second bytes 8-15. */
static inline void MC_put4_16 (int height, uint8_t * dest, const uint8_t * ref,
			       const int stride, const int cpu)
{
    do {
	movq_m2r (*ref, mm0);
	movq_m2r (*(ref+stride+1), mm1);
	movq_r2r (mm0, mm7);
	movq_m2r (*(ref+1), mm2);
	pxor_r2r (mm1, mm7);
	movq_m2r (*(ref+stride), mm3);
	movq_r2r (mm2, mm6);
	pxor_r2r (mm3, mm6);
	pavg_r2r (mm1, mm0);	/* avg of diagonal pair */
	pavg_r2r (mm3, mm2);	/* avg of anti-diagonal pair */
	por_r2r (mm6, mm7);
	movq_r2r (mm0, mm6);
	pxor_r2r (mm2, mm6);
	pand_r2r (mm6, mm7);
	pand_m2r (mask_one, mm7);	/* per-byte rounding-error bit */
	pavg_r2r (mm2, mm0);
	psubusb_r2r (mm7, mm0);	/* remove upward rounding bias */
	movq_r2m (mm0, *dest);

	movq_m2r (*(ref+8), mm0);
	movq_m2r (*(ref+stride+9), mm1);
	movq_r2r (mm0, mm7);
	movq_m2r (*(ref+9), mm2);
	pxor_r2r (mm1, mm7);
	movq_m2r (*(ref+stride+8), mm3);
	movq_r2r (mm2, mm6);
	pxor_r2r (mm3, mm6);
	pavg_r2r (mm1, mm0);
	pavg_r2r (mm3, mm2);
	por_r2r (mm6, mm7);
	movq_r2r (mm0, mm6);
	pxor_r2r (mm2, mm6);
	pand_r2r (mm6, mm7);
	pand_m2r (mask_one, mm7);
	pavg_r2r (mm2, mm0);
	psubusb_r2r (mm7, mm0);
	ref += stride;
	movq_r2m (mm0, *(dest+8));
	dest += stride;
    } while (--height);
}
|
|
725
|
9852
|
/* 8-wide four-point interpolation (as MC_put4_16's scheme) followed by a
 * final pavg with the existing dest: dest = avg (dest, 4-point interp). */
static inline void MC_avg4_8 (int height, uint8_t * dest, const uint8_t * ref,
			      const int stride, const int cpu)
{
    do {
	movq_m2r (*ref, mm0);
	movq_m2r (*(ref+stride+1), mm1);
	movq_r2r (mm0, mm7);
	movq_m2r (*(ref+1), mm2);
	pxor_r2r (mm1, mm7);
	movq_m2r (*(ref+stride), mm3);
	movq_r2r (mm2, mm6);
	pxor_r2r (mm3, mm6);
	pavg_r2r (mm1, mm0);
	pavg_r2r (mm3, mm2);
	por_r2r (mm6, mm7);
	movq_r2r (mm0, mm6);
	pxor_r2r (mm2, mm6);
	pand_r2r (mm6, mm7);
	pand_m2r (mask_one, mm7);	/* per-byte rounding-error bit */
	pavg_r2r (mm2, mm0);
	psubusb_r2r (mm7, mm0);	/* remove upward rounding bias */
	movq_m2r (*dest, mm1);
	pavg_r2r (mm1, mm0);	/* final average with dest */
	ref += stride;
	movq_r2m (mm0, *dest);
	dest += stride;
    } while (--height);
}
|
|
754
|
9852
|
/* 16-wide four-point interpolation followed by a final pavg with the
 * existing dest; two 8-byte halves per row, same correction scheme as
 * MC_put4_16. */
static inline void MC_avg4_16 (int height, uint8_t * dest, const uint8_t * ref,
			       const int stride, const int cpu)
{
    do {
	movq_m2r (*ref, mm0);
	movq_m2r (*(ref+stride+1), mm1);
	movq_r2r (mm0, mm7);
	movq_m2r (*(ref+1), mm2);
	pxor_r2r (mm1, mm7);
	movq_m2r (*(ref+stride), mm3);
	movq_r2r (mm2, mm6);
	pxor_r2r (mm3, mm6);
	pavg_r2r (mm1, mm0);
	pavg_r2r (mm3, mm2);
	por_r2r (mm6, mm7);
	movq_r2r (mm0, mm6);
	pxor_r2r (mm2, mm6);
	pand_r2r (mm6, mm7);
	pand_m2r (mask_one, mm7);	/* per-byte rounding-error bit */
	pavg_r2r (mm2, mm0);
	psubusb_r2r (mm7, mm0);	/* remove upward rounding bias */
	movq_m2r (*dest, mm1);
	pavg_r2r (mm1, mm0);	/* final average with dest */
	movq_r2m (mm0, *dest);

	movq_m2r (*(ref+8), mm0);
	movq_m2r (*(ref+stride+9), mm1);
	movq_r2r (mm0, mm7);
	movq_m2r (*(ref+9), mm2);
	pxor_r2r (mm1, mm7);
	movq_m2r (*(ref+stride+8), mm3);
	movq_r2r (mm2, mm6);
	pxor_r2r (mm3, mm6);
	pavg_r2r (mm1, mm0);
	pavg_r2r (mm3, mm2);
	por_r2r (mm6, mm7);
	movq_r2r (mm0, mm6);
	pxor_r2r (mm2, mm6);
	pand_r2r (mm6, mm7);
	pand_m2r (mask_one, mm7);
	pavg_r2r (mm2, mm0);
	psubusb_r2r (mm7, mm0);
	movq_m2r (*(dest+8), mm1);
	pavg_r2r (mm1, mm0);
	ref += stride;
	movq_r2m (mm0, *(dest+8));
	dest += stride;
    } while (--height);
}
|
|
804
|
9852
|
/* MMXEXT entry points: thin shims binding width (16/8), half-pel offset
 * (o = none, x = +1, y = +stride, xy = both) and CPU_MMXEXT onto the
 * shared MC_put*/MC_avg* kernels above. */
static void MC_avg_o_16_mmxext (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_avg1_16 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_avg_o_8_mmxext (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_avg1_8 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_put_o_16_mmxext (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_put1_16 (height, dest, ref, stride);
}

static void MC_put_o_8_mmxext (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_put1_8 (height, dest, ref, stride);
}

static void MC_avg_x_16_mmxext (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_avg2_16 (height, dest, ref, stride, 1, CPU_MMXEXT);
}

static void MC_avg_x_8_mmxext (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_avg2_8 (height, dest, ref, stride, 1, CPU_MMXEXT);
}

static void MC_put_x_16_mmxext (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_put2_16 (height, dest, ref, stride, 1, CPU_MMXEXT);
}

static void MC_put_x_8_mmxext (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_put2_8 (height, dest, ref, stride, 1, CPU_MMXEXT);
}

static void MC_avg_y_16_mmxext (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_avg2_16 (height, dest, ref, stride, stride, CPU_MMXEXT);
}

static void MC_avg_y_8_mmxext (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_avg2_8 (height, dest, ref, stride, stride, CPU_MMXEXT);
}

static void MC_put_y_16_mmxext (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_put2_16 (height, dest, ref, stride, stride, CPU_MMXEXT);
}

static void MC_put_y_8_mmxext (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_put2_8 (height, dest, ref, stride, stride, CPU_MMXEXT);
}

static void MC_avg_xy_16_mmxext (uint8_t * dest, const uint8_t * ref,
				 int stride, int height)
{
    MC_avg4_16 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_avg_xy_8_mmxext (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_avg4_8 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_put_xy_16_mmxext (uint8_t * dest, const uint8_t * ref,
				 int stride, int height)
{
    MC_put4_16 (height, dest, ref, stride, CPU_MMXEXT);
}

static void MC_put_xy_8_mmxext (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_put4_8 (height, dest, ref, stride, CPU_MMXEXT);
}
|
|
900
|
|
901
|
9852
|
/* Instantiate the motion-compensation function table for MMXEXT. */
MPEG2_MC_EXTERN (mmxext)
|
1
|
903
|
|
904
|
|
905
|
9852
|
/* 3DNow! entry points: identical shims to the MMXEXT set above, but with
 * CPU_3DNOW so pavg_* expands to pavgusb instead of pavgb. */
static void MC_avg_o_16_3dnow (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_avg1_16 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_avg_o_8_3dnow (uint8_t * dest, const uint8_t * ref,
			      int stride, int height)
{
    MC_avg1_8 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_put_o_16_3dnow (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_put1_16 (height, dest, ref, stride);
}

static void MC_put_o_8_3dnow (uint8_t * dest, const uint8_t * ref,
			      int stride, int height)
{
    MC_put1_8 (height, dest, ref, stride);
}

static void MC_avg_x_16_3dnow (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_avg2_16 (height, dest, ref, stride, 1, CPU_3DNOW);
}

static void MC_avg_x_8_3dnow (uint8_t * dest, const uint8_t * ref,
			      int stride, int height)
{
    MC_avg2_8 (height, dest, ref, stride, 1, CPU_3DNOW);
}

static void MC_put_x_16_3dnow (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_put2_16 (height, dest, ref, stride, 1, CPU_3DNOW);
}

static void MC_put_x_8_3dnow (uint8_t * dest, const uint8_t * ref,
			      int stride, int height)
{
    MC_put2_8 (height, dest, ref, stride, 1, CPU_3DNOW);
}

static void MC_avg_y_16_3dnow (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_avg2_16 (height, dest, ref, stride, stride, CPU_3DNOW);
}

static void MC_avg_y_8_3dnow (uint8_t * dest, const uint8_t * ref,
			      int stride, int height)
{
    MC_avg2_8 (height, dest, ref, stride, stride, CPU_3DNOW);
}

static void MC_put_y_16_3dnow (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_put2_16 (height, dest, ref, stride, stride, CPU_3DNOW);
}

static void MC_put_y_8_3dnow (uint8_t * dest, const uint8_t * ref,
			      int stride, int height)
{
    MC_put2_8 (height, dest, ref, stride, stride, CPU_3DNOW);
}

static void MC_avg_xy_16_3dnow (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_avg4_16 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_avg_xy_8_3dnow (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_avg4_8 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_put_xy_16_3dnow (uint8_t * dest, const uint8_t * ref,
				int stride, int height)
{
    MC_put4_16 (height, dest, ref, stride, CPU_3DNOW);
}

static void MC_put_xy_8_3dnow (uint8_t * dest, const uint8_t * ref,
			       int stride, int height)
{
    MC_put4_8 (height, dest, ref, stride, CPU_3DNOW);
}
|
|
1001
|
|
1002
|
9852
|
/* Instantiate the motion-compensation function table for 3DNow!. */
MPEG2_MC_EXTERN (3dnow)
|
1
|
1004
|
|
1005 #endif
|