mplayer.hg: comparison of postproc/yuv2rgb_template.c @ 2732:ae79207a3055

Move yuv2rgb to postprocess

author:     nick
date:       Tue, 06 Nov 2001 11:22:40 +0000
parents:
children:   4ce165aa0135
comparison: 2731:214f79969a80 vs. 2732:ae79207a3055

/*
 * yuv2rgb_mmx.c, Software YUV to RGB converter with Intel MMX "technology"
 *
 * Copyright (C) 2000, Silicon Integrated System Corp.
 * All Rights Reserved.
 *
 * Author: Olie Lho <ollie@sis.com.tw>
 *
 * This file is part of mpeg2dec, a free MPEG-2 video decoder
 *
 * mpeg2dec is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * mpeg2dec is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with mpeg2dec; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <stdio.h>
#include <stdlib.h>

#include "../config.h"

//#include "libmpeg2/mpeg2.h"
//#include "libmpeg2/mpeg2_internal.h"
#include <inttypes.h>

#include "rgb2rgb.h"

/* hope these constant values are cache line aligned */
uint64_t mmx_80w = 0x0080008000800080;
uint64_t mmx_10w = 0x1010101010101010;
uint64_t mmx_00ffw = 0x00ff00ff00ff00ff;
uint64_t mmx_Y_coeff = 0x253f253f253f253f;

/* hope these constant values are cache line aligned */
uint64_t mmx_U_green = 0xf37df37df37df37d;
uint64_t mmx_U_blue = 0x4093409340934093;
uint64_t mmx_V_red = 0x3312331233123312;
uint64_t mmx_V_green = 0xe5fce5fce5fce5fc;

/* hope these constant values are cache line aligned */
uint64_t mmx_redmask = 0xf8f8f8f8f8f8f8f8;
uint64_t mmx_grnmask = 0xfcfcfcfcfcfcfcfc;
uint64_t mmx_grnshift = 0x03;
uint64_t mmx_blueshift = 0x03;

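/* For reference, a rough scalar sketch of the fixed-point math the constants
   above encode (purely illustrative helpers, not used by the MMX routines
   below; it assumes the usual ITU-R BT.601 coefficients scaled by 2^13,
   which is what the values above work out to: 0x253f = 9535 ~ 1.164*8192,
   0x3312 = 13074 ~ 1.596*8192, and so on). The MMX code promotes precision
   with "psllw $3" and scales back via pmulhw, the same net >>13. */
static inline uint8_t yuv2rgb_clamp255_ref (int x)
{
    return x < 0 ? 0 : (x > 255 ? 255 : x);
}

static inline void yuv2rgb_pixel_ref (uint8_t y, uint8_t u, uint8_t v,
                                      uint8_t *r, uint8_t *g, uint8_t *b)
{
    int luma = 9535 * (y - 16); /* cf. mmx_Y_coeff, mmx_10w */
    int cb = u - 128;           /* cf. mmx_80w */
    int cr = v - 128;

    *r = yuv2rgb_clamp255_ref ((luma + 13074 * cr) >> 13);            /* mmx_V_red */
    *g = yuv2rgb_clamp255_ref ((luma - 3203 * cb - 6660 * cr) >> 13); /* mmx_U_green, mmx_V_green */
    *b = yuv2rgb_clamp255_ref ((luma + 16531 * cb) >> 13);            /* mmx_U_blue */
}
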
#ifdef HAVE_MMX2
/* use this for K7 and p3 only */
#define MOVNTQ "movntq"
#else
/* for MMX-only processors */
#define MOVNTQ "movq"
#endif

#if !defined( HAVE_MMX2) && defined( HAVE_3DNOW)
/* for K6 2/2+/3 */
#define EMMS "femms;"
#else
#define EMMS "emms;"
#endif

static void yuv420_rgb16_mmx (uint8_t * image, uint8_t * py,
                              uint8_t * pu, uint8_t * pv,
                              int h_size, int v_size,
                              int rgb_stride, int y_stride, int uv_stride)
{
    int even = 1;
    int x, y;

    __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );

    for (y = v_size; --y >= 0; ) {
        uint8_t *_image = image;
        uint8_t *_py = py;
        uint8_t *_pu = pu;
        uint8_t *_pv = pv;

        /* load data for start of next scan line */
        __asm__ __volatile__ (
            "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
            "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
            "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */

            : : "r" (_py), "r" (_pu), "r" (_pv));

        for (x = h_size >> 3; --x >= 0; ) {
            /* This MMX assembly code deals with a SINGLE scan line at a time;
               it converts 8 pixels in each iteration. */

            __asm__ __volatile__ (
                /* Do the multiply part of the conversion for even and odd pixels,
                   register usage:
                   mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
                   mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
                   mm6 -> Y even, mm7 -> Y odd */
                /* convert the chroma part */
                "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */
                "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */

                "psubsw mmx_80w, %%mm0;" /* Cb -= 128 */
                "psubsw mmx_80w, %%mm1;" /* Cr -= 128 */

                "psllw $3, %%mm0;" /* Promote precision */
                "psllw $3, %%mm1;" /* Promote precision */

                "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */
                "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */

                "pmulhw mmx_U_green, %%mm2;" /* Mul Cb with green coeff -> Cb green */
                "pmulhw mmx_V_green, %%mm3;" /* Mul Cr with green coeff -> Cr green */

                "pmulhw mmx_U_blue, %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */
                "pmulhw mmx_V_red, %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */

                "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */

                /* convert the luma part */
                "psubusb mmx_10w, %%mm6;" /* Y -= 16 */

                "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
                "pand mmx_00ffw, %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */

                "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */

                "psllw $3, %%mm6;" /* Promote precision */
                "psllw $3, %%mm7;" /* Promote precision */

                "pmulhw mmx_Y_coeff, %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */
                "pmulhw mmx_Y_coeff, %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */

                /* Do the addition part of the conversion for even and odd pixels,
                   register usage:
                   mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
                   mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
                   mm6 -> Y even, mm7 -> Y odd */
                "movq %%mm0, %%mm3;" /* Copy Cblue */
                "movq %%mm1, %%mm4;" /* Copy Cred */
                "movq %%mm2, %%mm5;" /* Copy Cgreen */

                "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */
                "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */

                "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */
                "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */

                "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */
                "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */

                /* Limit RGB even to 0..255 */
                "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */
                "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */
                "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */

                /* Limit RGB odd to 0..255 */
                "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */
                "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */
                "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */

                /* Interleave RGB even and odd */
                "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
                "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
                "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */

                /* mask unneeded bits off */
                "pand mmx_redmask, %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
                "pand mmx_grnmask, %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
                "pand mmx_redmask, %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */

                "psrlw mmx_blueshift,%%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
                "pxor %%mm4, %%mm4;" /* zero mm4 */

                "movq %%mm0, %%mm5;" /* Copy B7-B0 */
                "movq %%mm2, %%mm7;" /* Copy G7-G0 */

                /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
                "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
                "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */

                "psllw mmx_blueshift,%%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
                "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */

                "movq 8 (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
                MOVNTQ " %%mm0, (%3);" /* store pixel 0-3 */

                /* convert rgb24 plane to rgb16 pack for pixel 4-7 */
                "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
                "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */

                "psllw mmx_blueshift,%%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
                "movd 4 (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */

                "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
                "movd 4 (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */

                MOVNTQ " %%mm5, 8 (%3);" /* store pixel 4-7 */
                : : "r" (_py), "r" (_pu), "r" (_pv), "r" (_image));

            _py += 8;
            _pu += 4;
            _pv += 4;
            _image += 16;
        }

        if (!even) {
            pu += uv_stride;
            pv += uv_stride;
        }

        py += y_stride;
        image += rgb_stride;

        even = (!even);
    }

    __asm__ __volatile__ (EMMS);
}
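
/* For reference, a scalar sketch of the RGB16 packing the masks and shifts
   above implement (purely illustrative, not used by the MMX path; it assumes
   the usual little-endian 5-6-5 layout, which is what the asm produces: keep
   the top 5 bits of R and B and the top 6 bits of G, then pack as
   r[15:11] g[10:5] b[4:0]). */
static inline uint16_t yuv2rgb_pack_rgb16_ref (uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t) (((r & 0xf8) << 8) |  /* cf. mmx_redmask  */
                       ((g & 0xfc) << 3) |  /* cf. mmx_grnmask  */
                       ( b          >> 3)); /* cf. mmx_blueshift */
}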

static void yuv420_argb32_mmx (uint8_t * image, uint8_t * py,
                               uint8_t * pu, uint8_t * pv,
                               int h_size, int v_size,
                               int rgb_stride, int y_stride, int uv_stride)
{
    int even = 1;
    int x, y;

    __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );

    for (y = v_size; --y >= 0; ) {
        uint8_t *_image = image;
        uint8_t *_py = py;
        uint8_t *_pu = pu;
        uint8_t *_pv = pv;

        /* load data for start of next scan line */
        __asm__ __volatile__
            (
             "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
             "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
             "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
             : : "r" (_py), "r" (_pu), "r" (_pv)
            );

        for (x = h_size >> 3; --x >= 0; ) {
            /* This MMX assembly code deals with a SINGLE scan line at a time;
               it converts 8 pixels in each iteration. */
            __asm__ __volatile__ (
                /* Do the multiply part of the conversion for even and odd pixels,
                   register usage:
                   mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
                   mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
                   mm6 -> Y even, mm7 -> Y odd */

                /* convert the chroma part */
                "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */
                "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */

                "psubsw mmx_80w, %%mm0;" /* Cb -= 128 */
                "psubsw mmx_80w, %%mm1;" /* Cr -= 128 */

                "psllw $3, %%mm0;" /* Promote precision */
                "psllw $3, %%mm1;" /* Promote precision */

                "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */
                "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */

                "pmulhw mmx_U_green, %%mm2;" /* Mul Cb with green coeff -> Cb green */
                "pmulhw mmx_V_green, %%mm3;" /* Mul Cr with green coeff -> Cr green */

                "pmulhw mmx_U_blue, %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */
                "pmulhw mmx_V_red, %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */

                "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */

                /* convert the luma part */
                "psubusb mmx_10w, %%mm6;" /* Y -= 16 */

                "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
                "pand mmx_00ffw, %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */

                "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */

                "psllw $3, %%mm6;" /* Promote precision */
                "psllw $3, %%mm7;" /* Promote precision */

                "pmulhw mmx_Y_coeff, %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */
                "pmulhw mmx_Y_coeff, %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */

                /* Do the addition part of the conversion for even and odd pixels,
                   register usage:
                   mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
                   mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
                   mm6 -> Y even, mm7 -> Y odd */

                "movq %%mm0, %%mm3;" /* Copy Cblue */
                "movq %%mm1, %%mm4;" /* Copy Cred */
                "movq %%mm2, %%mm5;" /* Copy Cgreen */

                "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */
                "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */

                "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */
                "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */

                "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */
                "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */

                /* Limit RGB even to 0..255 */
                "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */
                "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */
                "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */

                /* Limit RGB odd to 0..255 */
                "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */
                "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */
                "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */

                /* Interleave RGB even and odd */
                "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
                "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
                "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */

                /* convert RGB plane to RGB packed format,
                   mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
                   mm4 -> GB, mm5 -> AR pixel 4-7,
                   mm6 -> GB, mm7 -> AR pixel 0-3 */
                "pxor %%mm3, %%mm3;" /* zero mm3 */

                "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
                "movq %%mm1, %%mm7;" /* R7 R6 R5 R4 R3 R2 R1 R0 */

                "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
                "movq %%mm1, %%mm5;" /* R7 R6 R5 R4 R3 R2 R1 R0 */

                "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
                "punpcklbw %%mm3, %%mm7;" /* 00 R3 00 R2 00 R1 00 R0 */

                "punpcklwd %%mm7, %%mm6;" /* 00 R1 G1 B1 00 R0 G0 B0 */
                MOVNTQ " %%mm6, (%3);" /* Store ARGB1 ARGB0 */

                "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
                "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */

                "punpckhwd %%mm7, %%mm6;" /* 00 R3 G3 B3 00 R2 G2 B2 */
                MOVNTQ " %%mm6, 8 (%3);" /* Store ARGB3 ARGB2 */

                "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
                "punpckhbw %%mm3, %%mm5;" /* 00 R7 00 R6 00 R5 00 R4 */

                "punpcklwd %%mm5, %%mm4;" /* 00 R5 G5 B5 00 R4 G4 B4 */
                MOVNTQ " %%mm4, 16 (%3);" /* Store ARGB5 ARGB4 */

                "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
                "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */

                "punpckhwd %%mm5, %%mm4;" /* 00 R7 G7 B7 00 R6 G6 B6 */
                MOVNTQ " %%mm4, 24 (%3);" /* Store ARGB7 ARGB6 */

                "movd 4 (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
                "movd 4 (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */

                "pxor %%mm4, %%mm4;" /* zero mm4 */
                "movq 8 (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */

                : : "r" (_py), "r" (_pu), "r" (_pv), "r" (_image));

            _py += 8;
            _pu += 4;
            _pv += 4;
            _image += 32;
        }

        if (!even) {
            pu += uv_stride;
            pv += uv_stride;
        }

        py += y_stride;
        image += rgb_stride;

        even = (!even);
    }

    __asm__ __volatile__ (EMMS);
}
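
/* For reference, a scalar sketch of the 32 bpp layout written above (purely
   illustrative, not used by the MMX path): each pixel is stored as the byte
   sequence B, G, R, 0, i.e. 0x00RRGGBB on a little-endian machine, with the
   "alpha" byte left at zero. */
static inline uint32_t yuv2rgb_pack_argb32_ref (uint8_t r, uint8_t g, uint8_t b)
{
    return ((uint32_t) r << 16) | ((uint32_t) g << 8) | (uint32_t) b;
}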

yuv2rgb_fun yuv2rgb_init_mmx (int bpp, int mode)
{
//    if (bpp == 15 || bpp == 16) {
    if (bpp == 16 && mode == MODE_RGB) return yuv420_rgb16_mmx;
    if (bpp == 32 && mode == MODE_RGB) return yuv420_argb32_mmx;
    return NULL; // Fallback to C.
}
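
/* A minimal usage sketch (illustrative only; it assumes the caller provides
   the frame buffers, dimensions and strides, and that yuv2rgb_fun and
   MODE_RGB are declared by the yuv2rgb interface this file plugs into). */
static void yuv2rgb_mmx_example (uint8_t * dst, uint8_t * y_plane,
                                 uint8_t * u_plane, uint8_t * v_plane,
                                 int width, int height)
{
    yuv2rgb_fun convert = yuv2rgb_init_mmx (16, MODE_RGB);

    if (convert != NULL)
        convert (dst, y_plane, u_plane, v_plane,
                 width, height,
                 width * 2,   /* rgb_stride: 2 bytes per RGB16 pixel   */
                 width,       /* y_stride                              */
                 width / 2);  /* uv_stride: chroma is subsampled by 2  */
    /* else fall back to the C converter */
}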