comparison postproc/yuv2rgb_template.c @ 9476:eff727517e6b

yuv2rgb brightness/contrast/saturation/different colorspaces support finished
yuv2rgb deglobalize
yuv2rgb optimizations / cleanup
bugs?
author michael
date Fri, 21 Feb 2003 20:35:18 +0000
parents 7bbe4bce6293
children 543ab3909b78
comparing 9475:6d4d66421b29 with 9476:eff727517e6b
@@ -23,10 +23,11 @@
  * along with GNU Make; see the file COPYING. If not, write to
  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  * 15,24 bpp and dithering from Michael Niedermayer (michaelni@gmx.at)
  * MMX/MMX2 Template stuff from Michael Niedermayer (needed for fast movntq support)
+ * context / deglobalize stuff by Michael Niedermayer
  */
 
 #undef MOVNTQ
 #undef EMMS
 #undef SFENCE
@@ -54,40 +55,41 @@
        mm6 -> Y even, mm7 -> Y odd */\
     /* convert the chroma part */\
     "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
     "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
 \
-    "psubsw "MANGLE(mmx_80w)", %%mm0;" /* Cb -= 128 */ \
-    "psubsw "MANGLE(mmx_80w)", %%mm1;" /* Cr -= 128 */ \
-\
     "psllw $3, %%mm0;" /* Promote precision */ \
     "psllw $3, %%mm1;" /* Promote precision */ \
 \
+    "psubsw "U_OFFSET"(%4), %%mm0;" /* Cb -= 128 */ \
+    "psubsw "V_OFFSET"(%4), %%mm1;" /* Cr -= 128 */ \
+\
     "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
     "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
 \
-    "pmulhw "MANGLE(mmx_U_green)", %%mm2;" /* Mul Cb with green coeff -> Cb green */ \
-    "pmulhw "MANGLE(mmx_V_green)", %%mm3;" /* Mul Cr with green coeff -> Cr green */ \
+    "pmulhw "UG_COEFF"(%4), %%mm2;" /* Mul Cb with green coeff -> Cb green */ \
+    "pmulhw "VG_COEFF"(%4), %%mm3;" /* Mul Cr with green coeff -> Cr green */ \
 \
-    "pmulhw "MANGLE(mmx_U_blue)", %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */\
-    "pmulhw "MANGLE(mmx_V_red)", %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */\
+    "pmulhw "UB_COEFF"(%4), %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */\
+    "pmulhw "VR_COEFF"(%4), %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */\
 \
     "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */\
 \
     /* convert the luma part */\
-    "psubusb "MANGLE(mmx_10w)", %%mm6;" /* Y -= 16 */\
-\
     "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
     "pand "MANGLE(mmx_00ffw)", %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\
 \
     "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */\
 \
     "psllw $3, %%mm6;" /* Promote precision */\
     "psllw $3, %%mm7;" /* Promote precision */\
 \
-    "pmulhw "MANGLE(mmx_Y_coeff)", %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */\
-    "pmulhw "MANGLE(mmx_Y_coeff)", %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */\
+    "psubw "Y_OFFSET"(%4), %%mm6;" /* Y -= 16 */\
+    "psubw "Y_OFFSET"(%4), %%mm7;" /* Y -= 16 */\
+\
+    "pmulhw "Y_COEFF"(%4), %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */\
+    "pmulhw "Y_COEFF"(%4), %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */\
 \
     /* Do the addition part of the conversion for even and odd pixels,
        register usage:
        mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
        mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
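
The hunk above is the core of the deglobalization: the conversion constants no longer come from fixed global tables (mmx_80w, mmx_10w, mmx_U_green, mmx_Y_coeff, ...) but from per-context tables reached through operand %4, so every SwsContext can carry its own offsets and coefficients for brightness/contrast/saturation and for different colorspaces. A scalar sketch of the fixed-point arithmetic the macro performs, assuming plain BT.601 coefficients in 16.16 fixed point (the constants below are illustrative defaults, not the context's actual table layout):

    /* Scalar model of the per-pixel math done above with psubsw/psubw and
     * pmulhw.  In the new code the equivalents of these constants live in
     * the SwsContext so they can be rescaled per context. */
    static unsigned char clamp8(int x){ return x < 0 ? 0 : x > 255 ? 255 : x; }

    static void yuv2rgb_pixel(int Y, int U, int V,
                              unsigned char *r, unsigned char *g, unsigned char *b)
    {
        int y = 76309 * (Y - 16);   /* Y_COEFF: 1.164 = 255/219, scaled by 65536 */
        int u = U - 128;            /* U_OFFSET */
        int v = V - 128;            /* V_OFFSET */

        *r = clamp8((y              + 104597 * v) >> 16); /* VR_COEFF ~ 1.596 */
        *g = clamp8((y -  25675 * u -  53279 * v) >> 16); /* UG/VG_COEFF      */
        *b = clamp8((y + 132201 * u             ) >> 16); /* UB_COEFF ~ 2.017 */
    }

The MMX version gets the equivalent of the explicit clamp for free later, through saturating pack instructions in the part of the macro not shown in this hunk.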
@@ -119,48 +121,48 @@
     "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */\
     "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */\
     "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */\
 
 
-static inline void RENAME(yuv420_rgb16) (uint8_t * image, uint8_t * py,
-                                         uint8_t * pu, uint8_t * pv,
-                                         unsigned h_size, unsigned v_size,
-                                         int rgb_stride, int y_stride, int uv_stride)
-{
-    int even = 1;
-    int x, y;
-
+static inline void RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY,
+                                        int srcSliceH, uint8_t* dst[], int dstStride[]){
+    int srcStride[3];
+    uint8_t *src[3];
+    int y, h_size;
+
+    sws_orderYUV(c->srcFormat, src, srcStride, srcParam, srcStrideParam);
+    if(c->srcFormat == IMGFMT_422P){
+        srcStride[1] *= 2;
+        srcStride[2] *= 2;
+    }
+
+    h_size= (c->dstW+7)&~7;
+    if(h_size*2 > dstStride[0]) h_size-=8;
+
     __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
-
-    for (y = v_size; --y >= 0; ) {
-        uint8_t *_image = image;
-        uint8_t *_py = py;
-        uint8_t *_pu = pu;
-        uint8_t *_pv = pv;
-        int internal_h_size= h_size;
-        int aligned_h_size= (h_size+7)&~7;
-
-        if(rgb_stride >= aligned_h_size*2) internal_h_size= aligned_h_size;
+//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
+//srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
+    for (y= 0; y<srcSliceH; y++ ) {
+        uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+        uint8_t *_py = src[0] + y*srcStride[0];
+        uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+        uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+        int index= -h_size/2;
 
         b5Dither= dither8[y&1];
         g6Dither= dither4[y&1];
         g5Dither= dither8[y&1];
         r5Dither= dither8[(y+1)&1];
-
-        /* load data for start of next scan line */
-        __asm__ __volatile__ (
-            "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
-            "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
-            "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-
-            : : "r" (_py), "r" (_pu), "r" (_pv));
-
-        for (x = internal_h_size >> 3; --x >= 0; ) {
         /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
            pixels in each iteration */
-
         __asm__ __volatile__ (
+            /* load data for start of next scan line */
+            "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+            "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+            "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+//            ".balign 16 \n\t"
+            "1: \n\t"
             /* no speed diference on my p3@500 with prefetch,
              * if it is faster for anyone with -benchmark then tell me
             PREFETCH" 64(%0) \n\t"
             PREFETCH" 64(%1) \n\t"
             PREFETCH" 64(%2) \n\t"
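
This hunk folds the per-row prologue asm and the C pixel loop into one asm block that loops on a negative index: %0 starts at -h_size/2 and counts upward, the chroma and luma base pointers are biased by the caller (_pu - index, _pv - index, _py - 2*index) so that (%2, %0), (%3, %0) and (%5, %0, 2) address the right samples, and the "addl $4, %0 / js 1b" pair both advances the counter and tests for end of row. The same idiom in plain C, as an illustration (assumes width > 0):

    /* Negative-index loop: bias the pointers once, then a single
     * increment-and-test-sign replaces separate pointer and counter updates. */
    static void copy_row(unsigned char *dst, const unsigned char *src, int width)
    {
        int index = -width;      /* counts upward, hits zero at end of row */
        src -= index;            /* bias: src[index] is the first sample   */
        dst -= index;
        do {
            dst[index] = src[index];   /* like the (%2, %0) addressing above */
        } while (++index);             /* same exit condition as "js 1b"     */
    }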
@@ -188,84 +190,75 @@
     "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
 
     "psllw $3, %%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
     "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
 
-    "movq 8 (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-    MOVNTQ " %%mm0, (%3);" /* store pixel 0-3 */
+    "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+    MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
 
     /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
     "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
     "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
 
     "psllw $3, %%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
-    "movd 4 (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+    "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
 
     "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
-    "movd 4 (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+    "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
 
-    MOVNTQ " %%mm5, 8 (%3);" /* store pixel 4-7 */
-    : : "r" (_py), "r" (_pu), "r" (_pv), "r" (_image));
-
-    _py += 8;
-    _pu += 4;
-    _pv += 4;
-    _image += 16;
-    }
-
-    if (!even) {
-        pu += uv_stride;
-        pv += uv_stride;
-    }
-
-    py += y_stride;
-    image += rgb_stride;
-
-    even = (!even);
+    MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+    "addl $16, %1 \n\t"
+    "addl $4, %0 \n\t"
+    " js 1b \n\t"
+
+    : "+r" (index), "+r" (_image)
+    : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+    );
     }
 
     __asm__ __volatile__ (EMMS);
 }
 
-static inline void RENAME(yuv420_rgb15) (uint8_t * image, uint8_t * py,
-                                         uint8_t * pu, uint8_t * pv,
-                                         unsigned h_size, unsigned v_size,
-                                         int rgb_stride, int y_stride, int uv_stride)
-{
-    int even = 1;
-    int x, y;
-
+static inline void RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY,
+                                        int srcSliceH, uint8_t* dst[], int dstStride[]){
+    int srcStride[3];
+    uint8_t *src[3];
+    int y, h_size;
+
+    sws_orderYUV(c->srcFormat, src, srcStride, srcParam, srcStrideParam);
+    if(c->srcFormat == IMGFMT_422P){
+        srcStride[1] *= 2;
+        srcStride[2] *= 2;
+    }
+
+    h_size= (c->dstW+7)&~7;
+    if(h_size*2 > dstStride[0]) h_size-=8;
+
     __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
-
-    for (y = v_size; --y >= 0; ) {
-        uint8_t *_image = image;
-        uint8_t *_py = py;
-        uint8_t *_pu = pu;
-        uint8_t *_pv = pv;
-        int internal_h_size= h_size;
-        int aligned_h_size= (h_size+7)&~7;
-
-        if(rgb_stride >= aligned_h_size*2) internal_h_size= aligned_h_size;
+//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
+//srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
+    for (y= 0; y<srcSliceH; y++ ) {
+        uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+        uint8_t *_py = src[0] + y*srcStride[0];
+        uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+        uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+        int index= -h_size/2;
 
         b5Dither= dither8[y&1];
         g6Dither= dither4[y&1];
         g5Dither= dither8[y&1];
         r5Dither= dither8[(y+1)&1];
-
-        /* load data for start of next scan line */
-        __asm__ __volatile__ (
-            "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
-            "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
-            "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-
-            : : "r" (_py), "r" (_pu), "r" (_pv));
-
-        for (x = internal_h_size >> 3; --x >= 0; ) {
         /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
           pixels in each iteration */
-
        __asm__ __volatile__ (
+            /* load data for start of next scan line */
+            "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+            "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+            "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+//            ".balign 16 \n\t"
+            "1: \n\t"
             YUV2RGB
 
 #ifdef DITHER1XBPP
             "paddusb "MANGLE(b5Dither)", %%mm0 \n\t"
             "paddusb "MANGLE(g5Dither)", %%mm2 \n\t"
@@ -289,79 +282,69 @@
     "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
 
     "psllw $2, %%mm2;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
     "por %%mm2, %%mm0;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
 
-    "movq 8 (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-    MOVNTQ " %%mm0, (%3);" /* store pixel 0-3 */
+    "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+    MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
 
     /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
     "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 0_g7g6g5 g4g3_0_0 */
     "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
 
     "psllw $2, %%mm7;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
-    "movd 4 (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+    "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
 
     "por %%mm7, %%mm5;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
-    "movd 4 (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+    "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
 
-    MOVNTQ " %%mm5, 8 (%3);" /* store pixel 4-7 */
-    : : "r" (_py), "r" (_pu), "r" (_pv), "r" (_image));
-
-    _py += 8;
-    _pu += 4;
-    _pv += 4;
-    _image += 16;
-    }
-
-    if (!even) {
-        pu += uv_stride;
-        pv += uv_stride;
-    }
-
-    py += y_stride;
-    image += rgb_stride;
-
-    even = (!even);
+    MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+    "addl $16, %1 \n\t"
+    "addl $4, %0 \n\t"
+    " js 1b \n\t"
+    : "+r" (index), "+r" (_image)
+    : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+    );
     }
 
     __asm__ __volatile__ (EMMS);
 }
 
-static inline void RENAME(yuv420_rgb24) (uint8_t * image, uint8_t * py,
-                                         uint8_t * pu, uint8_t * pv,
-                                         unsigned h_size, unsigned v_size,
-                                         int rgb_stride, int y_stride, int uv_stride)
-{
-    int even = 1;
-    int x, y;
-
+static inline void RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY,
+                                        int srcSliceH, uint8_t* dst[], int dstStride[]){
+    int srcStride[3];
+    uint8_t *src[3];
+    int y, h_size;
+
+    sws_orderYUV(c->srcFormat, src, srcStride, srcParam, srcStrideParam);
+    if(c->srcFormat == IMGFMT_422P){
+        srcStride[1] *= 2;
+        srcStride[2] *= 2;
+    }
+
+    h_size= (c->dstW+7)&~7;
+    if(h_size*3 > dstStride[0]) h_size-=8;
+
     __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
 
-    for (y = v_size; --y >= 0; ) {
-        uint8_t *_image = image;
-        uint8_t *_py = py;
-        uint8_t *_pu = pu;
-        uint8_t *_pv = pv;
-        int internal_h_size= h_size;
-        int aligned_h_size= (h_size+7)&~7;
-
-        if(rgb_stride >= aligned_h_size*3) internal_h_size= aligned_h_size;
-
-        /* load data for start of next scan line */
-        __asm__ __volatile__ (
-            "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
-            "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
-            "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-
-            : : "r" (_py), "r" (_pu), "r" (_pv));
-
-        for (x = internal_h_size >> 3; --x >= 0; ) {
+    for (y= 0; y<srcSliceH; y++ ) {
+        uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+        uint8_t *_py = src[0] + y*srcStride[0];
+        uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+        uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+        int index= -h_size/2;
+
         /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
            pixels in each iteration */
-
         __asm__ __volatile__ (
+            /* load data for start of next scan line */
+            "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+            "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+            "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+//            ".balign 16 \n\t"
+            "1: \n\t"
             YUV2RGB
             /* mm0=B, %%mm2=G, %%mm1=R */
 #ifdef HAVE_MMX2
             "movq "MANGLE(M24A)", %%mm4 \n\t"
             "movq "MANGLE(M24C)", %%mm7 \n\t"
374 "pand %%mm7, %%mm6 \n\t" /* R1 R0 */ 357 "pand %%mm7, %%mm6 \n\t" /* R1 R0 */
375 358
376 "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */ 359 "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */
377 "por %%mm5, %%mm6 \n\t" 360 "por %%mm5, %%mm6 \n\t"
378 "por %%mm3, %%mm6 \n\t" 361 "por %%mm3, %%mm6 \n\t"
379 MOVNTQ" %%mm6, (%3) \n\t" 362 MOVNTQ" %%mm6, (%1) \n\t"
380 363
381 "psrlq $8, %%mm2 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */ 364 "psrlq $8, %%mm2 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */
382 "pshufw $0xA5, %%mm0, %%mm5 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */ 365 "pshufw $0xA5, %%mm0, %%mm5 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */
383 "pshufw $0x55, %%mm2, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */ 366 "pshufw $0x55, %%mm2, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */
384 "pshufw $0xA5, %%mm1, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */ 367 "pshufw $0xA5, %%mm1, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */
387 "pand %%mm7, %%mm3 \n\t" /* G4 G3 */ 370 "pand %%mm7, %%mm3 \n\t" /* G4 G3 */
388 "pand %%mm4, %%mm6 \n\t" /* R4 R3 R2 */ 371 "pand %%mm4, %%mm6 \n\t" /* R4 R3 R2 */
389 372
390 "por %%mm5, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */ 373 "por %%mm5, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */
391 "por %%mm3, %%mm6 \n\t" 374 "por %%mm3, %%mm6 \n\t"
392 MOVNTQ" %%mm6, 8(%3) \n\t" 375 MOVNTQ" %%mm6, 8(%1) \n\t"
393 376
394 "pshufw $0xFF, %%mm0, %%mm5 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */ 377 "pshufw $0xFF, %%mm0, %%mm5 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */
395 "pshufw $0xFA, %%mm2, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */ 378 "pshufw $0xFA, %%mm2, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */
396 "pshufw $0xFA, %%mm1, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */ 379 "pshufw $0xFA, %%mm1, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */
397 "movd 4 (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ 380 "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
398 381
399 "pand %%mm7, %%mm5 \n\t" /* B7 B6 */ 382 "pand %%mm7, %%mm5 \n\t" /* B7 B6 */
400 "pand %%mm4, %%mm3 \n\t" /* G7 G6 G5 */ 383 "pand %%mm4, %%mm3 \n\t" /* G7 G6 G5 */
401 "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */ 384 "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */
402 "movd 4 (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ 385 "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
403 \ 386 \
404 "por %%mm5, %%mm3 \n\t" 387 "por %%mm5, %%mm3 \n\t"
405 "por %%mm3, %%mm6 \n\t" 388 "por %%mm3, %%mm6 \n\t"
406 MOVNTQ" %%mm6, 16(%3) \n\t" 389 MOVNTQ" %%mm6, 16(%1) \n\t"
407 "movq 8 (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ 390 "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
408 "pxor %%mm4, %%mm4 \n\t" 391 "pxor %%mm4, %%mm4 \n\t"
409 392
410 #else 393 #else
411 394
412 "pxor %%mm4, %%mm4 \n\t" 395 "pxor %%mm4, %%mm4 \n\t"
@@ -440,87 +423,76 @@
 
         "psrlq $8, %%mm7 \n\t" /* 00RGBRGB 0 */
         "movq %%mm0, %%mm6 \n\t" /* 0RGBRGB0 1 */
         "psllq $40, %%mm0 \n\t" /* GB000000 1 */
         "por %%mm0, %%mm7 \n\t" /* GBRGBRGB 0 */
-        MOVNTQ" %%mm7, (%3) \n\t"
+        MOVNTQ" %%mm7, (%1) \n\t"
 
-        "movd 4 (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+        "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
 
         "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */
         "movq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */
         "psllq $24, %%mm5 \n\t" /* BRGB0000 2 */
         "por %%mm5, %%mm6 \n\t" /* BRGBRGBR 1 */
-        MOVNTQ" %%mm6, 8(%3) \n\t"
+        MOVNTQ" %%mm6, 8(%1) \n\t"
 
-        "movq 8 (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+        "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
 
         "psrlq $40, %%mm1 \n\t" /* 000000RG 2 */
         "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */
         "por %%mm3, %%mm1 \n\t" /* RGBRGBRG 2 */
-        MOVNTQ" %%mm1, 16(%3) \n\t"
+        MOVNTQ" %%mm1, 16(%1) \n\t"
 
-        "movd 4 (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+        "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
         "pxor %%mm4, %%mm4 \n\t"
 #endif
 
-        : : "r" (_py), "r" (_pu), "r" (_pv), "r" (_image));
-
-        _py += 8;
-        _pu += 4;
-        _pv += 4;
-        _image += 24;
-        }
-
-        if (!even) {
-            pu += uv_stride;
-            pv += uv_stride;
-        }
-
-        py += y_stride;
-        image += rgb_stride;
-
-        even = (!even);
+        "addl $24, %1 \n\t"
+        "addl $4, %0 \n\t"
+        " js 1b \n\t"
+
+        : "+r" (index), "+r" (_image)
+        : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+        );
     }
 
     __asm__ __volatile__ (EMMS);
 }
 
-
-static inline void RENAME(yuv420_argb32) (uint8_t * image, uint8_t * py,
-                                          uint8_t * pu, uint8_t * pv,
-                                          unsigned h_size, unsigned v_size,
-                                          int rgb_stride, int y_stride, int uv_stride)
-{
-    int even = 1;
-    int x, y;
-
+static inline void RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY,
+                                        int srcSliceH, uint8_t* dst[], int dstStride[]){
+    int srcStride[3];
+    uint8_t *src[3];
+    int y, h_size;
+
+    sws_orderYUV(c->srcFormat, src, srcStride, srcParam, srcStrideParam);
+    if(c->srcFormat == IMGFMT_422P){
+        srcStride[1] *= 2;
+        srcStride[2] *= 2;
+    }
+
+    h_size= (c->dstW+7)&~7;
+    if(h_size*4 > dstStride[0]) h_size-=8;
+
     __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
 
-    for (y = v_size; --y >= 0; ) {
-        uint8_t *_image = image;
-        uint8_t *_py = py;
-        uint8_t *_pu = pu;
-        uint8_t *_pv = pv;
-        int internal_h_size= h_size;
-        int aligned_h_size= (h_size+7)&~7;
-
-        if(rgb_stride >= aligned_h_size*4) internal_h_size= aligned_h_size;
-
-        /* load data for start of next scan line */
-        __asm__ __volatile__
-        (
-            "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
-            "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
-            "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-            : : "r" (_py), "r" (_pu), "r" (_pv)
-        );
-
-        for (x = internal_h_size >> 3; --x >= 0; ) {
+    for (y= 0; y<srcSliceH; y++ ) {
+        uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+        uint8_t *_py = src[0] + y*srcStride[0];
+        uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+        uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+        int index= -h_size/2;
+
         /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
            pixels in each iteration */
         __asm__ __volatile__ (
+            /* load data for start of next scan line */
+            "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+            "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+            "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+//            ".balign 16 \n\t"
+            "1: \n\t"
             YUV2RGB
             /* convert RGB plane to RGB packed format,
                mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
                mm4 -> GB, mm5 -> AR pixel 4-7,
                mm6 -> GB, mm7 -> AR pixel 0-3 */
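
The 32 bpp case is the easy one: punpcklbw interleaves B with G and R with zero, then punpcklwd/punpckhwd merge the halves, so each pixel ends up as the 4-byte group B,G,R,0 in memory. A scalar equivalent:

    /* Scalar view of the punpcklbw/punpcklwd interleave above: one padded
     * 4-byte group per pixel, with the high byte left zero. */
    static void store_rgb32(unsigned char *dst, const unsigned char *b,
                            const unsigned char *g, const unsigned char *r, int n)
    {
        int i;
        for (i = 0; i < n; i++) {
            dst[4*i + 0] = b[i];
            dst[4*i + 1] = g[i];
            dst[4*i + 2] = r[i];
            dst[4*i + 3] = 0;    /* unused alpha/padding byte */
        }
    }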
@@ -534,62 +506,42 @@
 
         "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
         "punpcklbw %%mm3, %%mm7;" /* 00 R3 00 R2 00 R1 00 R0 */
 
         "punpcklwd %%mm7, %%mm6;" /* 00 R1 B1 G1 00 R0 B0 G0 */
-        MOVNTQ " %%mm6, (%3);" /* Store ARGB1 ARGB0 */
+        MOVNTQ " %%mm6, (%1);" /* Store ARGB1 ARGB0 */
 
         "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
         "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
 
         "punpckhwd %%mm7, %%mm6;" /* 00 R3 G3 B3 00 R2 B3 G2 */
-        MOVNTQ " %%mm6, 8 (%3);" /* Store ARGB3 ARGB2 */
+        MOVNTQ " %%mm6, 8 (%1);" /* Store ARGB3 ARGB2 */
 
         "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
         "punpckhbw %%mm3, %%mm5;" /* 00 R7 00 R6 00 R5 00 R4 */
 
         "punpcklwd %%mm5, %%mm4;" /* 00 R5 B5 G5 00 R4 B4 G4 */
-        MOVNTQ " %%mm4, 16 (%3);" /* Store ARGB5 ARGB4 */
+        MOVNTQ " %%mm4, 16 (%1);" /* Store ARGB5 ARGB4 */
 
         "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
         "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
 
         "punpckhwd %%mm5, %%mm4;" /* 00 R7 G7 B7 00 R6 B6 G6 */
-        MOVNTQ " %%mm4, 24 (%3);" /* Store ARGB7 ARGB6 */
+        MOVNTQ " %%mm4, 24 (%1);" /* Store ARGB7 ARGB6 */
 
-        "movd 4 (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
-        "movd 4 (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+        "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+        "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
 
         "pxor %%mm4, %%mm4;" /* zero mm4 */
-        "movq 8 (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+        "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
 
-        : : "r" (_py), "r" (_pu), "r" (_pv), "r" (_image));
-
-        _py += 8;
-        _pu += 4;
-        _pv += 4;
-        _image += 32;
-        }
-
-        if (!even) {
-            pu += uv_stride;
-            pv += uv_stride;
-        }
-
-        py += y_stride;
-        image += rgb_stride;
-
-        even = (!even);
+        "addl $32, %1 \n\t"
+        "addl $4, %0 \n\t"
+        " js 1b \n\t"
+
+        : "+r" (index), "+r" (_image)
+        : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+        );
     }
 
     __asm__ __volatile__ (EMMS);
 }
-
-yuv2rgb_fun RENAME(yuv2rgb_init) (unsigned bpp, int mode)
-{
-    if (bpp == 15 && mode == MODE_RGB) return RENAME(yuv420_rgb15);
-    if (bpp == 16 && mode == MODE_RGB) return RENAME(yuv420_rgb16);
-    if (bpp == 24 && mode == MODE_RGB) return RENAME(yuv420_rgb24);
-    if (bpp == 32 && mode == MODE_RGB) return RENAME(yuv420_argb32);
-    return NULL; // Fallback to C.
-}
-
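
The deleted block at the end is the old per-template dispatcher; after this change the conversion function is selected through the SwsContext machinery instead, which is why every routine now takes the context as its first argument. For reference, callers of the removed interface looked roughly like this (a sketch; the exact RENAME()d symbol depends on the build flavour):

    /* Old-style usage of the removed dispatcher (illustrative). */
    yuv2rgb_fun convert = yuv2rgb_init_MMX(16, MODE_RGB); /* assumed expansion of RENAME(yuv2rgb_init) */
    if (convert)
        convert(image, py, pu, pv, h_size, v_size, rgb_stride, y_stride, uv_stride);
    /* a NULL return meant: no MMX routine for this bpp/mode, fall back to C */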