Mercurial > mplayer.hg
annotate postproc/swscale_template.c @ 7414:00c3f129908f
update & GUI CP1251 encoding in FFT

author    iive
date      Mon, 16 Sep 2002 13:15:20 +0000
parents   064ada190b6c
children  c6aa14b47d03

rev | line source
4295 | 1 /*
2 Copyright (C) 2001-2002 Michael Niedermayer <michaelni@gmx.at>
2216 | 3
4295 | 4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
2216 | 8
4295 | 9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
2540 | 19 #undef MOVNTQ |
2680 | 20 #undef PAVGB |
3136 | 21 #undef PREFETCH |
22 #undef PREFETCHW | |
23 #undef EMMS | |
24 #undef SFENCE | |
25 | |
26 #ifdef HAVE_3DNOW | |
27 /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
28 #define EMMS "femms" | |
29 #else | |
30 #define EMMS "emms" | |
31 #endif | |
32 | |
33 #ifdef HAVE_3DNOW | |
34 #define PREFETCH "prefetch" | |
35 #define PREFETCHW "prefetchw" | |
36 #elif defined ( HAVE_MMX2 ) | |
37 #define PREFETCH "prefetchnta" | |
38 #define PREFETCHW "prefetcht0" | |
39 #else | |
40 #define PREFETCH "/nop" | |
41 #define PREFETCHW "/nop" | |
42 #endif | |
43 | |
44 #ifdef HAVE_MMX2 | |
45 #define SFENCE "sfence" | |
46 #else | |
47 #define SFENCE "/nop" | |
48 #endif | |
49
50 #ifdef HAVE_MMX2
51 #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
52 #elif defined (HAVE_3DNOW)
53 #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
54 #endif
55
56 #ifdef HAVE_MMX2
57 #define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
58 #else
59 #define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
60 #endif
61
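/*
 * Editor's sketch, not part of the original file: how the EMMS/SFENCE/
 * PREFETCH/MOVNTQ macros above are meant to be pasted into the asm()
 * templates below.  Each one expands to the best instruction the configured
 * CPU has, or to a harmless "/nop" fallback, so a single template serves
 * plain MMX, MMX2 and 3DNow! builds.  The function name and operands here
 * are made up for illustration only.
 */
static inline void example_copy8bytes(unsigned char *dst, const unsigned char *src)
{
	asm volatile(
		PREFETCH" 64(%1)	\n\t"	/* hint that we will read ahead of src */
		"movq (%1), %%mm0	\n\t"	/* load 8 bytes */
		MOVNTQ(%%mm0, (%0))		/* movntq on MMX2, plain movq otherwise */
		SFENCE"			\n\t"	/* order the streaming store (nop without MMX2) */
		EMMS"			\n\t"	/* leave MMX state, femms on 3DNow! */
		:: "r" (dst), "r" (src)
		: "memory");
}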
3344 | 62 #define YSCALEYUV2YV12X(x) \ |
63 "xorl %%eax, %%eax \n\t"\ | |
64 "pxor %%mm3, %%mm3 \n\t"\ | |
65 "pxor %%mm4, %%mm4 \n\t"\ | |
66 "movl %0, %%edx \n\t"\ | |
67 ".balign 16 \n\t" /* FIXME Unroll? */\ | |
68 "1: \n\t"\ | |
69 "movl (%1, %%edx, 4), %%esi \n\t"\ | |
70 "movq (%2, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\ | |
71 "movq " #x "(%%esi, %%eax, 2), %%mm2 \n\t" /* srcData */\ | |
72 "movq 8+" #x "(%%esi, %%eax, 2), %%mm5 \n\t" /* srcData */\ | |
73 "pmulhw %%mm0, %%mm2 \n\t"\ | |
74 "pmulhw %%mm0, %%mm5 \n\t"\ | |
75 "paddw %%mm2, %%mm3 \n\t"\ | |
76 "paddw %%mm5, %%mm4 \n\t"\ | |
77 "addl $1, %%edx \n\t"\ | |
78 " jnz 1b \n\t"\ | |
79 "psraw $3, %%mm3 \n\t"\ | |
80 "psraw $3, %%mm4 \n\t"\ | |
81 "packuswb %%mm4, %%mm3 \n\t"\ | |
82 MOVNTQ(%%mm3, (%3, %%eax))\ | |
83 "addl $8, %%eax \n\t"\ | |
84 "cmpl %4, %%eax \n\t"\ | |
85 "pxor %%mm3, %%mm3 \n\t"\ | |
86 "pxor %%mm4, %%mm4 \n\t"\ | |
87 "movl %0, %%edx \n\t"\ | |
88 "jb 1b \n\t" | |
89 | |
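/*
 * Editor's sketch, not part of the original file: the scalar arithmetic that
 * YSCALEYUV2YV12X performs per output byte.  pmulhw keeps only the high 16
 * bits of each coefficient*sample product, the accumulated sum is shifted
 * right by 3, and packuswb clips the result to 0..255.  The parameter names
 * are illustrative; the real callers pass negated sizes and end pointers so
 * the loop counter can run up towards zero.
 */
static inline void yscaleyuv2yv12X_c(short **src, short *filter, int filterSize,
                                     unsigned char *dst, int dstW)
{
	int i, j;
	for(i=0; i<dstW; i++)
	{
		int val= 0;
		for(j=0; j<filterSize; j++)
			val+= (filter[j]*src[j][i])>>16;	/* pmulhw */
		val>>=3;					/* psraw $3 */
		if(val<0) val=0; else if(val>255) val=255;	/* packuswb */
		dst[i]= val;
	}
}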
90 #define YSCALEYUV2YV121 \ | |
91 "movl %2, %%eax \n\t"\ | |
92 ".balign 16 \n\t" /* FIXME Unroll? */\ | |
93 "1: \n\t"\ | |
94 "movq (%0, %%eax, 2), %%mm0 \n\t"\ | |
95 "movq 8(%0, %%eax, 2), %%mm1 \n\t"\ | |
96 "psraw $7, %%mm0 \n\t"\ | |
97 "psraw $7, %%mm1 \n\t"\ | |
98 "packuswb %%mm1, %%mm0 \n\t"\ | |
99 MOVNTQ(%%mm0, (%1, %%eax))\ | |
100 "addl $8, %%eax \n\t"\ | |
101 "jnc 1b \n\t" | |
102 | |
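/*
 * Editor's sketch, not part of the original file: the unscaled 1:1 path of
 * YSCALEYUV2YV121 in plain C.  The 16-bit intermediate samples are scaled so
 * that >>7 brings them back to 8 bits; packuswb supplies the clipping.
 */
static inline void yscaleyuv2yv121_c(short *src, unsigned char *dst, int dstW)
{
	int i;
	for(i=0; i<dstW; i++)
	{
		int val= src[i]>>7;				/* psraw $7 */
		if(val<0) val=0; else if(val>255) val=255;	/* packuswb */
		dst[i]= val;
	}
}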
103 /* | |
104 :: "m" (-lumFilterSize), "m" (-chrFilterSize), | |
105 "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4), | |
106 "r" (dest), "m" (dstW), | |
107 "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize) | |
108 : "%eax", "%ebx", "%ecx", "%edx", "%esi" | |
109 */ | |
110 #define YSCALEYUV2RGBX \ | |
111 "xorl %%eax, %%eax \n\t"\ | |
112 ".balign 16 \n\t"\ | |
113 "1: \n\t"\ | |
114 "movl %1, %%edx \n\t" /* -chrFilterSize */\ | |
115 "movl %3, %%ebx \n\t" /* chrMmxFilter+chrFilterSize */\
116 "movl %7, %%ecx \n\t" /* chrSrc+chrFilterSize */\
3344 | 117 "pxor %%mm3, %%mm3 \n\t"\ |
118 "pxor %%mm4, %%mm4 \n\t"\ | |
119 "2: \n\t"\ | |
120 "movl (%%ecx, %%edx, 4), %%esi \n\t"\ | |
121 "movq (%%ebx, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\ | |
122 "movq (%%esi, %%eax), %%mm2 \n\t" /* UsrcData */\ | |
123 "movq 4096(%%esi, %%eax), %%mm5 \n\t" /* VsrcData */\ | |
124 "pmulhw %%mm0, %%mm2 \n\t"\ | |
125 "pmulhw %%mm0, %%mm5 \n\t"\ | |
126 "paddw %%mm2, %%mm3 \n\t"\ | |
127 "paddw %%mm5, %%mm4 \n\t"\ | |
128 "addl $1, %%edx \n\t"\ | |
129 " jnz 2b \n\t"\ | |
130 \ | |
131 "movl %0, %%edx \n\t" /* -lumFilterSize */\ | |
132 "movl %2, %%ebx \n\t" /* lumMmxFilter+lumFilterSize */\ | |
133 "movl %6, %%ecx \n\t" /* lumSrc+lumFilterSize */\ | |
134 "pxor %%mm1, %%mm1 \n\t"\ | |
135 "pxor %%mm7, %%mm7 \n\t"\ | |
136 "2: \n\t"\ | |
137 "movl (%%ecx, %%edx, 4), %%esi \n\t"\ | |
138 "movq (%%ebx, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\ | |
139 "movq (%%esi, %%eax, 2), %%mm2 \n\t" /* Y1srcData */\ | |
140 "movq 8(%%esi, %%eax, 2), %%mm5 \n\t" /* Y2srcData */\ | |
141 "pmulhw %%mm0, %%mm2 \n\t"\ | |
142 "pmulhw %%mm0, %%mm5 \n\t"\ | |
143 "paddw %%mm2, %%mm1 \n\t"\ | |
144 "paddw %%mm5, %%mm7 \n\t"\ | |
145 "addl $1, %%edx \n\t"\ | |
146 " jnz 2b \n\t"\ | |
147 \ | |
4248 | 148 "psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\ |
149 "psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\ | |
3344 | 150 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ |
151 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\ | |
4248 | 152 "pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\ |
153 "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\ | |
3344 | 154 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\ |
4248 | 155 "pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\ |
156 "pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\ | |
157 "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\ | |
158 "psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\ | |
159 "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\ | |
160 "pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\ | |
3344 | 161 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\ |
162 "paddw %%mm3, %%mm4 \n\t"\ | |
163 "movq %%mm2, %%mm0 \n\t"\ | |
164 "movq %%mm5, %%mm6 \n\t"\ | |
165 "movq %%mm4, %%mm3 \n\t"\ | |
166 "punpcklwd %%mm2, %%mm2 \n\t"\ | |
167 "punpcklwd %%mm5, %%mm5 \n\t"\ | |
168 "punpcklwd %%mm4, %%mm4 \n\t"\ | |
169 "paddw %%mm1, %%mm2 \n\t"\ | |
170 "paddw %%mm1, %%mm5 \n\t"\ | |
171 "paddw %%mm1, %%mm4 \n\t"\ | |
172 "punpckhwd %%mm0, %%mm0 \n\t"\ | |
173 "punpckhwd %%mm6, %%mm6 \n\t"\ | |
174 "punpckhwd %%mm3, %%mm3 \n\t"\ | |
175 "paddw %%mm7, %%mm0 \n\t"\ | |
176 "paddw %%mm7, %%mm6 \n\t"\ | |
177 "paddw %%mm7, %%mm3 \n\t"\ | |
178 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\ | |
179 "packuswb %%mm0, %%mm2 \n\t"\ | |
180 "packuswb %%mm6, %%mm5 \n\t"\ | |
181 "packuswb %%mm3, %%mm4 \n\t"\ | |
182 "pxor %%mm7, %%mm7 \n\t" | |
183 | |
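/*
 * Editor's sketch, not part of the original file: the per-pixel conversion
 * that YSCALEYUV2RGBX approximates with pmulhw and the w80/w400/yCoeff/
 * ubCoeff/ugCoeff/vgCoeff/vrCoeff constants defined elsewhere in swscale.
 * Assuming the usual ITU-R BT.601 integer factors (the exact fixed-point
 * scaling is an assumption here):
 */
static inline void yuv2rgb_pixel_c(int y, int u, int v, int *r, int *g, int *b)
{
	int c= y - 16, d= u - 128, e= v - 128;
	*r= (298*c         + 409*e + 128)>>8;	/* Y*yCoeff + V*vrCoeff */
	*g= (298*c - 100*d - 208*e + 128)>>8;	/* Y*yCoeff + U*ugCoeff + V*vgCoeff (the g coefficients are negative) */
	*b= (298*c + 516*d         + 128)>>8;	/* Y*yCoeff + U*ubCoeff */
	/* the caller still has to clip r,g,b to 0..255 (packuswb in the asm) */
}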
184 #define FULL_YSCALEYUV2RGB \
185 "pxor %%mm7, %%mm7 \n\t"\
186 "movd %6, %%mm6 \n\t" /*yalpha1*/\
187 "punpcklwd %%mm6, %%mm6 \n\t"\
188 "punpcklwd %%mm6, %%mm6 \n\t"\
189 "movd %7, %%mm5 \n\t" /*uvalpha1*/\
190 "punpcklwd %%mm5, %%mm5 \n\t"\
191 "punpcklwd %%mm5, %%mm5 \n\t"\
192 "xorl %%eax, %%eax \n\t"\
193 ".balign 16 \n\t"\
194 "1: \n\t"\
195 "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
196 "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
197 "movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
198 "movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
199 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
200 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
201 "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
202 "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
203 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
204 "movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
205 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
206 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
207 "movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
208 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
209 "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
4248 | 210 "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
211 "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
212 "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
213 \
214 \
215 "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
216 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
4248 | 217 "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
218 "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
4248 | 219 "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
220 "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
4248 | 221 "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
222 \
223 \
224 "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
4248 | 225 "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
226 "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
227 "paddw %%mm1, %%mm3 \n\t" /* B*/\
228 "paddw %%mm1, %%mm0 \n\t" /* R*/\
229 "packuswb %%mm3, %%mm3 \n\t"\
230 \
231 "packuswb %%mm0, %%mm0 \n\t"\
232 "paddw %%mm4, %%mm2 \n\t"\
233 "paddw %%mm2, %%mm1 \n\t" /* G*/\
234 \
235 "packuswb %%mm1, %%mm1 \n\t"
236
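/*
 * Editor's sketch, not part of the original file: the vertical blend that
 * FULL_YSCALEYUV2RGB and YSCALEYUV2RGB perform between two source lines
 * before the colour conversion, mirrored literally from the asm: psubw/pmulhw
 * form (buf0-buf1)*alpha>>16, psraw $4 rescales buf1, and paddw combines
 * them.  The chroma path does the same with uvalpha.  The function name is
 * illustrative only; alpha is the fixed-point weight the caller loads into
 * operand %6 / %7 (pmulhw takes the product >>16).
 */
static inline int blend_lines_c(short *buf0, short *buf1, int alpha, int i)
{
	return (buf1[i]>>4) + (((buf0[i]-buf1[i])*alpha)>>16);
}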
237 #define YSCALEYUV2RGB \
238 "movd %6, %%mm6 \n\t" /*yalpha1*/\
239 "punpcklwd %%mm6, %%mm6 \n\t"\
240 "punpcklwd %%mm6, %%mm6 \n\t"\
6554 | 241 "movq %%mm6, 3968(%2) \n\t"\
242 "movd %7, %%mm5 \n\t" /*uvalpha1*/\
243 "punpcklwd %%mm5, %%mm5 \n\t"\
244 "punpcklwd %%mm5, %%mm5 \n\t"\
6554 | 245 "movq %%mm5, 3976(%2) \n\t"\
246 "xorl %%eax, %%eax \n\t"\
247 ".balign 16 \n\t"\
248 "1: \n\t"\
249 "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
250 "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
251 "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
252 "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
253 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
254 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
6554 | 255 "movq 3976(%2), %%mm0 \n\t"\
256 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
257 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
258 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
259 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
260 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
261 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
4248 | 262 "psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
263 "psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
264 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
265 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
4248 | 266 "pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
267 "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
268 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
269 "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
270 "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
271 "movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax]*/\
272 "movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax]*/\
273 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
274 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
6554 | 275 "pmulhw 3968(%2), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
276 "pmulhw 3968(%2), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
277 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
278 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
279 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
280 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
4248 | 281 "pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
282 "pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
283 "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
284 "psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
285 "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
286 "pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
287 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
288 "paddw %%mm3, %%mm4 \n\t"\
289 "movq %%mm2, %%mm0 \n\t"\
290 "movq %%mm5, %%mm6 \n\t"\
291 "movq %%mm4, %%mm3 \n\t"\
292 "punpcklwd %%mm2, %%mm2 \n\t"\
293 "punpcklwd %%mm5, %%mm5 \n\t"\
294 "punpcklwd %%mm4, %%mm4 \n\t"\
295 "paddw %%mm1, %%mm2 \n\t"\
296 "paddw %%mm1, %%mm5 \n\t"\
297 "paddw %%mm1, %%mm4 \n\t"\
298 "punpckhwd %%mm0, %%mm0 \n\t"\
299 "punpckhwd %%mm6, %%mm6 \n\t"\
300 "punpckhwd %%mm3, %%mm3 \n\t"\
301 "paddw %%mm7, %%mm0 \n\t"\
302 "paddw %%mm7, %%mm6 \n\t"\
303 "paddw %%mm7, %%mm3 \n\t"\
304 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
305 "packuswb %%mm0, %%mm2 \n\t"\
306 "packuswb %%mm6, %%mm5 \n\t"\
307 "packuswb %%mm3, %%mm4 \n\t"\
308 "pxor %%mm7, %%mm7 \n\t"
309
310 #define YSCALEYUV2RGB1 \
311 "xorl %%eax, %%eax \n\t"\
312 ".balign 16 \n\t"\
313 "1: \n\t"\
314 "movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
315 "movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
316 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
317 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
4248 | 318 "psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
319 "psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
320 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
321 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
4248 | 322 "pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
323 "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
324 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
325 "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
326 "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
327 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
328 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
4248 | 329 "pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
330 "pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
331 "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
332 "psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
333 "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
334 "pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
335 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
336 "paddw %%mm3, %%mm4 \n\t"\
337 "movq %%mm2, %%mm0 \n\t"\
338 "movq %%mm5, %%mm6 \n\t"\
339 "movq %%mm4, %%mm3 \n\t"\
340 "punpcklwd %%mm2, %%mm2 \n\t"\
341 "punpcklwd %%mm5, %%mm5 \n\t"\
342 "punpcklwd %%mm4, %%mm4 \n\t"\
343 "paddw %%mm1, %%mm2 \n\t"\
344 "paddw %%mm1, %%mm5 \n\t"\
345 "paddw %%mm1, %%mm4 \n\t"\
346 "punpckhwd %%mm0, %%mm0 \n\t"\
347 "punpckhwd %%mm6, %%mm6 \n\t"\
348 "punpckhwd %%mm3, %%mm3 \n\t"\
349 "paddw %%mm7, %%mm0 \n\t"\
350 "paddw %%mm7, %%mm6 \n\t"\
351 "paddw %%mm7, %%mm3 \n\t"\
352 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
353 "packuswb %%mm0, %%mm2 \n\t"\
354 "packuswb %%mm6, %%mm5 \n\t"\
355 "packuswb %%mm3, %%mm4 \n\t"\
356 "pxor %%mm7, %%mm7 \n\t"
357
358 // do vertical chrominance interpolation
359 #define YSCALEYUV2RGB1b \
360 "xorl %%eax, %%eax \n\t"\
361 ".balign 16 \n\t"\
362 "1: \n\t"\
363 "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
364 "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
365 "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
366 "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
2576 | 367 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
368 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
3344 | 369 "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
370 "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
4248 | 371 "psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
372 "psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
373 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
374 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
4248 | 375 "pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
376 "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
377 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
378 "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
379 "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
380 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
381 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
4248 | 382 "pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
383 "pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
384 "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
385 "psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
386 "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
387 "pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
388 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
389 "paddw %%mm3, %%mm4 \n\t"\
390 "movq %%mm2, %%mm0 \n\t"\
391 "movq %%mm5, %%mm6 \n\t"\
392 "movq %%mm4, %%mm3 \n\t"\
393 "punpcklwd %%mm2, %%mm2 \n\t"\
394 "punpcklwd %%mm5, %%mm5 \n\t"\
395 "punpcklwd %%mm4, %%mm4 \n\t"\
396 "paddw %%mm1, %%mm2 \n\t"\
397 "paddw %%mm1, %%mm5 \n\t"\
398 "paddw %%mm1, %%mm4 \n\t"\
399 "punpckhwd %%mm0, %%mm0 \n\t"\
400 "punpckhwd %%mm6, %%mm6 \n\t"\
401 "punpckhwd %%mm3, %%mm3 \n\t"\
402 "paddw %%mm7, %%mm0 \n\t"\
403 "paddw %%mm7, %%mm6 \n\t"\
404 "paddw %%mm7, %%mm3 \n\t"\
405 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
406 "packuswb %%mm0, %%mm2 \n\t"\
407 "packuswb %%mm6, %%mm5 \n\t"\
408 "packuswb %%mm3, %%mm4 \n\t"\
409 "pxor %%mm7, %%mm7 \n\t"
410
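/*
 * Editor's sketch, not part of the original file: the chroma handling of the
 * two 1:1 variants above.  YSCALEYUV2RGB1 reads a single chroma line and
 * rescales it with psraw $4; YSCALEYUV2RGB1b averages two neighbouring chroma
 * lines, which is why it shifts by 5 instead of 4.  The FIXME about overflow
 * refers to the wrapping paddw: if uvbuf0[i]+uvbuf1[i] leaves the signed
 * 16-bit range the average is wrong.  (The asm uses a logical psrlw; the >>
 * below is only an approximation for the sketch.)
 */
static inline int chroma_sample_c(short *uvbuf0, short *uvbuf1, int i, int interpolate)
{
	if(interpolate)
		return (uvbuf0[i] + uvbuf1[i])>>5;	/* paddw + psrlw $5 */
	else
		return uvbuf0[i]>>4;			/* psraw $4 */
}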
411 #define WRITEBGR32 \
412 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
413 "movq %%mm2, %%mm1 \n\t" /* B */\
414 "movq %%mm5, %%mm6 \n\t" /* R */\
415 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
416 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
417 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
418 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
419 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
420 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
421 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
422 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
423 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
424 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
425 \
426 MOVNTQ(%%mm0, (%4, %%eax, 4))\
427 MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
428 MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
429 MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
430 \
431 "addl $8, %%eax \n\t"\
432 "cmpl %5, %%eax \n\t"\
433 " jb 1b \n\t"
434
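/*
 * Editor's sketch, not part of the original file: the byte layout WRITEBGR32
 * produces.  The punpck ladder interleaves the packed B, G and R bytes with a
 * zero byte so that each pixel occupies 4 bytes in memory, B first.
 */
static inline void writebgr32_c(unsigned char *dest, int i, int b, int g, int r)
{
	dest[4*i+0]= b;
	dest[4*i+1]= g;
	dest[4*i+2]= r;
	dest[4*i+3]= 0;	/* the unused high byte, %%mm7 in the asm */
}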
435 #define WRITEBGR16 \
4248 | 436 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
437 "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
438 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
2669 | 439 "psrlq $3, %%mm2 \n\t"\
440 \
441 "movq %%mm2, %%mm1 \n\t"\
442 "movq %%mm4, %%mm3 \n\t"\
443 \
2669 | 444 "punpcklbw %%mm7, %%mm3 \n\t"\
445 "punpcklbw %%mm5, %%mm2 \n\t"\
446 "punpckhbw %%mm7, %%mm4 \n\t"\
447 "punpckhbw %%mm5, %%mm1 \n\t"\
448 \
2669 | 449 "psllq $3, %%mm3 \n\t"\
450 "psllq $3, %%mm4 \n\t"\
451 \
452 "por %%mm3, %%mm2 \n\t"\
453 "por %%mm4, %%mm1 \n\t"\
454 \
455 MOVNTQ(%%mm2, (%4, %%eax, 2))\
456 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
457 \
458 "addl $8, %%eax \n\t"\
459 "cmpl %5, %%eax \n\t"\
460 " jb 1b \n\t"
461
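/*
 * Editor's sketch, not part of the original file: the 5-6-5 packing that
 * WRITEBGR16 builds with pand/psrlq/psllq/por.  bF8 and bFC are assumed to be
 * the 0xF8 and 0xFC byte masks defined elsewhere in swscale.
 */
static inline unsigned short writebgr16_c(int b, int g, int r)
{
	return (unsigned short)(((r&0xF8)<<8) | ((g&0xFC)<<3) | (b>>3));
}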
462 #define WRITEBGR15 \
4248 | 463 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
464 "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
465 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
2669 | 466 "psrlq $3, %%mm2 \n\t"\
467 "psrlq $1, %%mm5 \n\t"\
468 \
2669 | 469 "movq %%mm2, %%mm1 \n\t"\
470 "movq %%mm4, %%mm3 \n\t"\
471 \
2669 | 472 "punpcklbw %%mm7, %%mm3 \n\t"\
473 "punpcklbw %%mm5, %%mm2 \n\t"\
474 "punpckhbw %%mm7, %%mm4 \n\t"\
475 "punpckhbw %%mm5, %%mm1 \n\t"\
476 \
2669 | 477 "psllq $2, %%mm3 \n\t"\
478 "psllq $2, %%mm4 \n\t"\
479 \
480 "por %%mm3, %%mm2 \n\t"\
481 "por %%mm4, %%mm1 \n\t"\
482 \
483 MOVNTQ(%%mm2, (%4, %%eax, 2))\
484 MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
485 \
486 "addl $8, %%eax \n\t"\
487 "cmpl %5, %%eax \n\t"\
488 " jb 1b \n\t"
2669 | 489 |
2730 | 490 #define WRITEBGR24OLD \ |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
491 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
492 "movq %%mm2, %%mm1 \n\t" /* B */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
493 "movq %%mm5, %%mm6 \n\t" /* R */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
494 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
495 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
496 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
497 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
498 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
499 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\ |
2326 | 500 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\ |
501 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\ | |
502 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\ | |
503 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\ | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
504 \ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
505 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
506 "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\ |
4248 | 507 "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\ |
508 "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\ | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
509 "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
510 "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
511 "psllq $48, %%mm2 \n\t" /* GB000000 1 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
512 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
513 \ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
514 "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
515 "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
516 "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
517 "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\ |
4248 | 518 "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\ |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
519 "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
520 "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\ |
4248 | 521 "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\ |
522 "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\ | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
523 "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
524 "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
525 "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
526 "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
527 \ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
528 "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
529 "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
530 "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\ |
4248 | 531 "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\ |
532 "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\ | |
533 "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\ |
534 "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\ |
535 "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\ |
536 \ |
2728 | 537 MOVNTQ(%%mm0, (%%ebx))\ |
538 MOVNTQ(%%mm2, 8(%%ebx))\ | |
539 MOVNTQ(%%mm3, 16(%%ebx))\ | |
540 "addl $24, %%ebx \n\t"\ | |
541 \ |
542 "addl $8, %%eax \n\t"\ |
543 "cmpl %5, %%eax \n\t"\ |
544 " jb 1b \n\t" |
545 |
2730 | 546 #define WRITEBGR24MMX \ |
547 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ | |
548 "movq %%mm2, %%mm1 \n\t" /* B */\ | |
549 "movq %%mm5, %%mm6 \n\t" /* R */\ | |
550 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\ | |
551 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\ | |
552 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\ | |
553 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\ | |
554 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\ | |
555 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\ | |
556 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\ | |
557 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\ | |
558 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\ | |
559 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\ | |
560 \ | |
561 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\ | |
562 "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\ | |
563 "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\ | |
564 "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\ | |
565 \ | |
566 "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\ | |
567 "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\ | |
568 "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\ | |
569 "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\ | |
570 \ | |
571 "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\ | |
572 "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\ | |
573 "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\ | |
574 "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\ | |
575 \ | |
576 "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\ | |
577 "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\ | |
578 "psllq $40, %%mm2 \n\t" /* GB000000 1 */\ | |
579 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\ | |
580 MOVNTQ(%%mm0, (%%ebx))\ | |
581 \ | |
582 "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\ | |
583 "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\ | |
584 "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\ | |
585 "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\ | |
586 MOVNTQ(%%mm6, 8(%%ebx))\ | |
587 \ | |
588 "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\ | |
589 "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\ | |
590 "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\ | |
591 MOVNTQ(%%mm5, 16(%%ebx))\ | |
592 \ | |
593 "addl $24, %%ebx \n\t"\ | |
594 \ | |
595 "addl $8, %%eax \n\t"\ | |
596 "cmpl %5, %%eax \n\t"\ | |
597 " jb 1b \n\t" | |
598 | |
599 #define WRITEBGR24MMX2 \ | |
600 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ | |
4248 | 601 "movq "MANGLE(M24A)", %%mm0 \n\t"\ |
602 "movq "MANGLE(M24C)", %%mm7 \n\t"\ | |
2730 | 603 "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\ |
604 "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\ | |
605 "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\ | |
606 \ | |
607 "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\ | |
608 "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\ | |
609 "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\ | |
610 \ | |
611 "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\ | |
612 "por %%mm1, %%mm6 \n\t"\ | |
613 "por %%mm3, %%mm6 \n\t"\ | |
614 MOVNTQ(%%mm6, (%%ebx))\ | |
615 \ | |
616 "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\ | |
617 "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\ | |
618 "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\ | |
619 "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\ | |
620 \ | |
4248 | 621 "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\ |
2730 | 622 "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\ |
623 "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\ | |
624 \ | |
625 "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\ | |
626 "por %%mm3, %%mm6 \n\t"\ | |
627 MOVNTQ(%%mm6, 8(%%ebx))\ | |
628 \ | |
629 "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B7 B6 */\ |
630 "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\ | |
631 "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\ | |
632 \ | |
633 "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\ | |
634 "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\ | |
4248 | 635 "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\ |
2730 | 636 \ |
637 "por %%mm1, %%mm3 \n\t"\ | |
638 "por %%mm3, %%mm6 \n\t"\ | |
639 MOVNTQ(%%mm6, 16(%%ebx))\ | |
640 \ | |
641 "addl $24, %%ebx \n\t"\ | |
642 \ | |
643 "addl $8, %%eax \n\t"\ | |
644 "cmpl %5, %%eax \n\t"\ | |
645 " jb 1b \n\t" | |
646 | |
647 #ifdef HAVE_MMX2 | |
3126 | 648 #undef WRITEBGR24 |
2730 | 649 #define WRITEBGR24 WRITEBGR24MMX2 |
650 #else | |
3126 | 651 #undef WRITEBGR24 |
2730 | 652 #define WRITEBGR24 WRITEBGR24MMX |
653 #endif | |
654 | |
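/*
 * Illustrative C sketch (not part of the original file) of the packing that
 * the WRITEBGR24MMX / WRITEBGR24MMX2 macros above perform for one group of
 * 8 pixels: registers holding 8 blue, 8 green and 8 red bytes are
 * interleaved into 24 destination bytes in B,G,R memory order, as the
 * register-content comments in the macros indicate. The helper name and the
 * separate byte arrays are hypothetical; the MMX code works on packed
 * registers instead. Fixed-width types come from inttypes.h as elsewhere
 * in this file.
 */
static void write_bgr24_c(uint8_t *dst, const uint8_t *b, const uint8_t *g, const uint8_t *r)
{
	int i;
	for(i=0; i<8; i++)
	{
		dst[3*i + 0]= b[i]; /* lowest address: blue */
		dst[3*i + 1]= g[i];
		dst[3*i + 2]= r[i];
	}
}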
3344 | 655 static inline void RENAME(yuv2yuvX)(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, |
656 int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, | |
657 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW, |
3344 | 658 int16_t * lumMmxFilter, int16_t * chrMmxFilter) |
2519 | 659 { |
3344 | 660 #ifdef HAVE_MMX |
661 if(uDest != NULL) | |
662 { | |
663 asm volatile( | |
664 YSCALEYUV2YV12X(0) | |
665 :: "m" (-chrFilterSize), "r" (chrSrc+chrFilterSize), | |
666 "r" (chrMmxFilter+chrFilterSize*4), "r" (uDest), "m" (chrDstW) |
3344 | 667 : "%eax", "%edx", "%esi" |
668 ); | |
2519 | 669 |
3344 | 670 asm volatile( |
671 YSCALEYUV2YV12X(4096) | |
672 :: "m" (-chrFilterSize), "r" (chrSrc+chrFilterSize), | |
673 "r" (chrMmxFilter+chrFilterSize*4), "r" (vDest), "m" (chrDstW) |
3344 | 674 : "%eax", "%edx", "%esi" |
675 ); | |
676 } | |
2521 | 677 |
3344 | 678 asm volatile( |
679 YSCALEYUV2YV12X(0) | |
680 :: "m" (-lumFilterSize), "r" (lumSrc+lumFilterSize), | |
681 "r" (lumMmxFilter+lumFilterSize*4), "r" (dest), "m" (dstW) | |
682 : "%eax", "%edx", "%esi" | |
683 ); | |
684 #else | |
6540 | 685 yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize, |
3352 | 686 chrFilter, chrSrc, chrFilterSize, |
6540 | 687 dest, uDest, vDest, dstW, chrDstW); |
3344 | 688 #endif |
689 } | |
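/*
 * Illustrative C sketch (hypothetical helper, not part of the original file)
 * of the vertical FIR that the MMX YSCALEYUV2YV12X path and the C fallback
 * yuv2yuvXinC compute per output sample: the intermediate samples are stored
 * <<7 and the filter coefficients sum to 4096, so the accumulated product is
 * assumed to be normalized with a >>19 shift before clipping to 8 bit.
 */
static void vertical_filter_c(uint8_t *dest, int16_t **src, int16_t *filter, int filterSize, int dstW)
{
	int i, j;
	for(i=0; i<dstW; i++)
	{
		int val= 0;
		for(j=0; j<filterSize; j++)
			val+= src[j][i]*filter[j];	/* accumulate the weighted lines */
		val>>= 19;				/* drop the <<7 and the 12 bit weight */
		if(val<0) val= 0;
		else if(val>255) val= 255;
		dest[i]= val;
	}
}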
690 | |
691 static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc, | |
692 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW) |
3344 | 693 { |
694 #ifdef HAVE_MMX | |
695 if(uDest != NULL) | |
696 { | |
697 asm volatile( | |
698 YSCALEYUV2YV121 | |
699 :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW), |
700 "g" (-chrDstW) |
3344 | 701 : "%eax" |
702 ); | |
703 | |
704 asm volatile( | |
705 YSCALEYUV2YV121 | |
706 :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW), |
707 "g" (-chrDstW) |
3344 | 708 : "%eax" |
709 ); | |
2519 | 710 } |
3344 | 711 |
712 asm volatile( | |
713 YSCALEYUV2YV121 | |
714 :: "r" (lumSrc + dstW), "r" (dest + dstW), | |
715 "g" (-dstW) | |
716 : "%eax" | |
717 ); | |
718 #else | |
719 int i; | |
720 for(i=0; i<dstW; i++) | |
721 { | |
722 int val= lumSrc[i]>>7; | |
6503 | 723 |
724 if(val&256){ | |
725 if(val<0) val=0; | |
726 else val=255; | |
727 } | |
3344 | 728 |
6503 | 729 dest[i]= val; |
3344 | 730 } |
731 | |
732 if(uDest != NULL) | |
733 for(i=0; i<chrDstW; i++) |
3344 | 734 { |
735 int u=chrSrc[i]>>7; | |
736 int v=chrSrc[i + 2048]>>7; | |
737 | |
6503 | 738 if((u|v)&256){ |
739 if(u<0) u=0; | |
740 else if (u>255) u=255; | |
741 if(v<0) v=0; | |
742 else if (v>255) v=255; | |
743 } | |
744 | |
745 uDest[i]= u; | |
746 vDest[i]= v; | |
3344 | 747 } |
748 #endif | |
2519 | 749 } |
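/*
 * Note on the C fallback above (illustrative, not in the original source):
 * after the >>7 an int16_t sample lands in [-256, 255], so testing bit 8
 * (val&256) flags every value outside [0, 255] with a single check, and the
 * sign test then selects 0 or 255. A plain equivalent clamp would be:
 */
static inline int clip_uint8_c(int val)
{
	if(val < 0)   return 0;
	if(val > 255) return 255;
	return val;
}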
750 | |
3344 | 751 |
752 /** |
753 * vertical scale YV12 to RGB |
754 */ |
6578 | 755 static inline void RENAME(yuv2rgbX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, |
3344 | 756 int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, |
6578 | 757 uint8_t *dest, int dstW, int16_t * lumMmxFilter, int16_t * chrMmxFilter, int dstY) |
3344 | 758 { |
6578 | 759 switch(c->dstFormat) |
3344 | 760 { |
761 #ifdef HAVE_MMX | |
6578 | 762 case IMGFMT_BGR32: |
3344 | 763 { |
764 asm volatile( | |
765 YSCALEYUV2RGBX | |
766 WRITEBGR32 | |
767 | |
768 :: "m" (-lumFilterSize), "m" (-chrFilterSize), | |
769 "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4), | |
770 "r" (dest), "m" (dstW), | |
771 "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize) | |
772 : "%eax", "%ebx", "%ecx", "%edx", "%esi" | |
773 ); | |
774 } | |
6578 | 775 break; |
776 case IMGFMT_BGR24: | |
3344 | 777 { |
778 asm volatile( | |
779 YSCALEYUV2RGBX | |
780 "leal (%%eax, %%eax, 2), %%ebx \n\t" //FIXME optimize | |
781 "addl %4, %%ebx \n\t" | |
782 WRITEBGR24 | |
783 | |
784 :: "m" (-lumFilterSize), "m" (-chrFilterSize), | |
785 "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4), | |
786 "r" (dest), "m" (dstW), | |
787 "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize) | |
788 : "%eax", "%ebx", "%ecx", "%edx", "%esi" | |
789 ); | |
790 } | |
6578 | 791 break; |
792 case IMGFMT_BGR15: | |
3344 | 793 { |
794 asm volatile( | |
795 YSCALEYUV2RGBX | |
796 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ | |
797 #ifdef DITHER1XBPP | |
4248 | 798 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
799 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
800 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
3344 | 801 #endif |
802 | |
803 WRITEBGR15 | |
804 | |
805 :: "m" (-lumFilterSize), "m" (-chrFilterSize), | |
806 "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4), | |
807 "r" (dest), "m" (dstW), | |
808 "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize) | |
809 : "%eax", "%ebx", "%ecx", "%edx", "%esi" | |
810 ); | |
811 } | |
6578 | 812 break; |
813 case IMGFMT_BGR16: | |
3344 | 814 { |
815 asm volatile( | |
816 YSCALEYUV2RGBX | |
817 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ | |
818 #ifdef DITHER1XBPP | |
4248 | 819 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
820 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
821 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
3344 | 822 #endif |
823 | |
824 WRITEBGR16 | |
825 | |
826 :: "m" (-lumFilterSize), "m" (-chrFilterSize), | |
827 "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4), | |
828 "r" (dest), "m" (dstW), | |
829 "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize) | |
830 : "%eax", "%ebx", "%ecx", "%edx", "%esi" | |
831 ); | |
832 } | |
6578 | 833 break; |
3344 | 834 #endif |
6578 | 835 default: |
836 yuv2rgbXinC(c, lumFilter, lumSrc, lumFilterSize, | |
837 chrFilter, chrSrc, chrFilterSize, | |
838 dest, dstW, dstY); | |
839 break; | |
840 } | |
3344 | 841 } |
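/*
 * Illustrative sketch (hypothetical helpers, not part of the original file)
 * of the 15/16 bit packing done by the WRITEBGR15/WRITEBGR16 paths above
 * after the optional ordered dither, assuming the usual layout with blue in
 * the low bits, which is also what the clip_table15b/g/r and
 * clip_table16b/g/r lookups in the C code further down produce.
 */
static inline uint16_t pack_rgb555_c(int r, int g, int b)
{
	return (uint16_t)(((r>>3)<<10) | ((g>>3)<<5) | (b>>3));
}

static inline uint16_t pack_rgb565_c(int r, int g, int b)
{
	return (uint16_t)(((r>>3)<<11) | ((g>>2)<<5) | (b>>3));
}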
842 | |
843 /** | |
844 * vertical bilinear scale YV12 to RGB | |
845 */ | |
6578 | 846 static inline void RENAME(yuv2rgb2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1, |
847 uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) | |
848 { |
849 int yalpha1=yalpha^4095; |
850 int uvalpha1=uvalpha^4095; |
6578 | 851 int i; |
852 |
6578 | 853 #if 0 // isn't used |
4467 | 854 if(flags&SWS_FULL_CHR_H_INT) |
855 { |
6578 | 856 switch(dstFormat) |
857 { | |
858 #ifdef HAVE_MMX |
6578 | 859 case IMGFMT_BGR32: |
860 asm volatile( |
861 |
862 |
863 FULL_YSCALEYUV2RGB |
864 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG |
865 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0 |
866 |
867 "movq %%mm3, %%mm1 \n\t" |
868 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0 |
869 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0 |
870 |
871 MOVNTQ(%%mm3, (%4, %%eax, 4)) |
872 MOVNTQ(%%mm1, 8(%4, %%eax, 4)) |
873 |
874 "addl $4, %%eax \n\t" |
875 "cmpl %5, %%eax \n\t" |
876 " jb 1b \n\t" |
877 |
878 |
3209 | 879 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
880 "m" (yalpha1), "m" (uvalpha1) |
881 : "%eax" |
882 ); |
6578 | 883 break; |
884 case IMGFMT_BGR24: | |
885 asm volatile( |
886 |
887 FULL_YSCALEYUV2RGB |
888 |
889 // lsb ... msb |
890 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG |
891 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0 |
892 |
893 "movq %%mm3, %%mm1 \n\t" |
894 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0 |
895 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0 |
896 |
897 "movq %%mm3, %%mm2 \n\t" // BGR0BGR0 |
898 "psrlq $8, %%mm3 \n\t" // GR0BGR00 |
4248 | 899 "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000 |
900 "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00 | |
901 "por %%mm2, %%mm3 \n\t" // BGRBGR00 |
902 "movq %%mm1, %%mm2 \n\t" |
903 "psllq $48, %%mm1 \n\t" // 000000BG |
904 "por %%mm1, %%mm3 \n\t" // BGRBGRBG |
905 |
906 "movq %%mm2, %%mm1 \n\t" // BGR0BGR0 |
907 "psrld $16, %%mm2 \n\t" // R000R000 |
908 "psrlq $24, %%mm1 \n\t" // 0BGR0000 |
909 "por %%mm2, %%mm1 \n\t" // RBGRR000 |
910 |
911 "movl %4, %%ebx \n\t" |
912 "addl %%eax, %%ebx \n\t" |
913 |
914 #ifdef HAVE_MMX2 |
915 //FIXME Alignment |
916 "movntq %%mm3, (%%ebx, %%eax, 2)\n\t" |
917 "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t" |
918 #else |
919 "movd %%mm3, (%%ebx, %%eax, 2) \n\t" |
920 "psrlq $32, %%mm3 \n\t" |
921 "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t" |
922 "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t" |
923 #endif |
924 "addl $4, %%eax \n\t" |
925 "cmpl %5, %%eax \n\t" |
926 " jb 1b \n\t" |
927 |
3209 | 928 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW), |
929 "m" (yalpha1), "m" (uvalpha1) |
930 : "%eax", "%ebx" |
931 ); |
6578 | 932 break; |
933 case IMGFMT_BGR15: | |
934 asm volatile( |
935 |
936 FULL_YSCALEYUV2RGB |
937 #ifdef DITHER1XBPP |
4248 | 938 "paddusb "MANGLE(g5Dither)", %%mm1\n\t" |
939 "paddusb "MANGLE(r5Dither)", %%mm0\n\t" | |
940 "paddusb "MANGLE(b5Dither)", %%mm3\n\t" | |
941 #endif |
942 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G |
943 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B |
944 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R |
945 |
946 "psrlw $3, %%mm3 \n\t" |
947 "psllw $2, %%mm1 \n\t" |
948 "psllw $7, %%mm0 \n\t" |
4248 | 949 "pand "MANGLE(g15Mask)", %%mm1 \n\t" |
950 "pand "MANGLE(r15Mask)", %%mm0 \n\t" | |
951 |
952 "por %%mm3, %%mm1 \n\t" |
953 "por %%mm1, %%mm0 \n\t" |
954 |
955 MOVNTQ(%%mm0, (%4, %%eax, 2)) |
956 |
957 "addl $4, %%eax \n\t" |
958 "cmpl %5, %%eax \n\t" |
959 " jb 1b \n\t" |
960 |
3209 | 961 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
962 "m" (yalpha1), "m" (uvalpha1) |
963 : "%eax" |
964 ); |
6578 | 965 break; |
966 case IMGFMT_BGR16: | |
967 asm volatile( |
968 |
969 FULL_YSCALEYUV2RGB |
970 #ifdef DITHER1XBPP |
4248 | 971 "paddusb "MANGLE(g6Dither)", %%mm1\n\t" |
972 "paddusb "MANGLE(r5Dither)", %%mm0\n\t" | |
973 "paddusb "MANGLE(b5Dither)", %%mm3\n\t" | |
974 #endif |
975 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G |
976 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B |
977 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R |
978 |
979 "psrlw $3, %%mm3 \n\t" |
980 "psllw $3, %%mm1 \n\t" |
981 "psllw $8, %%mm0 \n\t" |
4248 | 982 "pand "MANGLE(g16Mask)", %%mm1 \n\t" |
983 "pand "MANGLE(r16Mask)", %%mm0 \n\t" | |
984 |
985 "por %%mm3, %%mm1 \n\t" |
986 "por %%mm1, %%mm0 \n\t" |
987 |
988 MOVNTQ(%%mm0, (%4, %%eax, 2)) |
989 |
990 "addl $4, %%eax \n\t" |
991 "cmpl %5, %%eax \n\t" |
992 " jb 1b \n\t" |
993 |
3209 | 994 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
995 "m" (yalpha1), "m" (uvalpha1) |
996 : "%eax" |
997 ); |
6578 | 998 break; |
999 #endif | |
1000 case IMGFMT_RGB32: | |
1001 #ifndef HAVE_MMX | |
1002 case IMGFMT_BGR32: | |
1003 #endif | |
1004 if(dstFormat==IMGFMT_BGR32) |
1005 { |
4794 | 1006 int i; |
4793 | 1007 #ifdef WORDS_BIGENDIAN |
1008 dest++; | |
1009 #endif | |
3209 | 1010 for(i=0;i<dstW;i++){ |
1011 // vertical linear interpolation && yuv2rgb in a single step: |
1012 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; |
1013 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); |
1014 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); |
2503 | 1015 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)]; |
1016 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)]; | |
1017 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)]; | |
1018 dest+= 4; |
1019 } |
1020 } |
1021 else if(dstFormat==IMGFMT_BGR24) |
1022 { |
1023 int i; |
1024 for(i=0;i<dstW;i++){ |
1025 // vertical linear interpolation && yuv2rgb in a single step: |
1026 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; |
1027 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); |
1028 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); |
1029 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)]; |
1030 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)]; |
1031 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)]; |
1032 dest+= 3; |
1033 } |
1034 } |
1035 else if(dstFormat==IMGFMT_BGR16) |
1036 { |
2671 | 1037 int i; |
3209 | 1038 for(i=0;i<dstW;i++){ |
1039 // vertical linear interpolation && yuv2rgb in a single step: |
1040 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; |
1041 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); |
1042 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); |
1043 |
1044 ((uint16_t*)dest)[i] = |
2584 | 1045 clip_table16b[(Y + yuvtab_40cf[U]) >>13] | |
1046 clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] | | |
1047 clip_table16r[(Y + yuvtab_3343[V]) >>13]; | |
1048 } |
1049 } |
1050 else if(dstFormat==IMGFMT_BGR15) |
1051 { |
2671 | 1052 int i; |
3209 | 1053 for(i=0;i<dstW;i++){ |
1054 // vertical linear interpolation && yuv2rgb in a single step: |
1055 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; |
1056 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); |
1057 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); |
1058 |
1059 ((uint16_t*)dest)[i] = |
2584 | 1060 clip_table15b[(Y + yuvtab_40cf[U]) >>13] | |
1061 clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] | | |
1062 clip_table15r[(Y + yuvtab_3343[V]) >>13]; | |
1063 } |
1064 } |
1065 }//FULL_UV_IPOL |
1066 else |
1067 { |
6578 | 1068 #endif // if 0 |
1069 #ifdef HAVE_MMX |
6578 | 1070 switch(c->dstFormat) |
1071 { | |
1072 case IMGFMT_BGR32: | |
1073 asm volatile( |
1074 YSCALEYUV2RGB |
1075 WRITEBGR32 |
1076 |
3209 | 1077 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
1078 "m" (yalpha1), "m" (uvalpha1) |
1079 : "%eax" |
1080 ); |
6578 | 1081 return; |
1082 case IMGFMT_BGR24: | |
1083 asm volatile( |
2728 | 1084 "movl %4, %%ebx \n\t" |
1085 YSCALEYUV2RGB |
1086 WRITEBGR24 |
1087 |
3209 | 1088 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW), |
1089 "m" (yalpha1), "m" (uvalpha1) |
1090 : "%eax", "%ebx" |
1091 ); |
6578 | 1092 return; |
1093 case IMGFMT_BGR15: | |
1094 asm volatile( |
1095 YSCALEYUV2RGB |
1096 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1097 #ifdef DITHER1XBPP |
4248 | 1098 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1099 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1100 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1101 #endif |
1102 |
1103 WRITEBGR15 |
1104 |
3209 | 1105 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
1106 "m" (yalpha1), "m" (uvalpha1) |
1107 : "%eax" |
1108 ); |
6578 | 1109 return; |
1110 case IMGFMT_BGR16: | |
1111 asm volatile( |
1112 YSCALEYUV2RGB |
1113 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1114 #ifdef DITHER1XBPP |
4248 | 1115 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1116 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1117 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1118 #endif |
1119 |
1120 WRITEBGR16 |
1121 |
3209 | 1122 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
1123 "m" (yalpha1), "m" (uvalpha1) |
1124 : "%eax" |
1125 ); |
6578 | 1126 return; |
1127 default: break; | |
1128 } | |
1129 #endif //HAVE_MMX | |
1130 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C) | |
1131 } |
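/*
 * Illustrative C sketch (not part of the original file) of the vertical
 * bilinear blend this function performs before the YUV->RGB step: yalpha1
 * is yalpha^4095, i.e. roughly 4096-yalpha, so the two source lines are
 * mixed with 12-bit weights and the <<7 sample scaling plus the weight are
 * removed together by the >>19, exactly as in the disabled C path above.
 */
static inline int blend_lines_c(const uint16_t *buf0, const uint16_t *buf1, int i, int yalpha)
{
	int yalpha1= yalpha^4095;
	return (buf0[i]*yalpha1 + buf1[i]*yalpha)>>19;
}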
1132 |
1133 /** |
1134 * YV12 to RGB without scaling or interpolating |
1135 */ |
6578 | 1136 static inline void RENAME(yuv2rgb1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1, |
1137 uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y) | |
1138 { |
2671 | 1139 int uvalpha1=uvalpha^4095; |
3344 | 1140 const int yalpha1=0; |
6578 | 1141 int i; |
1142 | |
1143 uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1 | |
1144 const int yalpha= 4096; //FIXME ... | |
2671 | 1145 |
4467 | 1146 if(flags&SWS_FULL_CHR_H_INT) |
1147 { |
6578 | 1148 RENAME(yuv2rgb2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y); |
1149 return; |
1150 } |
2576 | 1151 |
1152 #ifdef HAVE_MMX |
1153 if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster |
1154 { |
6578 | 1155 switch(dstFormat) |
1156 { |
6578 | 1157 case IMGFMT_BGR32: |
1158 asm volatile( |
1159 YSCALEYUV2RGB1 |
1160 WRITEBGR32 |
3344 | 1161 :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
1162 "m" (yalpha1), "m" (uvalpha1) |
1163 : "%eax" |
1164 ); |
6578 | 1165 return; |
1166 case IMGFMT_BGR24: | |
1167 asm volatile( |
2728 | 1168 "movl %4, %%ebx \n\t" |
1169 YSCALEYUV2RGB1 |
1170 WRITEBGR24 |
3344 | 1171 :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW), |
1172 "m" (yalpha1), "m" (uvalpha1) |
1173 : "%eax", "%ebx" |
1174 ); |
6578 | 1175 return; |
1176 case IMGFMT_BGR15: | |
1177 asm volatile( |
1178 YSCALEYUV2RGB1 |
1179 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1180 #ifdef DITHER1XBPP |
4248 | 1181 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1182 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1183 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1184 #endif |
1185 WRITEBGR15 |
3344 | 1186 :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
1187 "m" (yalpha1), "m" (uvalpha1) |
1188 : "%eax" |
1189 ); |
6578 | 1190 return; |
1191 case IMGFMT_BGR16: | |
1192 asm volatile( |
1193 YSCALEYUV2RGB1 |
1194 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1195 #ifdef DITHER1XBPP |
4248 | 1196 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1197 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1198 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1199 #endif |
1200 |
1201 WRITEBGR16 |
3344 | 1202 :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
1203 "m" (yalpha1), "m" (uvalpha1) |
1204 : "%eax" |
1205 ); |
6578 | 1206 return; |
1207 } |
1208 } |
1209 else |
1210 { |
6578 | 1211 switch(dstFormat) |
1212 { |
6578 | 1213 case IMGFMT_BGR32: |
1214 asm volatile( |
1215 YSCALEYUV2RGB1b |
1216 WRITEBGR32 |
3344 | 1217 :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1218 "m" (yalpha1), "m" (uvalpha1) |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1219 : "%eax" |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1220 ); |
6578 | 1221 return; |
1222 case IMGFMT_BGR24: | |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1223 asm volatile( |
2728 | 1224 "movl %4, %%ebx \n\t" |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1225 YSCALEYUV2RGB1b |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1226 WRITEBGR24 |
3344 | 1227 :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW), |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1228 "m" (yalpha1), "m" (uvalpha1) |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1229 : "%eax", "%ebx" |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1230 ); |
6578 | 1231 return; |
1232 case IMGFMT_BGR15: | |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1233 asm volatile( |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1234 YSCALEYUV2RGB1b |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1235 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1236 #ifdef DITHER1XBPP |
4248 | 1237 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1238 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1239 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1240 #endif |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1241 WRITEBGR15 |
3344 | 1242 :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1243 "m" (yalpha1), "m" (uvalpha1) |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1244 : "%eax" |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1245 ); |
6578 | 1246 return; |
1247 case IMGFMT_BGR16: | |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1248 asm volatile( |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1249 YSCALEYUV2RGB1b |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1250 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1251 #ifdef DITHER1XBPP |
4248 | 1252 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1253 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1254 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1255 #endif |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1256 |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1257 WRITEBGR16 |
3344 | 1258 :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1259 "m" (yalpha1), "m" (uvalpha1) |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1260 : "%eax" |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1261 ); |
6578 | 1262 return; |
2569
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1263 } |
30b736e7feef
interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents:
2566
diff
changeset
|
1264 } |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1265 #endif |
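/* C fallback: if uvalpha < 2048 only one buffered chroma line is used as-is,
   otherwise the *1B macro variant that averages the two buffered chroma lines is picked. */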
6578 | 1266 if( uvalpha < 2048 ) |
1267 { | |
1268 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C) | |
1269 }else{ | |
1270 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C) | |
1271 } | |
1272 } |
1273 |
4481 | 1274 //FIXME yuy2* can read up to 7 samples too much |
1275 | |
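/* YUY2 is packed as Y0 U0 Y1 V0 ...: one luma sample per byte pair, chroma shared by
   two pixels; the helpers below split it into the separate Y and U/V lines the scaler works on. */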
4467 | 1276 static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, int width) |
1277 { | |
4481 | 1278 #ifdef HAVE_MMX |
1279 asm volatile( | |
1280 "movq "MANGLE(bm01010101)", %%mm2\n\t" | |
1281 "movl %0, %%eax \n\t" | |
1282 "1: \n\t" | |
1283 "movq (%1, %%eax,2), %%mm0 \n\t" | |
1284 "movq 8(%1, %%eax,2), %%mm1 \n\t" | |
1285 "pand %%mm2, %%mm0 \n\t" | |
1286 "pand %%mm2, %%mm1 \n\t" | |
1287 "packuswb %%mm1, %%mm0 \n\t" | |
1288 "movq %%mm0, (%2, %%eax) \n\t" | |
1289 "addl $8, %%eax \n\t" | |
1290 " js 1b \n\t" | |
1291 : : "g" (-width), "r" (src+width*2), "r" (dst+width) | |
1292 : "%eax" | |
1293 ); | |
4467 | 1294 #else |
1295 int i; | |
1296 for(i=0; i<width; i++) | |
1297 dst[i]= src[2*i]; | |
1298 #endif | |
1299 } | |
1300 | |
1301 static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1302 { | |
4481 | 1303 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) |
1304 asm volatile( | |
1305 "movq "MANGLE(bm01010101)", %%mm4\n\t" | |
1306 "movl %0, %%eax \n\t" | |
1307 "1: \n\t" | |
1308 "movq (%1, %%eax,4), %%mm0 \n\t" | |
1309 "movq 8(%1, %%eax,4), %%mm1 \n\t" | |
1310 "movq (%2, %%eax,4), %%mm2 \n\t" | |
1311 "movq 8(%2, %%eax,4), %%mm3 \n\t" | |
1312 PAVGB(%%mm2, %%mm0) | |
1313 PAVGB(%%mm3, %%mm1) | |
1314 "psrlw $8, %%mm0 \n\t" | |
1315 "psrlw $8, %%mm1 \n\t" | |
1316 "packuswb %%mm1, %%mm0 \n\t" | |
1317 "movq %%mm0, %%mm1 \n\t" | |
1318 "psrlw $8, %%mm0 \n\t" | |
1319 "pand %%mm4, %%mm1 \n\t" | |
1320 "packuswb %%mm0, %%mm0 \n\t" | |
1321 "packuswb %%mm1, %%mm1 \n\t" | |
1322 "movd %%mm0, (%4, %%eax) \n\t" | |
1323 "movd %%mm1, (%3, %%eax) \n\t" | |
1324 "addl $4, %%eax \n\t" | |
1325 " js 1b \n\t" | |
1326 : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width) | |
1327 : "%eax" | |
1328 ); | |
4467 | 1329 #else |
1330 int i; | |
1331 for(i=0; i<width; i++) | |
1332 { | |
1333 dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1; | |
1334 dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1; | |
1335 } | |
1336 #endif | |
1337 } | |
1338 | |
1339 static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width) | |
1340 { | |
1341 #ifdef HAVE_MMXFIXME | |
1342 #else | |
1343 int i; | |
1344 for(i=0; i<width; i++) | |
1345 { | |
1346 int b= src[i*4+0]; | |
1347 int g= src[i*4+1]; | |
1348 int r= src[i*4+2]; | |
1349 | |
1350 dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; | |
1351 } | |
1352 #endif | |
1353 } | |
1354 | |
1355 static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1356 { | |
1357 #ifdef HAVE_MMXFIXME | |
1358 #else | |
1359 int i; | |
1360 for(i=0; i<width; i++) | |
1361 { | |
1362 int b= src1[8*i + 0] + src1[8*i + 4] + src2[8*i + 0] + src2[8*i + 4]; | |
1363 int g= src1[8*i + 1] + src1[8*i + 5] + src2[8*i + 1] + src2[8*i + 5]; | |
1364 int r= src1[8*i + 2] + src1[8*i + 6] + src2[8*i + 2] + src2[8*i + 6]; | |
1365 | |
1366 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1367 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1368 } | |
1369 #endif | |
1370 } | |
1371 | |
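/* MMX version below: each iteration unpacks 8 BGR24 pixels (24 bytes) to words,
   multiplies them with the packed coefficients via pmaddwd, folds the partial sums
   with w1111, adds the luma offset and stores 8 Y samples. */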
1372 static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width) | |
1373 { | |
4612 | 1374 #ifdef HAVE_MMX |
1375 asm volatile( | |
1376 "movl %2, %%eax \n\t" | |
4923 | 1377 "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t" |
1378 "movq "MANGLE(w1111)", %%mm5 \n\t" | |
4612 | 1379 "pxor %%mm7, %%mm7 \n\t" |
1380 "leal (%%eax, %%eax, 2), %%ebx \n\t" | |
1381 ".balign 16 \n\t" | |
1382 "1: \n\t" | |
1383 PREFETCH" 64(%0, %%ebx) \n\t" | |
1384 "movd (%0, %%ebx), %%mm0 \n\t" | |
1385 "movd 3(%0, %%ebx), %%mm1 \n\t" | |
1386 "punpcklbw %%mm7, %%mm0 \n\t" | |
1387 "punpcklbw %%mm7, %%mm1 \n\t" | |
1388 "movd 6(%0, %%ebx), %%mm2 \n\t" | |
1389 "movd 9(%0, %%ebx), %%mm3 \n\t" | |
1390 "punpcklbw %%mm7, %%mm2 \n\t" | |
1391 "punpcklbw %%mm7, %%mm3 \n\t" | |
1392 "pmaddwd %%mm6, %%mm0 \n\t" | |
1393 "pmaddwd %%mm6, %%mm1 \n\t" | |
1394 "pmaddwd %%mm6, %%mm2 \n\t" | |
1395 "pmaddwd %%mm6, %%mm3 \n\t" | |
1396 #ifndef FAST_BGR2YV12 | |
1397 "psrad $8, %%mm0 \n\t" | |
1398 "psrad $8, %%mm1 \n\t" | |
1399 "psrad $8, %%mm2 \n\t" | |
1400 "psrad $8, %%mm3 \n\t" | |
1401 #endif | |
1402 "packssdw %%mm1, %%mm0 \n\t" | |
1403 "packssdw %%mm3, %%mm2 \n\t" | |
1404 "pmaddwd %%mm5, %%mm0 \n\t" | |
1405 "pmaddwd %%mm5, %%mm2 \n\t" | |
1406 "packssdw %%mm2, %%mm0 \n\t" | |
1407 "psraw $7, %%mm0 \n\t" | |
1408 | |
1409 "movd 12(%0, %%ebx), %%mm4 \n\t" | |
1410 "movd 15(%0, %%ebx), %%mm1 \n\t" | |
1411 "punpcklbw %%mm7, %%mm4 \n\t" | |
1412 "punpcklbw %%mm7, %%mm1 \n\t" | |
1413 "movd 18(%0, %%ebx), %%mm2 \n\t" | |
1414 "movd 21(%0, %%ebx), %%mm3 \n\t" | |
1415 "punpcklbw %%mm7, %%mm2 \n\t" | |
1416 "punpcklbw %%mm7, %%mm3 \n\t" | |
1417 "pmaddwd %%mm6, %%mm4 \n\t" | |
1418 "pmaddwd %%mm6, %%mm1 \n\t" | |
1419 "pmaddwd %%mm6, %%mm2 \n\t" | |
1420 "pmaddwd %%mm6, %%mm3 \n\t" | |
1421 #ifndef FAST_BGR2YV12 | |
1422 "psrad $8, %%mm4 \n\t" | |
1423 "psrad $8, %%mm1 \n\t" | |
1424 "psrad $8, %%mm2 \n\t" | |
1425 "psrad $8, %%mm3 \n\t" | |
1426 #endif | |
1427 "packssdw %%mm1, %%mm4 \n\t" | |
1428 "packssdw %%mm3, %%mm2 \n\t" | |
1429 "pmaddwd %%mm5, %%mm4 \n\t" | |
1430 "pmaddwd %%mm5, %%mm2 \n\t" | |
1431 "addl $24, %%ebx \n\t" | |
1432 "packssdw %%mm2, %%mm4 \n\t" | |
1433 "psraw $7, %%mm4 \n\t" | |
1434 | |
1435 "packuswb %%mm4, %%mm0 \n\t" | |
4923 | 1436 "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t" |
4612 | 1437 |
4619 | 1438 "movq %%mm0, (%1, %%eax) \n\t" |
4612 | 1439 "addl $8, %%eax \n\t" |
1440 " js 1b \n\t" | |
1441 : : "r" (src+width*3), "r" (dst+width), "g" (-width) | |
1442 : "%eax", "%ebx" | |
1443 ); | |
4467 | 1444 #else |
1445 int i; | |
1446 for(i=0; i<width; i++) | |
1447 { | |
1448 int b= src[i*3+0]; | |
1449 int g= src[i*3+1]; | |
1450 int r= src[i*3+2]; | |
1451 | |
1452 dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; | |
1453 } | |
1454 #endif | |
1455 } | |
1456 | |
1457 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1458 { | |
4619 | 1459 #ifdef HAVE_MMX |
1460 asm volatile( | |
1461 "movl %4, %%eax \n\t" | |
4923 | 1462 "movq "MANGLE(w1111)", %%mm5 \n\t" |
1463 "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t" | |
4619 | 1464 "pxor %%mm7, %%mm7 \n\t" |
1465 "leal (%%eax, %%eax, 2), %%ebx \n\t" | |
1466 "addl %%ebx, %%ebx \n\t" | |
1467 ".balign 16 \n\t" | |
1468 "1: \n\t" | |
1469 PREFETCH" 64(%0, %%ebx) \n\t" | |
1470 PREFETCH" 64(%1, %%ebx) \n\t" | |
1471 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) | |
1472 "movq (%0, %%ebx), %%mm0 \n\t" | |
1473 "movq (%1, %%ebx), %%mm1 \n\t" | |
1474 "movq 6(%0, %%ebx), %%mm2 \n\t" | |
1475 "movq 6(%1, %%ebx), %%mm3 \n\t" | |
1476 PAVGB(%%mm1, %%mm0) | |
1477 PAVGB(%%mm3, %%mm2) | |
1478 "movq %%mm0, %%mm1 \n\t" | |
1479 "movq %%mm2, %%mm3 \n\t" | |
1480 "psrlq $24, %%mm0 \n\t" | |
1481 "psrlq $24, %%mm2 \n\t" | |
1482 PAVGB(%%mm1, %%mm0) | |
1483 PAVGB(%%mm3, %%mm2) | |
1484 "punpcklbw %%mm7, %%mm0 \n\t" | |
1485 "punpcklbw %%mm7, %%mm2 \n\t" | |
1486 #else | |
1487 "movd (%0, %%ebx), %%mm0 \n\t" | |
1488 "movd (%1, %%ebx), %%mm1 \n\t" | |
1489 "movd 3(%0, %%ebx), %%mm2 \n\t" | |
1490 "movd 3(%1, %%ebx), %%mm3 \n\t" | |
1491 "punpcklbw %%mm7, %%mm0 \n\t" | |
1492 "punpcklbw %%mm7, %%mm1 \n\t" | |
1493 "punpcklbw %%mm7, %%mm2 \n\t" | |
1494 "punpcklbw %%mm7, %%mm3 \n\t" | |
1495 "paddw %%mm1, %%mm0 \n\t" | |
1496 "paddw %%mm3, %%mm2 \n\t" | |
1497 "paddw %%mm2, %%mm0 \n\t" | |
1498 "movd 6(%0, %%ebx), %%mm4 \n\t" | |
1499 "movd 6(%1, %%ebx), %%mm1 \n\t" | |
1500 "movd 9(%0, %%ebx), %%mm2 \n\t" | |
1501 "movd 9(%1, %%ebx), %%mm3 \n\t" | |
1502 "punpcklbw %%mm7, %%mm4 \n\t" | |
1503 "punpcklbw %%mm7, %%mm1 \n\t" | |
1504 "punpcklbw %%mm7, %%mm2 \n\t" | |
1505 "punpcklbw %%mm7, %%mm3 \n\t" | |
1506 "paddw %%mm1, %%mm4 \n\t" | |
1507 "paddw %%mm3, %%mm2 \n\t" | |
1508 "paddw %%mm4, %%mm2 \n\t" | |
1509 "psrlw $2, %%mm0 \n\t" | |
1510 "psrlw $2, %%mm2 \n\t" | |
1511 #endif | |
4923 | 1512 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" |
1513 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" | |
4619 | 1514 |
1515 "pmaddwd %%mm0, %%mm1 \n\t" | |
1516 "pmaddwd %%mm2, %%mm3 \n\t" | |
1517 "pmaddwd %%mm6, %%mm0 \n\t" | |
1518 "pmaddwd %%mm6, %%mm2 \n\t" | |
1519 #ifndef FAST_BGR2YV12 | |
1520 "psrad $8, %%mm0 \n\t" | |
1521 "psrad $8, %%mm1 \n\t" | |
1522 "psrad $8, %%mm2 \n\t" | |
1523 "psrad $8, %%mm3 \n\t" | |
1524 #endif | |
1525 "packssdw %%mm2, %%mm0 \n\t" | |
1526 "packssdw %%mm3, %%mm1 \n\t" | |
1527 "pmaddwd %%mm5, %%mm0 \n\t" | |
1528 "pmaddwd %%mm5, %%mm1 \n\t" | |
1529 "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0 | |
1530 "psraw $7, %%mm0 \n\t" | |
1531 | |
1532 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) | |
1533 "movq 12(%0, %%ebx), %%mm4 \n\t" | |
1534 "movq 12(%1, %%ebx), %%mm1 \n\t" | |
1535 "movq 18(%0, %%ebx), %%mm2 \n\t" | |
1536 "movq 18(%1, %%ebx), %%mm3 \n\t" | |
1537 PAVGB(%%mm1, %%mm4) | |
1538 PAVGB(%%mm3, %%mm2) | |
1539 "movq %%mm4, %%mm1 \n\t" | |
1540 "movq %%mm2, %%mm3 \n\t" | |
1541 "psrlq $24, %%mm4 \n\t" | |
1542 "psrlq $24, %%mm2 \n\t" | |
1543 PAVGB(%%mm1, %%mm4) | |
1544 PAVGB(%%mm3, %%mm2) | |
1545 "punpcklbw %%mm7, %%mm4 \n\t" | |
1546 "punpcklbw %%mm7, %%mm2 \n\t" | |
1547 #else | |
1548 "movd 12(%0, %%ebx), %%mm4 \n\t" | |
1549 "movd 12(%1, %%ebx), %%mm1 \n\t" | |
1550 "movd 15(%0, %%ebx), %%mm2 \n\t" | |
1551 "movd 15(%1, %%ebx), %%mm3 \n\t" | |
1552 "punpcklbw %%mm7, %%mm4 \n\t" | |
1553 "punpcklbw %%mm7, %%mm1 \n\t" | |
1554 "punpcklbw %%mm7, %%mm2 \n\t" | |
1555 "punpcklbw %%mm7, %%mm3 \n\t" | |
1556 "paddw %%mm1, %%mm4 \n\t" | |
1557 "paddw %%mm3, %%mm2 \n\t" | |
1558 "paddw %%mm2, %%mm4 \n\t" | |
1559 "movd 18(%0, %%ebx), %%mm5 \n\t" | |
1560 "movd 18(%1, %%ebx), %%mm1 \n\t" | |
1561 "movd 21(%0, %%ebx), %%mm2 \n\t" | |
1562 "movd 21(%1, %%ebx), %%mm3 \n\t" | |
1563 "punpcklbw %%mm7, %%mm5 \n\t" | |
1564 "punpcklbw %%mm7, %%mm1 \n\t" | |
1565 "punpcklbw %%mm7, %%mm2 \n\t" | |
1566 "punpcklbw %%mm7, %%mm3 \n\t" | |
1567 "paddw %%mm1, %%mm5 \n\t" | |
1568 "paddw %%mm3, %%mm2 \n\t" | |
1569 "paddw %%mm5, %%mm2 \n\t" | |
4923 | 1570 "movq "MANGLE(w1111)", %%mm5 \n\t" |
4619 | 1571 "psrlw $2, %%mm4 \n\t" |
1572 "psrlw $2, %%mm2 \n\t" | |
1573 #endif | |
4923 | 1574 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" |
1575 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" | |
4619 | 1576 |
1577 "pmaddwd %%mm4, %%mm1 \n\t" | |
1578 "pmaddwd %%mm2, %%mm3 \n\t" | |
1579 "pmaddwd %%mm6, %%mm4 \n\t" | |
1580 "pmaddwd %%mm6, %%mm2 \n\t" | |
1581 #ifndef FAST_BGR2YV12 | |
1582 "psrad $8, %%mm4 \n\t" | |
1583 "psrad $8, %%mm1 \n\t" | |
1584 "psrad $8, %%mm2 \n\t" | |
1585 "psrad $8, %%mm3 \n\t" | |
1586 #endif | |
1587 "packssdw %%mm2, %%mm4 \n\t" | |
1588 "packssdw %%mm3, %%mm1 \n\t" | |
1589 "pmaddwd %%mm5, %%mm4 \n\t" | |
1590 "pmaddwd %%mm5, %%mm1 \n\t" | |
1591 "addl $24, %%ebx \n\t" | |
1592 "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2 | |
1593 "psraw $7, %%mm4 \n\t" | |
1594 | |
1595 "movq %%mm0, %%mm1 \n\t" | |
1596 "punpckldq %%mm4, %%mm0 \n\t" | |
1597 "punpckhdq %%mm4, %%mm1 \n\t" | |
1598 "packsswb %%mm1, %%mm0 \n\t" | |
4923 | 1599 "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t" |
4619 | 1600 |
1601 "movd %%mm0, (%2, %%eax) \n\t" | |
1602 "punpckhdq %%mm0, %%mm0 \n\t" | |
1603 "movd %%mm0, (%3, %%eax) \n\t" | |
1604 "addl $4, %%eax \n\t" | |
1605 " js 1b \n\t" | |
1606 : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width) | |
1607 : "%eax", "%ebx" | |
1608 ); | |
4467 | 1609 #else |
1610 int i; | |
1611 for(i=0; i<width; i++) | |
1612 { | |
1613 int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3]; | |
1614 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4]; | |
1615 int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5]; | |
1616 | |
1617 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1618 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1619 } | |
1620 #endif | |
1621 } | |
1622 | |
4578 | 1623 static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width) |
1624 { | |
1625 int i; | |
1626 for(i=0; i<width; i++) | |
1627 { | |
1628 int d= src[i*2] + (src[i*2+1]<<8); | |
1629 int b= d&0x1F; | |
1630 int g= (d>>5)&0x3F; | |
1631 int r= (d>>11)&0x1F; | |
1632 | |
1633 dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16; | |
1634 } | |
1635 } | |
1636 | |
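/* The #if 1 branch below sums the 2x2 block of RGB565 pixels needed for one chroma
   sample using plain 32 bit arithmetic: the two masks keep the 5/6 bit colour fields
   in separate lanes of a word so the additions cannot carry into a neighbouring field. */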
1637 static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1638 { | |
1639 int i; | |
1640 for(i=0; i<width; i++) | |
1641 { | |
4579 | 1642 #if 1 |
1643 int d0= le2me_32( ((uint32_t*)src1)[i] ); | |
1644 int d1= le2me_32( ((uint32_t*)src2)[i] ); | |
1645 | |
1646 int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F); | |
1647 int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F); | |
1648 | |
1649 int dh2= (dh>>11) + (dh<<21); | |
1650 int d= dh2 + dl; | |
1651 | |
1652 int b= d&0x7F; | |
1653 int r= (d>>11)&0x7F; | |
1654 int g= d>>21; | |
1655 #else | |
4578 | 1656 int d0= src1[i*4] + (src1[i*4+1]<<8); |
1657 int b0= d0&0x1F; | |
1658 int g0= (d0>>5)&0x3F; | |
1659 int r0= (d0>>11)&0x1F; | |
1660 | |
1661 int d1= src1[i*4+2] + (src1[i*4+3]<<8); | |
1662 int b1= d1&0x1F; | |
1663 int g1= (d1>>5)&0x3F; | |
1664 int r1= (d1>>11)&0x1F; | |
1665 | |
1666 int d2= src2[i*4] + (src2[i*4+1]<<8); | |
1667 int b2= d2&0x1F; | |
1668 int g2= (d2>>5)&0x3F; | |
1669 int r2= (d2>>11)&0x1F; | |
1670 | |
1671 int d3= src2[i*4+2] + (src2[i*4+3]<<8); | |
1672 int b3= d3&0x1F; | |
1673 int g3= (d3>>5)&0x3F; | |
1674 int r3= (d3>>11)&0x1F; | |
1675 | |
1676 int b= b0 + b1 + b2 + b3; | |
1677 int g= g0 + g1 + g2 + g3; | |
1678 int r= r0 + r1 + r2 + r3; | |
4579 | 1679 #endif |
4578 | 1680 dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128; |
1681 dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128; | |
1682 } | |
1683 } | |
1684 | |
4580 | 1685 static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width) |
1686 { | |
1687 int i; | |
1688 for(i=0; i<width; i++) | |
1689 { | |
1690 int d= src[i*2] + (src[i*2+1]<<8); | |
1691 int b= d&0x1F; | |
1692 int g= (d>>5)&0x1F; | |
1693 int r= (d>>10)&0x1F; | |
1694 | |
1695 dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16; | |
1696 } | |
1697 } | |
1698 | |
1699 static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1700 { | |
1701 int i; | |
1702 for(i=0; i<width; i++) | |
1703 { | |
1704 #if 1 | |
1705 int d0= le2me_32( ((uint32_t*)src1)[i] ); | |
1706 int d1= le2me_32( ((uint32_t*)src2)[i] ); | |
1707 | |
1708 int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F); | |
1709 int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F); | |
1710 | |
1711 int dh2= (dh>>11) + (dh<<21); | |
1712 int d= dh2 + dl; | |
1713 | |
1714 int b= d&0x7F; | |
1715 int r= (d>>10)&0x7F; | |
1716 int g= d>>21; | |
1717 #else | |
1718 int d0= src1[i*4] + (src1[i*4+1]<<8); | |
1719 int b0= d0&0x1F; | |
1720 int g0= (d0>>5)&0x1F; | |
1721 int r0= (d0>>10)&0x1F; | |
1722 | |
1723 int d1= src1[i*4+2] + (src1[i*4+3]<<8); | |
1724 int b1= d1&0x1F; | |
1725 int g1= (d1>>5)&0x1F; | |
1726 int r1= (d1>>10)&0x1F; | |
1727 | |
1728 int d2= src2[i*4] + (src2[i*4+1]<<8); | |
1729 int b2= d2&0x1F; | |
1730 int g2= (d2>>5)&0x1F; | |
1731 int r2= (d2>>10)&0x1F; | |
1732 | |
1733 int d3= src2[i*4+2] + (src2[i*4+3]<<8); | |
1734 int b3= d3&0x1F; | |
1735 int g3= (d3>>5)&0x1F; | |
1736 int r3= (d3>>10)&0x1F; | |
1737 | |
1738 int b= b0 + b1 + b2 + b3; | |
1739 int g= g0 + g1 + g2 + g3; | |
1740 int r= r0 + r1 + r2 + r3; | |
1741 #endif | |
1742 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128; | |
1743 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128; | |
1744 } | |
1745 } | |
1746 | |
1747 | |
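/* The rgb32/rgb24 input helpers below are identical to their bgr counterparts apart
   from the swapped R and B byte positions. */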
4558 | 1748 static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width) |
1749 { | |
1750 int i; | |
1751 for(i=0; i<width; i++) | |
1752 { | |
1753 int r= src[i*4+0]; | |
1754 int g= src[i*4+1]; | |
1755 int b= src[i*4+2]; | |
1756 | |
1757 dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; | |
1758 } | |
1759 } | |
1760 | |
1761 static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1762 { | |
1763 int i; | |
1764 for(i=0; i<width; i++) | |
1765 { | |
1766 int r= src1[8*i + 0] + src1[8*i + 4] + src2[8*i + 0] + src2[8*i + 4]; | |
1767 int g= src1[8*i + 1] + src1[8*i + 5] + src2[8*i + 1] + src2[8*i + 5]; | |
1768 int b= src1[8*i + 2] + src1[8*i + 6] + src2[8*i + 2] + src2[8*i + 6]; | |
1769 | |
1770 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1771 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1772 } | |
1773 } | |
1774 | |
1775 static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width) | |
1776 { | |
1777 int i; | |
1778 for(i=0; i<width; i++) | |
1779 { | |
1780 int r= src[i*3+0]; | |
1781 int g= src[i*3+1]; | |
1782 int b= src[i*3+2]; | |
1783 | |
1784 dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; | |
1785 } | |
1786 } | |
1787 | |
1788 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1789 { | |
1790 int i; | |
1791 for(i=0; i<width; i++) | |
1792 { | |
1793 int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3]; | |
1794 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4]; | |
1795 int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5]; | |
1796 | |
1797 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1798 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1799 } | |
1800 } | |
1801 | |
4467 | 1802 |
3272 | 1803 // Bilinear / Bicubic scaling |
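/* hScale computes dst[i] = (sum over j of src[filterPos[i]+j] * filter[i*filterSize+j]) >> 7,
   i.e. one FIR filter of filterSize taps per output sample; the MMX paths below
   specialise the common 4 and 8 tap cases and produce two output samples per loop. */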
1804 static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, | |
1805 int16_t *filter, int16_t *filterPos, int filterSize) | |
1806 { | |
1807 #ifdef HAVE_MMX | |
1808 if(filterSize==4) // always true for upscaling, sometimes for downscaling too |
1809 { | |
1810 int counter= -2*dstW; | |
1811 filter-= counter*2; | |
1812 filterPos-= counter/2; | |
1813 dst-= counter/2; | |
1814 asm volatile( | |
1815 "pxor %%mm7, %%mm7 \n\t" | |
4248 | 1816 "movq "MANGLE(w02)", %%mm6 \n\t" |
3272 | 1817 "pushl %%ebp \n\t" // we use 7 regs here ... |
1818 "movl %%eax, %%ebp \n\t" | |
1819 ".balign 16 \n\t" | |
1820 "1: \n\t" | |
1821 "movzwl (%2, %%ebp), %%eax \n\t" | |
1822 "movzwl 2(%2, %%ebp), %%ebx \n\t" | |
1823 "movq (%1, %%ebp, 4), %%mm1 \n\t" | |
1824 "movq 8(%1, %%ebp, 4), %%mm3 \n\t" | |
1825 "movd (%3, %%eax), %%mm0 \n\t" | |
1826 "movd (%3, %%ebx), %%mm2 \n\t" | |
1827 "punpcklbw %%mm7, %%mm0 \n\t" | |
1828 "punpcklbw %%mm7, %%mm2 \n\t" | |
1829 "pmaddwd %%mm1, %%mm0 \n\t" | |
1830 "pmaddwd %%mm2, %%mm3 \n\t" | |
1831 "psrad $8, %%mm0 \n\t" | |
1832 "psrad $8, %%mm3 \n\t" | |
1833 "packssdw %%mm3, %%mm0 \n\t" | |
1834 "pmaddwd %%mm6, %%mm0 \n\t" | |
1835 "packssdw %%mm0, %%mm0 \n\t" | |
1836 "movd %%mm0, (%4, %%ebp) \n\t" | |
1837 "addl $4, %%ebp \n\t" | |
1838 " jnc 1b \n\t" | |
3352 | 1839 |
3272 | 1840 "popl %%ebp \n\t" |
1841 : "+a" (counter) | |
1842 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst) | |
1843 : "%ebx" | |
1844 ); | |
1845 } | |
1846 else if(filterSize==8) | |
1847 { | |
1848 int counter= -2*dstW; | |
1849 filter-= counter*4; | |
1850 filterPos-= counter/2; | |
1851 dst-= counter/2; | |
1852 asm volatile( | |
1853 "pxor %%mm7, %%mm7 \n\t" | |
4248 | 1854 "movq "MANGLE(w02)", %%mm6 \n\t" |
3272 | 1855 "pushl %%ebp \n\t" // we use 7 regs here ... |
1856 "movl %%eax, %%ebp \n\t" | |
1857 ".balign 16 \n\t" | |
1858 "1: \n\t" | |
1859 "movzwl (%2, %%ebp), %%eax \n\t" | |
1860 "movzwl 2(%2, %%ebp), %%ebx \n\t" | |
1861 "movq (%1, %%ebp, 8), %%mm1 \n\t" | |
1862 "movq 16(%1, %%ebp, 8), %%mm3 \n\t" | |
1863 "movd (%3, %%eax), %%mm0 \n\t" | |
1864 "movd (%3, %%ebx), %%mm2 \n\t" | |
1865 "punpcklbw %%mm7, %%mm0 \n\t" | |
1866 "punpcklbw %%mm7, %%mm2 \n\t" | |
1867 "pmaddwd %%mm1, %%mm0 \n\t" | |
1868 "pmaddwd %%mm2, %%mm3 \n\t" | |
1869 |
3272 | 1870 "movq 8(%1, %%ebp, 8), %%mm1 \n\t" |
1871 "movq 24(%1, %%ebp, 8), %%mm5 \n\t" | |
1872 "movd 4(%3, %%eax), %%mm4 \n\t" | |
1873 "movd 4(%3, %%ebx), %%mm2 \n\t" | |
1874 "punpcklbw %%mm7, %%mm4 \n\t" | |
1875 "punpcklbw %%mm7, %%mm2 \n\t" | |
1876 "pmaddwd %%mm1, %%mm4 \n\t" | |
1877 "pmaddwd %%mm2, %%mm5 \n\t" | |
1878 "paddd %%mm4, %%mm0 \n\t" | |
1879 "paddd %%mm5, %%mm3 \n\t" | |
1880 | |
1881 "psrad $8, %%mm0 \n\t" | |
1882 "psrad $8, %%mm3 \n\t" | |
1883 "packssdw %%mm3, %%mm0 \n\t" | |
1884 "pmaddwd %%mm6, %%mm0 \n\t" | |
1885 "packssdw %%mm0, %%mm0 \n\t" | |
1886 "movd %%mm0, (%4, %%ebp) \n\t" | |
1887 "addl $4, %%ebp \n\t" | |
1888 " jnc 1b \n\t" | |
3344 | 1889 |
3272 | 1890 "popl %%ebp \n\t" |
1891 : "+a" (counter) | |
1892 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst) | |
1893 : "%ebx" | |
1894 ); | |
1895 } | |
1896 else | |
1897 { | |
1898 int counter= -2*dstW; | |
1899 // filter-= counter*filterSize/2; | |
1900 filterPos-= counter/2; | |
1901 dst-= counter/2; | |
1902 asm volatile( | |
1903 "pxor %%mm7, %%mm7 \n\t" | |
4248 | 1904 "movq "MANGLE(w02)", %%mm6 \n\t" |
3272 | 1905 ".balign 16 \n\t" |
1906 "1: \n\t" | |
1907 "movl %2, %%ecx \n\t" | |
1908 "movzwl (%%ecx, %0), %%eax \n\t" | |
1909 "movzwl 2(%%ecx, %0), %%ebx \n\t" | |
1910 "movl %5, %%ecx \n\t" | |
1911 "pxor %%mm4, %%mm4 \n\t" | |
1912 "pxor %%mm5, %%mm5 \n\t" | |
1913 "2: \n\t" | |
1914 "movq (%1), %%mm1 \n\t" | |
1915 "movq (%1, %6), %%mm3 \n\t" | |
1916 "movd (%%ecx, %%eax), %%mm0 \n\t" | |
1917 "movd (%%ecx, %%ebx), %%mm2 \n\t" | |
1918 "punpcklbw %%mm7, %%mm0 \n\t" | |
1919 "punpcklbw %%mm7, %%mm2 \n\t" | |
1920 "pmaddwd %%mm1, %%mm0 \n\t" | |
1921 "pmaddwd %%mm2, %%mm3 \n\t" | |
1922 "paddd %%mm3, %%mm5 \n\t" | |
1923 "paddd %%mm0, %%mm4 \n\t" | |
1924 "addl $8, %1 \n\t" | |
1925 "addl $4, %%ecx \n\t" | |
1926 "cmpl %4, %%ecx \n\t" | |
1927 " jb 2b \n\t" | |
1928 "addl %6, %1 \n\t" | |
1929 "psrad $8, %%mm4 \n\t" | |
1930 "psrad $8, %%mm5 \n\t" | |
1931 "packssdw %%mm5, %%mm4 \n\t" | |
1932 "pmaddwd %%mm6, %%mm4 \n\t" | |
1933 "packssdw %%mm4, %%mm4 \n\t" | |
1934 "movl %3, %%eax \n\t" | |
1935 "movd %%mm4, (%%eax, %0) \n\t" | |
1936 "addl $4, %0 \n\t" | |
1937 " jnc 1b \n\t" | |
3344 | 1938 |
3641 | 1939 : "+r" (counter), "+r" (filter) |
1940 : "m" (filterPos), "m" (dst), "m"(src+filterSize), | |
3272 | 1941 "m" (src), "r" (filterSize*2) |
3299 | 1942 : "%ebx", "%eax", "%ecx" |
3272 | 1943 ); |
1944 } | |
1945 #else | |
1946 int i; | |
1947 for(i=0; i<dstW; i++) | |
1948 { | |
1949 int j; | |
1950 int srcPos= filterPos[i]; | |
1951 int val=0; | |
3344 | 1952 // printf("filterPos: %d\n", filterPos[i]); |
3272 | 1953 for(j=0; j<filterSize; j++) |
1954 { | |
1955 // printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]); | |
1956 val += ((int)src[srcPos + j])*filter[filterSize*i + j]; | |
1957 } | |
1958 // filter += hFilterSize; | |
1959 dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ... | |
1960 // dst[i] = val>>7; | |
1961 } | |
1962 #endif | |
1963 } | |
1964 // *** horizontal scale Y line to temp buffer | |
1965 static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc, |
1966 int flags, int canMMX2BeUsed, int16_t *hLumFilter, |
4467 | 1967 int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode, |
5452 | 1968 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter, |
1969 int32_t *mmx2FilterPos) | |
2469 | 1970 { |
4467 | 1971 if(srcFormat==IMGFMT_YUY2) |
1972 { | |
1973 RENAME(yuy2ToY)(formatConvBuffer, src, srcW); | |
1974 src= formatConvBuffer; | |
1975 } | |
1976 else if(srcFormat==IMGFMT_BGR32) | |
1977 { | |
1978 RENAME(bgr32ToY)(formatConvBuffer, src, srcW); | |
1979 src= formatConvBuffer; | |
1980 } | |
1981 else if(srcFormat==IMGFMT_BGR24) | |
1982 { | |
1983 RENAME(bgr24ToY)(formatConvBuffer, src, srcW); | |
1984 src= formatConvBuffer; | |
1985 } | |
4578 | 1986 else if(srcFormat==IMGFMT_BGR16) |
1987 { | |
1988 RENAME(bgr16ToY)(formatConvBuffer, src, srcW); | |
1989 src= formatConvBuffer; | |
1990 } | |
4580 | 1991 else if(srcFormat==IMGFMT_BGR15) |
1992 { | |
1993 RENAME(bgr15ToY)(formatConvBuffer, src, srcW); | |
1994 src= formatConvBuffer; | |
1995 } | |
4558 | 1996 else if(srcFormat==IMGFMT_RGB32) |
1997 { | |
1998 RENAME(rgb32ToY)(formatConvBuffer, src, srcW); | |
1999 src= formatConvBuffer; | |
2000 } | |
2001 else if(srcFormat==IMGFMT_RGB24) | |
2002 { | |
2003 RENAME(rgb24ToY)(formatConvBuffer, src, srcW); | |
2004 src= formatConvBuffer; | |
2005 } | |
4467 | 2006 |
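/* Packed YUV and RGB/BGR sources have been converted to a plain 8 bit luma line in
   formatConvBuffer above; from here on only the horizontal scaling itself differs. */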
3352 | 2007 #ifdef HAVE_MMX |
2008 // use the new MMX scaler if the MMX2 one cannot be used (it is faster than the plain x86 asm one) | |
2009 if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) |
3352 | 2010 #else |
2011 if(!(flags&SWS_FAST_BILINEAR)) |
3352 | 2012 #endif |
3272 | 2013 { |
2014 RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize); | |
2015 } | |
2016 else // Fast Bilinear upscale / crap downscale | |
2017 { | |
2469 | 2018 #ifdef ARCH_X86 |
2019 #ifdef HAVE_MMX2 | |
2671 | 2020 int i; |
2469 | 2021 if(canMMX2BeUsed) |
2022 { | |
2023 asm volatile( | |
2024 "pxor %%mm7, %%mm7 \n\t" | |
5452 | 2025 "movl %0, %%ecx \n\t" |
2026 "movl %1, %%edi \n\t" | |
2027 "movl %2, %%edx \n\t" | |
2028 "movl %3, %%ebx \n\t" | |
2469 | 2029 "xorl %%eax, %%eax \n\t" // i |
5452 | 2030 PREFETCH" (%%ecx) \n\t" |
2031 PREFETCH" 32(%%ecx) \n\t" | |
2032 PREFETCH" 64(%%ecx) \n\t" | |
2520 | 2033 |
2469 | 2034 #define FUNNY_Y_CODE \ |
5452 | 2035 "movl (%%ebx), %%esi \n\t"\ |
2036 "call *%4 \n\t"\ | |
2037 "addl (%%ebx, %%eax), %%ecx \n\t"\ | |
2038 "addl %%eax, %%edi \n\t"\ | |
2039 "xorl %%eax, %%eax \n\t"\ | |
2520 | 2040 |
2469 | 2041 FUNNY_Y_CODE |
2042 FUNNY_Y_CODE | |
2043 FUNNY_Y_CODE | |
2044 FUNNY_Y_CODE | |
2045 FUNNY_Y_CODE | |
2046 FUNNY_Y_CODE | |
2047 FUNNY_Y_CODE | |
2048 FUNNY_Y_CODE | |
2049 | |
5452 | 2050 :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos), |
2051 "m" (funnyYCode) | |
2469 | 2052 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi" |
2053 ); | |
3215 | 2054 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; |
2469 | 2055 } |
2056 else | |
2057 { | |
2058 #endif | |
2059 //NO MMX just normal asm ... | |
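/* 16.16 fixed point stepping: the fractional part of xInc is accumulated in %cx and
   the carry, together with xInc>>16, advances the source index in %ebx. */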
2060 asm volatile( | |
2061 "xorl %%eax, %%eax \n\t" // i | |
2062 "xorl %%ebx, %%ebx \n\t" // xx | |
2063 "xorl %%ecx, %%ecx \n\t" // 2*xalpha | |
2064 ".balign 16 \n\t" |
2469 | 2065 "1: \n\t" |
2066 "movzbl (%0, %%ebx), %%edi \n\t" //src[xx] | |
2067 "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1] | |
2068 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] | |
2069 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2070 "shll $16, %%edi \n\t" | |
2071 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
2072 "movl %1, %%edi \n\t" | |
2073 "shrl $9, %%esi \n\t" | |
2074 "movw %%si, (%%edi, %%eax, 2) \n\t" | |
2075 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF | |
2076 "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry | |
2077 | |
2078 "movzbl (%0, %%ebx), %%edi \n\t" //src[xx] | |
2079 "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1] | |
2080 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] | |
2081 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2082 "shll $16, %%edi \n\t" | |
2083 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
2084 "movl %1, %%edi \n\t" | |
2085 "shrl $9, %%esi \n\t" | |
2086 "movw %%si, 2(%%edi, %%eax, 2) \n\t" | |
2087 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF | |
2088 "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry | |
2089 | |
2090 | |
2091 "addl $2, %%eax \n\t" | |
2092 "cmpl %2, %%eax \n\t" | |
2093 " jb 1b \n\t" | |
2094 | |
2095 | |
2096 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF) | |
2097 : "%eax", "%ebx", "%ecx", "%edi", "%esi" | |
2098 ); | |
2099 #ifdef HAVE_MMX2 | |
2100 } //if MMX2 cant be used | |
2101 #endif | |
2102 #else | |
2671 | 2103 int i; |
2104 unsigned int xpos=0; | |
2105 for(i=0;i<dstWidth;i++) | |
2106 { | |
2107 register unsigned int xx=xpos>>16; | |
2108 register unsigned int xalpha=(xpos&0xFFFF)>>9; | |
2109 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha; | |
2110 xpos+=xInc; | |
2111 } | |
2469 | 2112 #endif |
3272 | 2113 } |
2469 | 2114 } |
2115 | |
2116 inline static void RENAME(hcscale)(uint16_t *dst, int dstWidth, uint8_t *src1, uint8_t *src2, |
2117 int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter, |
4467 | 2118 int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode, |
5452 | 2119 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter, |
2120 int32_t *mmx2FilterPos) | |
2469 | 2121 { |
4467 | 2122 if(srcFormat==IMGFMT_YUY2) |
2123 { | |
2124 RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2125 src1= formatConvBuffer; | |
2126 src2= formatConvBuffer+2048; | |
2127 } | |
2128 else if(srcFormat==IMGFMT_BGR32) | |
2129 { | |
2130 RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2131 src1= formatConvBuffer; | |
2132 src2= formatConvBuffer+2048; | |
2133 } | |
2134 else if(srcFormat==IMGFMT_BGR24) | |
2135 { | |
2136 RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2137 src1= formatConvBuffer; | |
2138 src2= formatConvBuffer+2048; | |
2139 } | |
4578 | 2140 else if(srcFormat==IMGFMT_BGR16) |
2141 { | |
2142 RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2143 src1= formatConvBuffer; | |
2144 src2= formatConvBuffer+2048; | |
2145 } | |
4580 | 2146 else if(srcFormat==IMGFMT_BGR15) |
2147 { | |
2148 RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2149 src1= formatConvBuffer; | |
2150 src2= formatConvBuffer+2048; | |
2151 } | |
4558 | 2152 else if(srcFormat==IMGFMT_RGB32) |
2153 { | |
2154 RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2155 src1= formatConvBuffer; | |
2156 src2= formatConvBuffer+2048; | |
2157 } | |
2158 else if(srcFormat==IMGFMT_RGB24) | |
2159 { | |
2160 RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2161 src1= formatConvBuffer; | |
2162 src2= formatConvBuffer+2048; | |
2163 } | |
4481 | 2164 else if(isGray(srcFormat)) |
2165 { | |
2166 return; | |
2167 } | |
4467 | 2168 |
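/* hcscale scales both chroma planes into one buffer: U at dst[0..] and V at
   dst[2048..], the same 2048 sample offset used for the conversion buffers above. */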
3352 | 2169 #ifdef HAVE_MMX |
2170 // use the new MMX scaler if the MMX2 one cannot be used (it is faster than the plain x86 asm one) | |
2171 if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) |
3352 | 2172 #else |
2173 if(!(flags&SWS_FAST_BILINEAR)) |
3352 | 2174 #endif |
3272 | 2175 { |
2176 RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); | |
2177 RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); | |
2178 } | |
2179 else // Fast Bilinear upscale / crap downscale | |
2180 { | |
2469 | 2181 #ifdef ARCH_X86 |
2182 #ifdef HAVE_MMX2 | |
2671 | 2183 int i; |
2469 | 2184 if(canMMX2BeUsed) |
2185 { | |
2186 asm volatile( | |
5452 | 2187 "pxor %%mm7, %%mm7 \n\t" |
2188 "movl %0, %%ecx \n\t" | |
2189 "movl %1, %%edi \n\t" | |
2190 "movl %2, %%edx \n\t" | |
2191 "movl %3, %%ebx \n\t" | |
2192 "xorl %%eax, %%eax \n\t" // i | |
2193 PREFETCH" (%%ecx) \n\t" | |
2194 PREFETCH" 32(%%ecx) \n\t" | |
2195 PREFETCH" 64(%%ecx) \n\t" | |
2196 | |
2197 #define FUNNY_UV_CODE \ | |
2198 "movl (%%ebx), %%esi \n\t"\ | |
2199 "call *%4 \n\t"\ | |
2200 "addl (%%ebx, %%eax), %%ecx \n\t"\ | |
2201 "addl %%eax, %%edi \n\t"\ | |
2202 "xorl %%eax, %%eax \n\t"\ | |
2469 | 2203 |
5452 | 2204 FUNNY_UV_CODE |
2205 FUNNY_UV_CODE | |
2206 FUNNY_UV_CODE | |
2207 FUNNY_UV_CODE | |
2208 "xorl %%eax, %%eax \n\t" // i | |
2209 "movl %5, %%ecx \n\t" // src | |
2210 "movl %1, %%edi \n\t" // buf1 | |
2211 "addl $4096, %%edi \n\t" | |
2212 PREFETCH" (%%ecx) \n\t" | |
2213 PREFETCH" 32(%%ecx) \n\t" | |
2214 PREFETCH" 64(%%ecx) \n\t" | |
2469 | 2215 |
5452 | 2216 FUNNY_UV_CODE |
2217 FUNNY_UV_CODE | |
2218 FUNNY_UV_CODE | |
2219 FUNNY_UV_CODE | |
2469 | 2220 |
5452 | 2221 :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos), |
2222 "m" (funnyUVCode), "m" (src2) | |
2223 : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi" | |
2224 ); | |
3344 | 2225 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) |
2469 | 2226 { |
3344 | 2227 // printf("%d %d %d\n", dstWidth, i, srcW); |
2228 dst[i] = src1[srcW-1]*128; | |
2229 dst[i+2048] = src2[srcW-1]*128; | |
2469 | 2230 } |
2231 } | |
2232 else | |
2233 { | |
2234 #endif | |
2235 asm volatile( | |
2236 "xorl %%eax, %%eax \n\t" // i | |
2237 "xorl %%ebx, %%ebx \n\t" // xx | |
2238 "xorl %%ecx, %%ecx \n\t" // 2*xalpha | |
2239 ".balign 16 \n\t" |
2469 | 2240 "1: \n\t" |
2241 "movl %0, %%esi \n\t" | |
2242 "movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx] | |
2243 "movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1] | |
2244 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] | |
2245 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2246 "shll $16, %%edi \n\t" | |
2247 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
2248 "movl %1, %%edi \n\t" | |
2249 "shrl $9, %%esi \n\t" | |
2250 "movw %%si, (%%edi, %%eax, 2) \n\t" | |
2251 | |
2252 "movzbl (%5, %%ebx), %%edi \n\t" //src[xx] | |
2253 "movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1] | |
2254 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] | |
2255 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2256 "shll $16, %%edi \n\t" | |
2257 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
2258 "movl %1, %%edi \n\t" | |
2259 "shrl $9, %%esi \n\t" | |
2260 "movw %%si, 4096(%%edi, %%eax, 2)\n\t" | |
2261 | |
2262 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF | |
2263 "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry | |
2264 "addl $1, %%eax \n\t" | |
2265 "cmpl %2, %%eax \n\t" | |
2266 " jb 1b \n\t" | |
2267 | |
2268 :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF), | |
2269 "r" (src2) | |
2270 : "%eax", "%ebx", "%ecx", "%edi", "%esi" | |
2271 ); | |
2272 #ifdef HAVE_MMX2 | |
2273 } //if MMX2 cant be used | |
2274 #endif | |
2275 #else | |
2671 | 2276 int i; |
2277 unsigned int xpos=0; | |
2278 for(i=0;i<dstWidth;i++) | |
2279 { | |
2280 register unsigned int xx=xpos>>16; | |
2281 register unsigned int xalpha=(xpos&0xFFFF)>>9; | |
2282 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha); | |
2283 dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha); | |
2566 | 2284 /* slower |
2285 dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha; | |
2286 dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha; | |
2287 */ | |
2671 | 2288 xpos+=xInc; |
2289 } | |
2469 | 2290 #endif |
3272 | 2291 } |
2292 } | |
2293 | |
4467 | 2294 static void RENAME(swScale)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY, |
4698 | 2295 int srcSliceH, uint8_t* dstParam[], int dstStrideParam[]){ |
3344 | 2296 |
2297 /* load a few things into local vars to make the code more readable and faster */
2298 const int srcW= c->srcW;
2299 const int dstW= c->dstW;
2300 const int dstH= c->dstH;
2301 const int chrDstW= c->chrDstW;
2302 const int chrSrcW= c->chrSrcW;
2303 const int lumXInc= c->lumXInc;
2304 const int chrXInc= c->chrXInc;
4295 | 2305 const int dstFormat= c->dstFormat; |
6503 | 2306 const int srcFormat= c->srcFormat; |
2307 const int flags= c->flags;
2308 const int canMMX2BeUsed= c->canMMX2BeUsed;
2309 int16_t *vLumFilterPos= c->vLumFilterPos;
2310 int16_t *vChrFilterPos= c->vChrFilterPos;
2311 int16_t *hLumFilterPos= c->hLumFilterPos;
2312 int16_t *hChrFilterPos= c->hChrFilterPos;
2313 int16_t *vLumFilter= c->vLumFilter;
2314 int16_t *vChrFilter= c->vChrFilter;
2315 int16_t *hLumFilter= c->hLumFilter;
2316 int16_t *hChrFilter= c->hChrFilter;
2317 int16_t *lumMmxFilter= c->lumMmxFilter;
2318 int16_t *chrMmxFilter= c->chrMmxFilter;
2319 const int vLumFilterSize= c->vLumFilterSize;
2320 const int vChrFilterSize= c->vChrFilterSize;
2321 const int hLumFilterSize= c->hLumFilterSize;
2322 const int hChrFilterSize= c->hChrFilterSize;
2323 int16_t **lumPixBuf= c->lumPixBuf;
2324 int16_t **chrPixBuf= c->chrPixBuf;
2325 const int vLumBufSize= c->vLumBufSize;
2326 const int vChrBufSize= c->vChrBufSize;
2327 uint8_t *funnyYCode= c->funnyYCode;
2328 uint8_t *funnyUVCode= c->funnyUVCode;
4467 | 2329 uint8_t *formatConvBuffer= c->formatConvBuffer; |
2330 const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
2331 const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
3344 | 2332 |
2333 /* vars which will change and which we need to store back in the context */
2334 int dstY= c->dstY;
2335 int lumBufIndex= c->lumBufIndex;
2336 int chrBufIndex= c->chrBufIndex;
2337 int lastInLumBuf= c->lastInLumBuf;
2338 int lastInChrBuf= c->lastInChrBuf; |
4467 | 2339 int srcStride[3]; |
4698 | 2340 int dstStride[3]; |
4419 | 2341 uint8_t *src[3]; |
2342 uint8_t *dst[3]; | |
6540 | 2343 |
2344 orderYUV(c->srcFormat, src, srcStride, srcParam, srcStrideParam); | |
2345 orderYUV(c->dstFormat, dst, dstStride, dstParam, dstStrideParam); | |
6503 | 2346 |
6540 | 2347 if(isPacked(c->srcFormat)){ |
4467 | 2348 src[0]= |
2349 src[1]= | |
2350 src[2]= srcParam[0]; | |
6540 | 2351 srcStride[0]= |
4467 | 2352 srcStride[1]= |
6540 | 2353 srcStride[2]= srcStrideParam[0]; |
4467 | 2354 } |
6540 | 2355 srcStride[1]<<= c->vChrDrop; |
2356 srcStride[2]<<= c->vChrDrop; | |
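/* vChrDrop enlarges the chroma input strides, so only every 2^vChrDrop-th chroma
   line is read (cheap vertical chroma decimation). */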
4419 | 2357 |
6517 | 2358 // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2], |
2359 // (int)dst[0], (int)dst[1], (int)dst[2]); | |
2360 | |
2361 #if 0 //self test FIXME move to a vfilter or something | |
2362 { | |
2363 static volatile int i=0; | |
2364 i++; | |
2365 if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH) | |
2366 selfTest(src, srcStride, c->srcW, c->srcH); | |
2367 i--; | |
2368 } | |
2369 #endif | |
4554 | 2370 |
2371 //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2], | |
2372 //dstStride[0],dstStride[1],dstStride[2]); | |
4419 | 2373 |
2374 if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0) | |
2375 { | |
2376 static int firstTime=1; //FIXME move this into the context perhaps | |
2377 if(flags & SWS_PRINT_INFO && firstTime) | |
2378 { | |
5937 | 2379 mp_msg(MSGT_SWS,MSGL_WARN,"SwScaler: Warning: dstStride is not aligned!\n" |
4419 | 2380 "SwScaler: ->cannot do aligned memory accesses anymore\n"); |
2381 firstTime=0; | |
2382 } | |
2383 } | |
3344 | 2384 |
4467 | 2385 /* Note: the user might start scaling the picture in the middle, so this will not get executed; |
2386 this is not really intended but works currently, so people might do it */ | |
2387 if(srcSliceY ==0){
2388 lumBufIndex=0;
2389 chrBufIndex=0;
4467 | 2390 dstY=0; |
2391 lastInLumBuf= -1;
2392 lastInChrBuf= -1; |
3272 | 2393 } |
3344 | 2394 |
2395 for(;dstY < dstH; dstY++){ | |
2396 unsigned char *dest =dst[0]+dstStride[0]*dstY; |
6520 | 2397 const int chrDstY= dstY>>c->chrDstVSubSample; |
2398 unsigned char *uDest=dst[1]+dstStride[1]*chrDstY; | |
2399 unsigned char *vDest=dst[2]+dstStride[2]*chrDstY; | |
3344 | 2400 |
2401 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input | |
2402 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input | |
2403 const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input | |
2404 const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input | |
2405 | |
2406 //handle holes (FAST_BILINEAR & weird filters)
2407 if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
2408 if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
2409 //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize); |
3344 | 2410 ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1) |
2411 ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1) | |
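/* lumPixBuf/chrPixBuf are ring buffers of horizontally scaled lines; the asserts
   above check that every line still needed for the vertical filter is present. */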
2216 | 2412 |
3344 | 2413 // Do we have enough lines in this slice to output the dstY line |
2414 if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample)) |
2469 | 2415 { |
3344 | 2416 //Do horizontal scaling |
2417 while(lastInLumBuf < lastLumSrcY) | |
2418 { | |
2419 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; |
3344 | 2420 lumBufIndex++; |
2421 // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY); |
3344 | 2422 ASSERT(lumBufIndex < 2*vLumBufSize) |
2423 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) | |
2424 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) | |
2425 // printf("%d %d\n", lumBufIndex, vLumBufSize); | |
2426 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, |
2427 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, |
5452 | 2428 funnyYCode, c->srcFormat, formatConvBuffer, |
2429 c->lumMmx2Filter, c->lumMmx2FilterPos); | |
3344 | 2430 lastInLumBuf++; |
2431 } | |
2432 while(lastInChrBuf < lastChrSrcY) | |
2433 { | |
2434 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; |
2435 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; |
3344 | 2436 chrBufIndex++; |
2437 ASSERT(chrBufIndex < 2*vChrBufSize) | |
2438 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH)) |
2439 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0) |
2440 //FIXME replace parameters through context struct (some at least) |
6503 | 2441 |
2442 if(!(isGray(srcFormat) || isGray(dstFormat))) | |
2443 RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, |
2444 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, |
5452 | 2445 funnyUVCode, c->srcFormat, formatConvBuffer, |
2446 c->chrMmx2Filter, c->chrMmx2FilterPos); | |
3344 | 2447 lastInChrBuf++; |
2448 } | |
2449 //wrap buf index around to stay inside the ring buffer | |
2450 if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; | |
2451 if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; | |
2469 | 2452 } |
3344 | 2453 else // not enough lines left in this slice -> load the rest in the buffer |
2469 | 2454 { |
3344 | 2455 /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n", |
2456 firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY, | |
2457 lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize, | |
6532 | 2458 vChrBufSize, vLumBufSize);*/
2459
3344 | 2460 //Do horizontal scaling |
2461 while(lastInLumBuf+1 < srcSliceY + srcSliceH) | |
2469 | 2462 { |
4276 | 2463 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
3344 | 2464 lumBufIndex++; |
2465 ASSERT(lumBufIndex < 2*vLumBufSize) | |
2466 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) | |
2467 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) | |
4276 | 2468 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
2469 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
5452 | 2470 funnyYCode, c->srcFormat, formatConvBuffer, |
2471 c->lumMmx2Filter, c->lumMmx2FilterPos); | |
3344 | 2472 lastInLumBuf++; |
2469 | 2473 } |
6532 | 2474 while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
3344 | 2475 { |
6532 | 2476 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
2477 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
3344 | 2478 chrBufIndex++; |
2479 ASSERT(chrBufIndex < 2*vChrBufSize) | |
6532 | 2480 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
2481 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
6503 | 2482 |
2483 if(!(isGray(srcFormat) || isGray(dstFormat))) | |
6532 | 2484 RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
4276 | 2485 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
5452 | 2486 funnyUVCode, c->srcFormat, formatConvBuffer, |
2487 c->chrMmx2Filter, c->chrMmx2FilterPos); | |
3344 | 2488 lastInChrBuf++; |
2489 } | |
2490 //wrap buf index around to stay inside the ring buffer | |
2491 if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; | |
2492 if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; | |
2493 break; //we can't output a dstY line, so let's try with the next slice
2469 | 2494 } |
2264 | 2495
2748 | 2496 #ifdef HAVE_MMX |
3344 | 2497 b5Dither= dither8[dstY&1]; |
2498 g6Dither= dither4[dstY&1]; | |
2499 g5Dither= dither8[dstY&1]; | |
2500 r5Dither= dither8[(dstY+1)&1]; | |
2748 | 2501 #endif |
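/* dither offsets for the 15/16 bit RGB output code, selected by output line parity;
   r5Dither uses the opposite phase ((dstY+1)&1), presumably to make the 2 line dither
   pattern less visible */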
4276 | 2502 if(dstY < dstH-2)
3352 | 2503 { |
6503 | 2504 if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like |
3344 | 2505 { |
7351 | 2506 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; |
2507 if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi | |
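/* e.g. for 4:2:0 output chrDstVSubSample is 1 -> chrSkipMask is 1, so chroma is only
   written on every 2nd output line; for gray output it is skipped entirely */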
3344 | 2508 if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12 |
2509 { | |
2510 int16_t *lumBuf = lumPixBuf[0]; | |
2511 int16_t *chrBuf= chrPixBuf[0]; | |
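/* a vertical filter size of 1 means no vertical scaling/filtering is needed for this
   output line, so the single horizontally scaled line in the ring buffer can be
   converted & written out directly */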
6532 | 2512 RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
3344 | 2513 } |
2514 else //General YV12 | |
2515 { | |
2516 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; | |
2517 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; | |
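/* firstLumSrcY - lastInLumBuf is <= 0 here, so vLumBufSize is added to keep the offset
   inside the pointer array; the pixel-buffer pointer arrays seem to be allocated with
   2*vLumBufSize / 2*vChrBufSize entries (see the ASSERTs in the RGB path below), so the
   vertical filter can read past the wrap point without a modulo */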
2518 RENAME(yuv2yuvX)( | |
6532 | 2519 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2520 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
2521 dest, uDest, vDest, dstW, chrDstW,
2522 lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+chrDstY*vChrFilterSize*4);
3344 | 2523 } |
2524 } | |
2525 else | |
2526 { | |
2527 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; | |
2528 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; | |
2529 | |
2530 ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); | |
2531 ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); | |
2532 if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB | |
2533 { | |
2534 int chrAlpha= vChrFilter[2*dstY+1]; | |
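/* with a 2 tap vertical chroma filter the 2nd coefficient acts as blend factor between
   the two buffered chroma lines, the luma line is used as is */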
2535 | |
6578 | 2536 RENAME(yuv2rgb1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1), |
2537 dest, dstW, chrAlpha, dstFormat, flags, dstY); | |
3344 | 2538 } |
2539 else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB | |
2540 { | |
2541 int lumAlpha= vLumFilter[2*dstY+1]; | |
2542 int chrAlpha= vChrFilter[2*dstY+1]; | |
2543 | |
6578 | 2544 RENAME(yuv2rgb2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1), |
2545 dest, dstW, lumAlpha, chrAlpha, dstY); | |
3344 | 2546 } |
2547 else //General RGB | |
2548 { | |
6578 | 2549 RENAME(yuv2rgbX)(c, |
3344 | 2550 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, |
2551 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
6578 | 2552 dest, dstW, |
2553 lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+dstY*vChrFilterSize*4, dstY); | |
3344 | 2554 } |
2555 } | |
3352 | 2556 } |
2557 else // hmm, looks like we can't use MMX here without overwriting this array's tail
2558 { | |
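/* not entirely clear which array is meant, probably the MMX filter tables indexed with
   dstY*vLumFilterSize*4 / dstY*vChrFilterSize*4 above; the plain C versions below don't
   take those tables at all, so they are safe for the last ~2 output lines */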
2559 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; | |
2560 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; | |
6615 | 2561 if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 |
3352 | 2562 { |
7351 | 2563 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; |
2564 if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi | |
6540 | 2565 yuv2yuvXinC( |
6532 | 2566 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2567 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
6540 | 2568 dest, uDest, vDest, dstW, chrDstW); |
3352 | 2569 } |
2570 else | |
2571 { | |
2572 ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); | |
2573 ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); | |
6578 | 2574 yuv2rgbXinC(c, |
3352 | 2575 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, |
2576 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
6578 | 2577 dest, dstW, dstY); |
3352 | 2578 } |
2579 } | |
3344 | 2580 } |
2534 | 2581
2582 #ifdef HAVE_MMX
2583 __asm __volatile(SFENCE:::"memory");
2566 | 2584 __asm __volatile(EMMS:::"memory"); |
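/* sfence makes the non temporal MOVNTQ stores globally visible before anyone reads the
   output, and emms/femms restores the FPU state so following FPU code works again */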
2534 | 2585 #endif
4276 | 2586 /* store changed local vars back in the context */
2587 c->dstY= dstY;
2588 c->lumBufIndex= lumBufIndex;
2589 c->chrBufIndex= chrBufIndex;
2590 c->lastInLumBuf= lastInLumBuf;
2591 c->lastInChrBuf= lastInChrBuf;
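/* the source picture may arrive slice by slice, so dstY and the ring buffer state have
   to survive until the next call on the following slice */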
3641 | 2592 } |