mplayer.hg: annotate libswscale/swscale_template.c @ 19172:bae6c99a99cc
vertical scaler with accurate rounding; some people on doom9 can see +-1 errors
the +-1 issue is limited to >2-tap vertical filters, so bilinear upscaling was unaffected
the new code is sometimes faster and sometimes slower, but the difference is significant (~20%), so it is optional and enabled with arnd=1
author   michael
date     Mon, 24 Jul 2006 10:36:06 +0000
parents  8579acff875e
children dbdc58b6e9bb
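
The change adds an alternative, more accurate vertical-scaling path. In the existing macros each tap is multiplied with pmulhw, so every product is truncated to its high 16 bits before the taps are summed; with more than 2 taps those per-tap truncations can add up to a visible +-1 error. The new *_ACCURATE macros use pmaddwd and 32-bit accumulators, so the truncation happens only once, after the sum. The following is an editor's rough scalar sketch of the two paths as read from the asm below; it is not part of the file, and the function names, types and clamping are illustrative only:

#include <stdint.h>

/* Illustrative model only: src[] holds the 16-bit intermediate samples
   contributing to one output pixel, coeff[] the 16-bit vertical filter
   coefficients, and 'rounder' stands in for the constant at
   VROUNDER_OFFSET. Shifts on negative values assume arithmetic shifts. */
static uint8_t vscale_fast(const int16_t *src, const int16_t *coeff,
                           int taps, int16_t rounder)
{
    int32_t acc = rounder;
    for (int i = 0; i < taps; i++)
        acc += ((int32_t)src[i] * coeff[i]) >> 16;  /* pmulhw: each tap truncated */
    acc >>= 3;                                      /* psraw $3 */
    return acc < 0 ? 0 : acc > 255 ? 255 : acc;     /* packuswb saturation */
}

static uint8_t vscale_accurate(const int16_t *src, const int16_t *coeff,
                               int taps, int16_t rounder)
{
    int64_t acc = 0;
    for (int i = 0; i < taps; i++)
        acc += (int32_t)src[i] * coeff[i];          /* pmaddwd/paddd: full 32-bit products */
    acc = (acc >> 16) + rounder;                    /* psrad $16, then add the rounder */
    acc >>= 3;                                      /* psraw $3 */
    return acc < 0 ? 0 : acc > 255 ? 255 : acc;
}

With, say, four taps whose truncated products each lose a little under one LSB, the fast path can land one step below the accurately rounded value, which is consistent with the +-1 errors reported on doom9.
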
1 /*
2 Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at> | |
3 | |
4 This program is free software; you can redistribute it and/or modify | |
5 it under the terms of the GNU General Public License as published by | |
6 the Free Software Foundation; either version 2 of the License, or | |
7 (at your option) any later version. | |
8 | |
9 This program is distributed in the hope that it will be useful, | |
10 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 GNU General Public License for more details. | |
13 | |
14 You should have received a copy of the GNU General Public License | |
15 along with this program; if not, write to the Free Software | |
16 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
17 */ | |
18 | |
19 #include "asmalign.h" | |
20 | |
21 #undef REAL_MOVNTQ | |
22 #undef MOVNTQ | |
23 #undef PAVGB | |
24 #undef PREFETCH | |
25 #undef PREFETCHW | |
26 #undef EMMS | |
27 #undef SFENCE | |
28 | |
29 #ifdef HAVE_3DNOW | |
30 /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
31 #define EMMS "femms" | |
32 #else | |
33 #define EMMS "emms" | |
34 #endif | |
35 | |
36 #ifdef HAVE_3DNOW | |
37 #define PREFETCH "prefetch" | |
38 #define PREFETCHW "prefetchw" | |
39 #elif defined ( HAVE_MMX2 ) | |
40 #define PREFETCH "prefetchnta" | |
41 #define PREFETCHW "prefetcht0" | |
42 #else | |
43 #define PREFETCH "/nop" | |
44 #define PREFETCHW "/nop" | |
45 #endif | |
46 | |
47 #ifdef HAVE_MMX2 | |
48 #define SFENCE "sfence" | |
49 #else | |
50 #define SFENCE "/nop" | |
51 #endif | |
52 | |
53 #ifdef HAVE_MMX2 | |
54 #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t" | |
55 #elif defined (HAVE_3DNOW) | |
56 #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t" | |
57 #endif | |
58 | |
59 #ifdef HAVE_MMX2 | |
60 #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t" | |
61 #else | |
62 #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t" | |
63 #endif | |
64 #define MOVNTQ(a,b) REAL_MOVNTQ(a,b) | |
65 | |
66 #ifdef HAVE_ALTIVEC | |
67 #include "swscale_altivec_template.c" | |
68 #endif | |
69 | |
70 #define YSCALEYUV2YV12X(x, offset, dest, width) \
71 asm volatile(\
72 "xor %%"REG_a", %%"REG_a" \n\t"\
73 "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
74 "movq %%mm3, %%mm4 \n\t"\
75 "lea " offset "(%0), %%"REG_d" \n\t"\
76 "mov (%%"REG_d"), %%"REG_S" \n\t"\
77 ASMALIGN16 /* FIXME Unroll? */\
78 "1: \n\t"\
79 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
80 "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
81 "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
82 "add $16, %%"REG_d" \n\t"\
83 "mov (%%"REG_d"), %%"REG_S" \n\t"\
84 "test %%"REG_S", %%"REG_S" \n\t"\
85 "pmulhw %%mm0, %%mm2 \n\t"\
86 "pmulhw %%mm0, %%mm5 \n\t"\
87 "paddw %%mm2, %%mm3 \n\t"\
88 "paddw %%mm5, %%mm4 \n\t"\
89 " jnz 1b \n\t"\
90 "psraw $3, %%mm3 \n\t"\
91 "psraw $3, %%mm4 \n\t"\
92 "packuswb %%mm4, %%mm3 \n\t"\
93 MOVNTQ(%%mm3, (%1, %%REGa))\
94 "add $8, %%"REG_a" \n\t"\
95 "cmp %2, %%"REG_a" \n\t"\
96 "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
97 "movq %%mm3, %%mm4 \n\t"\
98 "lea " offset "(%0), %%"REG_d" \n\t"\
99 "mov (%%"REG_d"), %%"REG_S" \n\t"\
100 "jb 1b \n\t"\
101 :: "r" (&c->redDither),\
102 "r" (dest), "p" (width)\
103 : "%"REG_a, "%"REG_d, "%"REG_S\
104 );
105
106 #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
107 asm volatile(\
108 "lea " offset "(%0), %%"REG_d" \n\t"\
109 "xor %%"REG_a", %%"REG_a" \n\t"\
110 "pxor %%mm4, %%mm4 \n\t"\
111 "pxor %%mm5, %%mm5 \n\t"\
112 "pxor %%mm6, %%mm6 \n\t"\
113 "pxor %%mm7, %%mm7 \n\t"\
114 "mov (%%"REG_d"), %%"REG_S" \n\t"\
115 ASMALIGN16 \
116 "1: \n\t"\
117 "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm0\n\t" /* srcData */\
118 "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
119 "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
120 "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm1\n\t" /* srcData */\
121 "movq %%mm0, %%mm3 \n\t"\
122 "punpcklwd %%mm1, %%mm0 \n\t"\
123 "punpckhwd %%mm1, %%mm3 \n\t"\
124 "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
125 "pmaddwd %%mm1, %%mm0 \n\t"\
126 "pmaddwd %%mm1, %%mm3 \n\t"\
127 "paddd %%mm0, %%mm4 \n\t"\
128 "paddd %%mm3, %%mm5 \n\t"\
129 "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm3\n\t" /* srcData */\
130 "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
131 "add $16, %%"REG_d" \n\t"\
132 "test %%"REG_S", %%"REG_S" \n\t"\
133 "movq %%mm2, %%mm0 \n\t"\
134 "punpcklwd %%mm3, %%mm2 \n\t"\
135 "punpckhwd %%mm3, %%mm0 \n\t"\
136 "pmaddwd %%mm1, %%mm2 \n\t"\
137 "pmaddwd %%mm1, %%mm0 \n\t"\
138 "paddd %%mm2, %%mm6 \n\t"\
139 "paddd %%mm0, %%mm7 \n\t"\
140 " jnz 1b \n\t"\
141 "psrad $16, %%mm4 \n\t"\
142 "psrad $16, %%mm5 \n\t"\
143 "psrad $16, %%mm6 \n\t"\
144 "psrad $16, %%mm7 \n\t"\
145 "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
146 "packssdw %%mm5, %%mm4 \n\t"\
147 "packssdw %%mm7, %%mm6 \n\t"\
148 "paddw %%mm0, %%mm4 \n\t"\
149 "paddw %%mm0, %%mm6 \n\t"\
150 "psraw $3, %%mm4 \n\t"\
151 "psraw $3, %%mm6 \n\t"\
152 "packuswb %%mm6, %%mm4 \n\t"\
153 MOVNTQ(%%mm4, (%1, %%REGa))\
154 "add $8, %%"REG_a" \n\t"\
155 "cmp %2, %%"REG_a" \n\t"\
156 "lea " offset "(%0), %%"REG_d" \n\t"\
157 "pxor %%mm4, %%mm4 \n\t"\
158 "pxor %%mm5, %%mm5 \n\t"\
159 "pxor %%mm6, %%mm6 \n\t"\
160 "pxor %%mm7, %%mm7 \n\t"\
161 "mov (%%"REG_d"), %%"REG_S" \n\t"\
162 "jb 1b \n\t"\
163 :: "r" (&c->redDither),\
164 "r" (dest), "p" (width)\
165 : "%"REG_a, "%"REG_d, "%"REG_S\
166 );
167
168 #define YSCALEYUV2YV121 \ | |
169 "mov %2, %%"REG_a" \n\t"\ | |
170 ASMALIGN16 /* FIXME Unroll? */\ | |
171 "1: \n\t"\ | |
172 "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\ | |
173 "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\ | |
174 "psraw $7, %%mm0 \n\t"\ | |
175 "psraw $7, %%mm1 \n\t"\ | |
176 "packuswb %%mm1, %%mm0 \n\t"\ | |
177 MOVNTQ(%%mm0, (%1, %%REGa))\ | |
178 "add $8, %%"REG_a" \n\t"\ | |
179 "jnc 1b \n\t" | |
180 | |
181 /* | |
182 :: "m" (-lumFilterSize), "m" (-chrFilterSize), | |
183 "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4), | |
184 "r" (dest), "m" (dstW), | |
185 "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize) | |
186 : "%eax", "%ebx", "%ecx", "%edx", "%esi" | |
187 */ | |
188 #define YSCALEYUV2PACKEDX \ | |
189 "xor %%"REG_a", %%"REG_a" \n\t"\ | |
190 ASMALIGN16\ | |
191 "nop \n\t"\ | |
192 "1: \n\t"\ | |
193 "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\ | |
194 "mov (%%"REG_d"), %%"REG_S" \n\t"\ | |
195 "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\ | |
196 "movq %%mm3, %%mm4 \n\t"\ | |
197 ASMALIGN16\ | |
198 "2: \n\t"\ | |
199 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\ | |
200 "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\ | |
201 "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\ | |
202 "add $16, %%"REG_d" \n\t"\ | |
203 "mov (%%"REG_d"), %%"REG_S" \n\t"\ | |
204 "pmulhw %%mm0, %%mm2 \n\t"\ | |
205 "pmulhw %%mm0, %%mm5 \n\t"\ | |
206 "paddw %%mm2, %%mm3 \n\t"\ | |
207 "paddw %%mm5, %%mm4 \n\t"\ | |
208 "test %%"REG_S", %%"REG_S" \n\t"\ | |
209 " jnz 2b \n\t"\ | |
210 \ | |
211 "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\ | |
212 "mov (%%"REG_d"), %%"REG_S" \n\t"\ | |
213 "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\ | |
214 "movq %%mm1, %%mm7 \n\t"\ | |
215 ASMALIGN16\ | |
216 "2: \n\t"\ | |
217 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\ | |
218 "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\ | |
219 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\ | |
220 "add $16, %%"REG_d" \n\t"\ | |
221 "mov (%%"REG_d"), %%"REG_S" \n\t"\ | |
222 "pmulhw %%mm0, %%mm2 \n\t"\ | |
223 "pmulhw %%mm0, %%mm5 \n\t"\ | |
224 "paddw %%mm2, %%mm1 \n\t"\ | |
225 "paddw %%mm5, %%mm7 \n\t"\ | |
226 "test %%"REG_S", %%"REG_S" \n\t"\ | |
227 " jnz 2b \n\t"\ | |
228 | |
229 #define YSCALEYUV2PACKEDX_ACCURATE \
230 "xor %%"REG_a", %%"REG_a" \n\t"\
231 ASMALIGN16\
232 "nop \n\t"\
233 "1: \n\t"\
234 "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
235 "mov (%%"REG_d"), %%"REG_S" \n\t"\
236 "pxor %%mm4, %%mm4 \n\t"\
237 "pxor %%mm5, %%mm5 \n\t"\
238 "pxor %%mm6, %%mm6 \n\t"\
239 "pxor %%mm7, %%mm7 \n\t"\
240 ASMALIGN16\
241 "2: \n\t"\
242 "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
243 "movq 4096(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
244 "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
245 "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
246 "movq %%mm0, %%mm3 \n\t"\
247 "punpcklwd %%mm1, %%mm0 \n\t"\
248 "punpckhwd %%mm1, %%mm3 \n\t"\
249 "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
250 "pmaddwd %%mm1, %%mm0 \n\t"\
251 "pmaddwd %%mm1, %%mm3 \n\t"\
252 "paddd %%mm0, %%mm4 \n\t"\
253 "paddd %%mm3, %%mm5 \n\t"\
254 "movq 4096(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
255 "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
256 "add $16, %%"REG_d" \n\t"\
257 "test %%"REG_S", %%"REG_S" \n\t"\
258 "movq %%mm2, %%mm0 \n\t"\
259 "punpcklwd %%mm3, %%mm2 \n\t"\
260 "punpckhwd %%mm3, %%mm0 \n\t"\
261 "pmaddwd %%mm1, %%mm2 \n\t"\
262 "pmaddwd %%mm1, %%mm0 \n\t"\
263 "paddd %%mm2, %%mm6 \n\t"\
264 "paddd %%mm0, %%mm7 \n\t"\
265 " jnz 2b \n\t"\
266 "psrad $16, %%mm4 \n\t"\
267 "psrad $16, %%mm5 \n\t"\
268 "psrad $16, %%mm6 \n\t"\
269 "psrad $16, %%mm7 \n\t"\
270 "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
271 "packssdw %%mm5, %%mm4 \n\t"\
272 "packssdw %%mm7, %%mm6 \n\t"\
273 "paddw %%mm0, %%mm4 \n\t"\
274 "paddw %%mm0, %%mm6 \n\t"\
275 "movq %%mm4, "U_TEMP"(%0) \n\t"\
276 "movq %%mm6, "V_TEMP"(%0) \n\t"\
277 \
278 "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
279 "mov (%%"REG_d"), %%"REG_S" \n\t"\
280 "pxor %%mm1, %%mm1 \n\t"\
281 "pxor %%mm5, %%mm5 \n\t"\
282 "pxor %%mm7, %%mm7 \n\t"\
283 "pxor %%mm6, %%mm6 \n\t"\
284 ASMALIGN16\
285 "2: \n\t"\
286 "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
287 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
288 "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
289 "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
290 "movq %%mm0, %%mm3 \n\t"\
291 "punpcklwd %%mm4, %%mm0 \n\t"\
292 "punpckhwd %%mm4, %%mm3 \n\t"\
293 "movq 8(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
294 "pmaddwd %%mm4, %%mm0 \n\t"\
295 "pmaddwd %%mm4, %%mm3 \n\t"\
296 "paddd %%mm0, %%mm1 \n\t"\
297 "paddd %%mm3, %%mm5 \n\t"\
298 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
299 "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
300 "add $16, %%"REG_d" \n\t"\
301 "test %%"REG_S", %%"REG_S" \n\t"\
302 "movq %%mm2, %%mm0 \n\t"\
303 "punpcklwd %%mm3, %%mm2 \n\t"\
304 "punpckhwd %%mm3, %%mm0 \n\t"\
305 "pmaddwd %%mm4, %%mm2 \n\t"\
306 "pmaddwd %%mm4, %%mm0 \n\t"\
307 "paddd %%mm2, %%mm7 \n\t"\
308 "paddd %%mm0, %%mm6 \n\t"\
309 " jnz 2b \n\t"\
310 "psrad $16, %%mm1 \n\t"\
311 "psrad $16, %%mm5 \n\t"\
312 "psrad $16, %%mm7 \n\t"\
313 "psrad $16, %%mm6 \n\t"\
314 "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
315 "packssdw %%mm5, %%mm1 \n\t"\
316 "packssdw %%mm6, %%mm7 \n\t"\
317 "paddw %%mm0, %%mm1 \n\t"\
318 "paddw %%mm0, %%mm7 \n\t"\
319 "movq "U_TEMP"(%0), %%mm3 \n\t"\
320 "movq "V_TEMP"(%0), %%mm4 \n\t"\
321
322 #define YSCALEYUV2RGBX(YSCALEYUV2PACKEDX) \
323 YSCALEYUV2PACKEDX\
324 "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\ | |
325 "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\ | |
326 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ | |
327 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\ | |
328 "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\ | |
329 "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\ | |
330 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\ | |
331 "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\ | |
332 "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\ | |
333 "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\ | |
334 "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\ | |
335 "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\ | |
336 "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\ | |
337 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\ | |
338 "paddw %%mm3, %%mm4 \n\t"\ | |
339 "movq %%mm2, %%mm0 \n\t"\ | |
340 "movq %%mm5, %%mm6 \n\t"\ | |
341 "movq %%mm4, %%mm3 \n\t"\ | |
342 "punpcklwd %%mm2, %%mm2 \n\t"\ | |
343 "punpcklwd %%mm5, %%mm5 \n\t"\ | |
344 "punpcklwd %%mm4, %%mm4 \n\t"\ | |
345 "paddw %%mm1, %%mm2 \n\t"\ | |
346 "paddw %%mm1, %%mm5 \n\t"\ | |
347 "paddw %%mm1, %%mm4 \n\t"\ | |
348 "punpckhwd %%mm0, %%mm0 \n\t"\ | |
349 "punpckhwd %%mm6, %%mm6 \n\t"\ | |
350 "punpckhwd %%mm3, %%mm3 \n\t"\ | |
351 "paddw %%mm7, %%mm0 \n\t"\ | |
352 "paddw %%mm7, %%mm6 \n\t"\ | |
353 "paddw %%mm7, %%mm3 \n\t"\ | |
354 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\ | |
355 "packuswb %%mm0, %%mm2 \n\t"\ | |
356 "packuswb %%mm6, %%mm5 \n\t"\ | |
357 "packuswb %%mm3, %%mm4 \n\t"\ | |
358 "pxor %%mm7, %%mm7 \n\t" | |
359 #if 0 | |
360 #define FULL_YSCALEYUV2RGB \ | |
361 "pxor %%mm7, %%mm7 \n\t"\ | |
362 "movd %6, %%mm6 \n\t" /*yalpha1*/\ | |
363 "punpcklwd %%mm6, %%mm6 \n\t"\ | |
364 "punpcklwd %%mm6, %%mm6 \n\t"\ | |
365 "movd %7, %%mm5 \n\t" /*uvalpha1*/\ | |
366 "punpcklwd %%mm5, %%mm5 \n\t"\ | |
367 "punpcklwd %%mm5, %%mm5 \n\t"\ | |
368 "xor %%"REG_a", %%"REG_a" \n\t"\ | |
369 ASMALIGN16\ | |
370 "1: \n\t"\ | |
371 "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\ | |
372 "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\ | |
373 "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\ | |
374 "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\ | |
375 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\ | |
376 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\ | |
377 "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\ | |
378 "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\ | |
379 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ | |
380 "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\ | |
381 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\ | |
382 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\ | |
383 "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\ | |
384 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\ | |
385 "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\ | |
386 "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\ | |
387 "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\ | |
388 "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\ | |
389 \ | |
390 \ | |
391 "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\ | |
392 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ | |
393 "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\ | |
394 "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\ | |
395 "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\ | |
396 "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\ | |
397 "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\ | |
398 \ | |
399 \ | |
400 "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\ | |
401 "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\ | |
402 "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\ | |
403 "paddw %%mm1, %%mm3 \n\t" /* B*/\ | |
404 "paddw %%mm1, %%mm0 \n\t" /* R*/\ | |
405 "packuswb %%mm3, %%mm3 \n\t"\ | |
406 \ | |
407 "packuswb %%mm0, %%mm0 \n\t"\ | |
408 "paddw %%mm4, %%mm2 \n\t"\ | |
409 "paddw %%mm2, %%mm1 \n\t" /* G*/\ | |
410 \ | |
411 "packuswb %%mm1, %%mm1 \n\t" | |
412 #endif | |
413 | |
414 #define REAL_YSCALEYUV2PACKED(index, c) \ | |
415 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\ | |
416 "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\ | |
417 "psraw $3, %%mm0 \n\t"\ | |
418 "psraw $3, %%mm1 \n\t"\ | |
419 "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\ | |
420 "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\ | |
421 "xor "#index", "#index" \n\t"\ | |
422 ASMALIGN16\ | |
423 "1: \n\t"\ | |
424 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ | |
425 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ | |
426 "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\ | |
427 "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ | |
428 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\ | |
429 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\ | |
430 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\ | |
431 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\ | |
432 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\ | |
433 "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\ | |
434 "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\ | |
435 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\ | |
436 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\ | |
437 "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\ | |
438 "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\ | |
439 "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\ | |
440 "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\ | |
441 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\ | |
442 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\ | |
443 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\ | |
444 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\ | |
445 "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ | |
446 "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ | |
447 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\ | |
448 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\ | |
449 | |
450 #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c) | |
451 | |
452 #define REAL_YSCALEYUV2RGB(index, c) \ | |
453 "xor "#index", "#index" \n\t"\ | |
454 ASMALIGN16\ | |
455 "1: \n\t"\ | |
456 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ | |
457 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ | |
458 "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\ | |
459 "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\ | |
460 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\ | |
461 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\ | |
462 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\ | |
463 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\ | |
464 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\ | |
465 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\ | |
466 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\ | |
467 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\ | |
468 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\ | |
469 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\ | |
470 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\ | |
471 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ | |
472 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\ | |
473 "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\ | |
474 "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\ | |
475 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\ | |
476 "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\ | |
477 "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\ | |
478 "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\ | |
479 "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\ | |
480 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\ | |
481 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\ | |
482 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\ | |
483 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\ | |
484 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ | |
485 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ | |
486 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\ | |
487 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\ | |
488 "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\ | |
489 "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\ | |
490 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\ | |
491 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\ | |
492 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\ | |
493 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\ | |
494 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\ | |
495 "paddw %%mm3, %%mm4 \n\t"\ | |
496 "movq %%mm2, %%mm0 \n\t"\ | |
497 "movq %%mm5, %%mm6 \n\t"\ | |
498 "movq %%mm4, %%mm3 \n\t"\ | |
499 "punpcklwd %%mm2, %%mm2 \n\t"\ | |
500 "punpcklwd %%mm5, %%mm5 \n\t"\ | |
501 "punpcklwd %%mm4, %%mm4 \n\t"\ | |
502 "paddw %%mm1, %%mm2 \n\t"\ | |
503 "paddw %%mm1, %%mm5 \n\t"\ | |
504 "paddw %%mm1, %%mm4 \n\t"\ | |
505 "punpckhwd %%mm0, %%mm0 \n\t"\ | |
506 "punpckhwd %%mm6, %%mm6 \n\t"\ | |
507 "punpckhwd %%mm3, %%mm3 \n\t"\ | |
508 "paddw %%mm7, %%mm0 \n\t"\ | |
509 "paddw %%mm7, %%mm6 \n\t"\ | |
510 "paddw %%mm7, %%mm3 \n\t"\ | |
511 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\ | |
512 "packuswb %%mm0, %%mm2 \n\t"\ | |
513 "packuswb %%mm6, %%mm5 \n\t"\ | |
514 "packuswb %%mm3, %%mm4 \n\t"\ | |
515 "pxor %%mm7, %%mm7 \n\t" | |
516 #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c) | |
517 | |
518 #define REAL_YSCALEYUV2PACKED1(index, c) \ | |
519 "xor "#index", "#index" \n\t"\ | |
520 ASMALIGN16\ | |
521 "1: \n\t"\ | |
522 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\ | |
523 "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\ | |
524 "psraw $7, %%mm3 \n\t" \ | |
525 "psraw $7, %%mm4 \n\t" \ | |
526 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\ | |
527 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\ | |
528 "psraw $7, %%mm1 \n\t" \ | |
529 "psraw $7, %%mm7 \n\t" \ | |
530 | |
531 #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c) | |
532 | |
533 #define REAL_YSCALEYUV2RGB1(index, c) \ | |
534 "xor "#index", "#index" \n\t"\ | |
535 ASMALIGN16\ | |
536 "1: \n\t"\ | |
537 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\ | |
538 "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\ | |
539 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\ | |
540 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\ | |
541 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\ | |
542 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\ | |
543 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ | |
544 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\ | |
545 "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\ | |
546 "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\ | |
547 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\ | |
548 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\ | |
549 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\ | |
550 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ | |
551 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ | |
552 "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\ | |
553 "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\ | |
554 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\ | |
555 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\ | |
556 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\ | |
557 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\ | |
558 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\ | |
559 "paddw %%mm3, %%mm4 \n\t"\ | |
560 "movq %%mm2, %%mm0 \n\t"\ | |
561 "movq %%mm5, %%mm6 \n\t"\ | |
562 "movq %%mm4, %%mm3 \n\t"\ | |
563 "punpcklwd %%mm2, %%mm2 \n\t"\ | |
564 "punpcklwd %%mm5, %%mm5 \n\t"\ | |
565 "punpcklwd %%mm4, %%mm4 \n\t"\ | |
566 "paddw %%mm1, %%mm2 \n\t"\ | |
567 "paddw %%mm1, %%mm5 \n\t"\ | |
568 "paddw %%mm1, %%mm4 \n\t"\ | |
569 "punpckhwd %%mm0, %%mm0 \n\t"\ | |
570 "punpckhwd %%mm6, %%mm6 \n\t"\ | |
571 "punpckhwd %%mm3, %%mm3 \n\t"\ | |
572 "paddw %%mm7, %%mm0 \n\t"\ | |
573 "paddw %%mm7, %%mm6 \n\t"\ | |
574 "paddw %%mm7, %%mm3 \n\t"\ | |
575 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\ | |
576 "packuswb %%mm0, %%mm2 \n\t"\ | |
577 "packuswb %%mm6, %%mm5 \n\t"\ | |
578 "packuswb %%mm3, %%mm4 \n\t"\ | |
579 "pxor %%mm7, %%mm7 \n\t" | |
580 #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c) | |
581 | |
582 #define REAL_YSCALEYUV2PACKED1b(index, c) \ | |
583 "xor "#index", "#index" \n\t"\ | |
584 ASMALIGN16\ | |
585 "1: \n\t"\ | |
586 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ | |
587 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ | |
588 "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\ | |
589 "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ | |
590 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\ | |
591 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\ | |
592 "psrlw $8, %%mm3 \n\t" \ | |
593 "psrlw $8, %%mm4 \n\t" \ | |
594 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\ | |
595 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\ | |
596 "psraw $7, %%mm1 \n\t" \ | |
597 "psraw $7, %%mm7 \n\t" | |
598 #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c) | |
599 | |
600 // do vertical chrominance interpolation | |
601 #define REAL_YSCALEYUV2RGB1b(index, c) \ | |
602 "xor "#index", "#index" \n\t"\ | |
603 ASMALIGN16\ | |
604 "1: \n\t"\ | |
605 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\ | |
606 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\ | |
607 "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\ | |
608 "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\ | |
609 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\ | |
610 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\ | |
611 "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\ | |
612 "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\ | |
613 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\ | |
614 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\ | |
615 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\ | |
616 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\ | |
617 "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\ | |
618 "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\ | |
619 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\ | |
620 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\ | |
621 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\ | |
622 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ | |
623 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\ | |
624 "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\ | |
625 "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\ | |
626 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\ | |
627 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\ | |
628 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\ | |
629 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\ | |
630 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\ | |
631 "paddw %%mm3, %%mm4 \n\t"\ | |
632 "movq %%mm2, %%mm0 \n\t"\ | |
633 "movq %%mm5, %%mm6 \n\t"\ | |
634 "movq %%mm4, %%mm3 \n\t"\ | |
635 "punpcklwd %%mm2, %%mm2 \n\t"\ | |
636 "punpcklwd %%mm5, %%mm5 \n\t"\ | |
637 "punpcklwd %%mm4, %%mm4 \n\t"\ | |
638 "paddw %%mm1, %%mm2 \n\t"\ | |
639 "paddw %%mm1, %%mm5 \n\t"\ | |
640 "paddw %%mm1, %%mm4 \n\t"\ | |
641 "punpckhwd %%mm0, %%mm0 \n\t"\ | |
642 "punpckhwd %%mm6, %%mm6 \n\t"\ | |
643 "punpckhwd %%mm3, %%mm3 \n\t"\ | |
644 "paddw %%mm7, %%mm0 \n\t"\ | |
645 "paddw %%mm7, %%mm6 \n\t"\ | |
646 "paddw %%mm7, %%mm3 \n\t"\ | |
647 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\ | |
648 "packuswb %%mm0, %%mm2 \n\t"\ | |
649 "packuswb %%mm6, %%mm5 \n\t"\ | |
650 "packuswb %%mm3, %%mm4 \n\t"\ | |
651 "pxor %%mm7, %%mm7 \n\t" | |
652 #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c) | |
653 | |
654 #define REAL_WRITEBGR32(dst, dstw, index) \ | |
655 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ | |
656 "movq %%mm2, %%mm1 \n\t" /* B */\ | |
657 "movq %%mm5, %%mm6 \n\t" /* R */\ | |
658 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\ | |
659 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\ | |
660 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\ | |
661 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\ | |
662 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\ | |
663 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\ | |
664 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\ | |
665 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\ | |
666 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\ | |
667 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\ | |
668 \ | |
669 MOVNTQ(%%mm0, (dst, index, 4))\ | |
670 MOVNTQ(%%mm2, 8(dst, index, 4))\ | |
671 MOVNTQ(%%mm1, 16(dst, index, 4))\ | |
672 MOVNTQ(%%mm3, 24(dst, index, 4))\ | |
673 \ | |
674 "add $8, "#index" \n\t"\ | |
675 "cmp "#dstw", "#index" \n\t"\ | |
676 " jb 1b \n\t" | |
677 #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index) | |
678 | |
679 #define REAL_WRITEBGR16(dst, dstw, index) \ | |
680 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\ | |
681 "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\ | |
682 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\ | |
683 "psrlq $3, %%mm2 \n\t"\ | |
684 \ | |
685 "movq %%mm2, %%mm1 \n\t"\ | |
686 "movq %%mm4, %%mm3 \n\t"\ | |
687 \ | |
688 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
689 "punpcklbw %%mm5, %%mm2 \n\t"\ | |
690 "punpckhbw %%mm7, %%mm4 \n\t"\ | |
691 "punpckhbw %%mm5, %%mm1 \n\t"\ | |
692 \ | |
693 "psllq $3, %%mm3 \n\t"\ | |
694 "psllq $3, %%mm4 \n\t"\ | |
695 \ | |
696 "por %%mm3, %%mm2 \n\t"\ | |
697 "por %%mm4, %%mm1 \n\t"\ | |
698 \ | |
699 MOVNTQ(%%mm2, (dst, index, 2))\ | |
700 MOVNTQ(%%mm1, 8(dst, index, 2))\ | |
701 \ | |
702 "add $8, "#index" \n\t"\ | |
703 "cmp "#dstw", "#index" \n\t"\ | |
704 " jb 1b \n\t" | |
705 #define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index) | |
706 | |
707 #define REAL_WRITEBGR15(dst, dstw, index) \ | |
708 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\ | |
709 "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\ | |
710 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\ | |
711 "psrlq $3, %%mm2 \n\t"\ | |
712 "psrlq $1, %%mm5 \n\t"\ | |
713 \ | |
714 "movq %%mm2, %%mm1 \n\t"\ | |
715 "movq %%mm4, %%mm3 \n\t"\ | |
716 \ | |
717 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
718 "punpcklbw %%mm5, %%mm2 \n\t"\ | |
719 "punpckhbw %%mm7, %%mm4 \n\t"\ | |
720 "punpckhbw %%mm5, %%mm1 \n\t"\ | |
721 \ | |
722 "psllq $2, %%mm3 \n\t"\ | |
723 "psllq $2, %%mm4 \n\t"\ | |
724 \ | |
725 "por %%mm3, %%mm2 \n\t"\ | |
726 "por %%mm4, %%mm1 \n\t"\ | |
727 \ | |
728 MOVNTQ(%%mm2, (dst, index, 2))\ | |
729 MOVNTQ(%%mm1, 8(dst, index, 2))\ | |
730 \ | |
731 "add $8, "#index" \n\t"\ | |
732 "cmp "#dstw", "#index" \n\t"\ | |
733 " jb 1b \n\t" | |
734 #define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index) | |
735 | |
736 #define WRITEBGR24OLD(dst, dstw, index) \ | |
737 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ | |
738 "movq %%mm2, %%mm1 \n\t" /* B */\ | |
739 "movq %%mm5, %%mm6 \n\t" /* R */\ | |
740 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\ | |
741 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\ | |
742 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\ | |
743 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\ | |
744 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\ | |
745 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\ | |
746 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\ | |
747 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\ | |
748 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\ | |
749 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\ | |
750 \ | |
751 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\ | |
752 "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\ | |
753 "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\ | |
754 "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\ | |
755 "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\ | |
756 "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\ | |
757 "psllq $48, %%mm2 \n\t" /* GB000000 1 */\ | |
758 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\ | |
759 \ | |
760 "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\ | |
761 "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\ | |
762 "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\ | |
763 "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\ | |
764 "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\ | |
765 "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\ | |
766 "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\ | |
767 "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\ | |
768 "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\ | |
769 "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\ | |
770 "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\ | |
771 "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\ | |
772 "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\ | |
773 \ | |
774 "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\ | |
775 "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\ | |
776 "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\ | |
777 "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\ | |
778 "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\ | |
779 "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\ | |
780 "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\ | |
781 "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\ | |
782 \ | |
783 MOVNTQ(%%mm0, (dst))\ | |
784 MOVNTQ(%%mm2, 8(dst))\ | |
785 MOVNTQ(%%mm3, 16(dst))\ | |
786 "add $24, "#dst" \n\t"\ | |
787 \ | |
788 "add $8, "#index" \n\t"\ | |
789 "cmp "#dstw", "#index" \n\t"\ | |
790 " jb 1b \n\t" | |
791 | |
792 #define WRITEBGR24MMX(dst, dstw, index) \ | |
793 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ | |
794 "movq %%mm2, %%mm1 \n\t" /* B */\ | |
795 "movq %%mm5, %%mm6 \n\t" /* R */\ | |
796 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\ | |
797 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\ | |
798 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\ | |
799 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\ | |
800 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\ | |
801 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\ | |
802 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\ | |
803 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\ | |
804 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\ | |
805 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\ | |
806 \ | |
807 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\ | |
808 "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\ | |
809 "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\ | |
810 "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\ | |
811 \ | |
812 "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\ | |
813 "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\ | |
814 "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\ | |
815 "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\ | |
816 \ | |
817 "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\ | |
818 "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\ | |
819 "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\ | |
820 "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\ | |
821 \ | |
822 "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\ | |
823 "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\ | |
824 "psllq $40, %%mm2 \n\t" /* GB000000 1 */\ | |
825 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\ | |
826 MOVNTQ(%%mm0, (dst))\ | |
827 \ | |
828 "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\ | |
829 "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\ | |
830 "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\ | |
831 "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\ | |
832 MOVNTQ(%%mm6, 8(dst))\ | |
833 \ | |
834 "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\ | |
835 "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\ | |
836 "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\ | |
837 MOVNTQ(%%mm5, 16(dst))\ | |
838 \ | |
839 "add $24, "#dst" \n\t"\ | |
840 \ | |
841 "add $8, "#index" \n\t"\ | |
842 "cmp "#dstw", "#index" \n\t"\ | |
843 " jb 1b \n\t" | |
844 | |
845 #define WRITEBGR24MMX2(dst, dstw, index) \ | |
846 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ | |
847 "movq "MANGLE(M24A)", %%mm0 \n\t"\ | |
848 "movq "MANGLE(M24C)", %%mm7 \n\t"\ | |
849 "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\ | |
850 "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\ | |
851 "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\ | |
852 \ | |
853 "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\ | |
854 "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\ | |
855 "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\ | |
856 \ | |
857 "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\ | |
858 "por %%mm1, %%mm6 \n\t"\ | |
859 "por %%mm3, %%mm6 \n\t"\ | |
860 MOVNTQ(%%mm6, (dst))\ | |
861 \ | |
862 "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\ | |
863 "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\ | |
864 "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\ | |
865 "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\ | |
866 \ | |
867 "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\ | |
868 "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\ | |
869 "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\ | |
870 \ | |
871 "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\ | |
872 "por %%mm3, %%mm6 \n\t"\ | |
873 MOVNTQ(%%mm6, 8(dst))\ | |
874 \ | |
875 "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\ | |
876 "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\ | |
877 "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\ | |
878 \ | |
879 "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\ | |
880 "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\ | |
881 "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\ | |
882 \ | |
883 "por %%mm1, %%mm3 \n\t"\ | |
884 "por %%mm3, %%mm6 \n\t"\ | |
885 MOVNTQ(%%mm6, 16(dst))\ | |
886 \ | |
887 "add $24, "#dst" \n\t"\ | |
888 \ | |
889 "add $8, "#index" \n\t"\ | |
890 "cmp "#dstw", "#index" \n\t"\ | |
891 " jb 1b \n\t" | |
892 | |
893 #ifdef HAVE_MMX2 | |
894 #undef WRITEBGR24 | |
895 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index) | |
896 #else | |
897 #undef WRITEBGR24 | |
898 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index) | |
899 #endif | |
900 | |
901 #define REAL_WRITEYUY2(dst, dstw, index) \ | |
902 "packuswb %%mm3, %%mm3 \n\t"\ | |
903 "packuswb %%mm4, %%mm4 \n\t"\ | |
904 "packuswb %%mm7, %%mm1 \n\t"\ | |
905 "punpcklbw %%mm4, %%mm3 \n\t"\ | |
906 "movq %%mm1, %%mm7 \n\t"\ | |
907 "punpcklbw %%mm3, %%mm1 \n\t"\ | |
908 "punpckhbw %%mm3, %%mm7 \n\t"\ | |
909 \ | |
910 MOVNTQ(%%mm1, (dst, index, 2))\ | |
911 MOVNTQ(%%mm7, 8(dst, index, 2))\ | |
912 \ | |
913 "add $8, "#index" \n\t"\ | |
914 "cmp "#dstw", "#index" \n\t"\ | |
915 " jb 1b \n\t" | |
916 #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index) | |
917 | |
918 | |
919 static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, | |
920 int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, | |
921 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW) | |
922 { | |
923 #ifdef HAVE_MMX | |
924 if(c->flags & SWS_ACCURATE_RND){ |
925 if(uDest){ |
926 YSCALEYUV2YV12X_ACCURATE( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW) |
927 YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW) |
928 } |
18861 | 929 |
930 YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW) |
931 }else{ |
932 if(uDest){ |
933 YSCALEYUV2YV12X( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW) |
934 YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW) |
935 } |
18861 | 936 |
937 YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW) |
938 } |
18861 | 939 #else |
940 #ifdef HAVE_ALTIVEC | |
941 yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize, | |
942 chrFilter, chrSrc, chrFilterSize, | |
943 dest, uDest, vDest, dstW, chrDstW); | |
944 #else //HAVE_ALTIVEC | |
945 yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize, | |
946 chrFilter, chrSrc, chrFilterSize, | |
947 dest, uDest, vDest, dstW, chrDstW); | |
948 #endif //!HAVE_ALTIVEC | |
949 #endif | |
950 } | |
951 | |
952 static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, | |
953 int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, | |
954 uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat) | |
955 { | |
956 yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize, | |
957 chrFilter, chrSrc, chrFilterSize, | |
958 dest, uDest, dstW, chrDstW, dstFormat); | |
959 } | |
960 | |
961 static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc, | |
962 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW) | |
963 { | |
964 #ifdef HAVE_MMX | |
965 if(uDest != NULL) | |
966 { | |
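/* note: the source/destination pointers below are passed pre-advanced by the width
   and the count operand is -chrDstW; presumably YSCALEYUV2YV121 walks an index up
   from -width towards 0 so the loop can close on the sign flag alone (the same
   idiom as the explicit yuy2ToY/uyvyToY loops further down), avoiding a cmp. */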
967 asm volatile( | |
968 YSCALEYUV2YV121 | |
969 :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW), | |
970 "g" (-chrDstW) | |
971 : "%"REG_a | |
972 ); | |
973 | |
974 asm volatile( | |
975 YSCALEYUV2YV121 | |
976 :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW), | |
977 "g" (-chrDstW) | |
978 : "%"REG_a | |
979 ); | |
980 } | |
981 | |
982 asm volatile( | |
983 YSCALEYUV2YV121 | |
984 :: "r" (lumSrc + dstW), "r" (dest + dstW), | |
985 "g" (-dstW) | |
986 : "%"REG_a | |
987 ); | |
988 #else | |
989 int i; | |
990 for(i=0; i<dstW; i++) | |
991 { | |
992 int val= lumSrc[i]>>7; | |
993 | |
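/* clamp to 0..255 with a single test: as long as the filter overshoot keeps val
   within roughly -256..511, any out-of-range value has bit 8 set, so one check
   covers both the negative and the >255 case */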
994 if(val&256){ | |
995 if(val<0) val=0; | |
996 else val=255; | |
997 } | |
998 | |
999 dest[i]= val; | |
1000 } | |
1001 | |
1002 if(uDest != NULL) | |
1003 for(i=0; i<chrDstW; i++) | |
1004 { | |
1005 int u=chrSrc[i]>>7; | |
1006 int v=chrSrc[i + 2048]>>7; | |
1007 | |
1008 if((u|v)&256){ | |
1009 if(u<0) u=0; | |
1010 else if (u>255) u=255; | |
1011 if(v<0) v=0; | |
1012 else if (v>255) v=255; | |
1013 } | |
1014 | |
1015 uDest[i]= u; | |
1016 vDest[i]= v; | |
1017 } | |
1018 #endif | |
1019 } | |
1020 | |
1021 | |
1022 /** | |
1023 * vertical scale YV12 to RGB | |
1024 */ | |
1025 static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, | |
1026 int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, | |
1027 uint8_t *dest, long dstW, long dstY) | |
1028 { | |
1029 long dummy=0; | |
1030 #ifdef HAVE_MMX |
1031 if(c->flags & SWS_ACCURATE_RND){ |
1032 switch(c->dstFormat){ |
1033 case IMGFMT_BGR32: |
1034 asm volatile( |
1035 YSCALEYUV2RGBX(YSCALEYUV2PACKEDX_ACCURATE) |
1036 WRITEBGR32(%4, %5, %%REGa) |
1037 |
1038 :: "r" (&c->redDither), |
1039 "m" (dummy), "m" (dummy), "m" (dummy), |
1040 "r" (dest), "m" (dstW) |
1041 : "%"REG_a, "%"REG_d, "%"REG_S |
1042 ); |
1043 return; |
1044 case IMGFMT_BGR24: |
1045 asm volatile( |
1046 YSCALEYUV2RGBX(YSCALEYUV2PACKEDX_ACCURATE) |
1047 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize |
1048 "add %4, %%"REG_b" \n\t" |
1049 WRITEBGR24(%%REGb, %5, %%REGa) |
1050 |
1051 :: "r" (&c->redDither), |
1052 "m" (dummy), "m" (dummy), "m" (dummy), |
1053 "r" (dest), "m" (dstW) |
1054 : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx |
1055 ); |
1056 return; |
1057 case IMGFMT_BGR15: |
1058 asm volatile( |
1059 YSCALEYUV2RGBX(YSCALEYUV2PACKEDX_ACCURATE) |
1060 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1061 #ifdef DITHER1XBPP |
1062 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1063 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" |
1064 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" |
1065 #endif |
1066 |
1067 WRITEBGR15(%4, %5, %%REGa) |
1068 |
1069 :: "r" (&c->redDither), |
1070 "m" (dummy), "m" (dummy), "m" (dummy), |
1071 "r" (dest), "m" (dstW) |
1072 : "%"REG_a, "%"REG_d, "%"REG_S |
1073 ); |
1074 return; |
1075 case IMGFMT_BGR16: |
1076 asm volatile( |
1077 YSCALEYUV2RGBX(YSCALEYUV2PACKEDX_ACCURATE) |
1078 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1079 #ifdef DITHER1XBPP |
1080 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1081 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" |
1082 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" |
1083 #endif |
1084 |
1085 WRITEBGR16(%4, %5, %%REGa) |
1086 |
1087 :: "r" (&c->redDither), |
1088 "m" (dummy), "m" (dummy), "m" (dummy), |
1089 "r" (dest), "m" (dstW) |
1090 : "%"REG_a, "%"REG_d, "%"REG_S |
1091 ); |
1092 return; |
1093 case IMGFMT_YUY2: |
1094 asm volatile( |
1095 YSCALEYUV2PACKEDX_ACCURATE |
1096 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1097 |
1098 "psraw $3, %%mm3 \n\t" |
1099 "psraw $3, %%mm4 \n\t" |
1100 "psraw $3, %%mm1 \n\t" |
1101 "psraw $3, %%mm7 \n\t" |
1102 WRITEYUY2(%4, %5, %%REGa) |
1103 |
1104 :: "r" (&c->redDither), |
1105 "m" (dummy), "m" (dummy), "m" (dummy), |
1106 "r" (dest), "m" (dstW) |
1107 : "%"REG_a, "%"REG_d, "%"REG_S |
1108 ); |
1109 return; |
1110 } |
1111 }else{ |
18861 | 1112 switch(c->dstFormat) |
1113 { | |
1114 case IMGFMT_BGR32: | |
1115 { | |
1116 asm volatile( | |
1117 YSCALEYUV2RGBX(YSCALEYUV2PACKEDX) |
18861 | 1118 WRITEBGR32(%4, %5, %%REGa) |
1119 | |
1120 :: "r" (&c->redDither), | |
1121 "m" (dummy), "m" (dummy), "m" (dummy), | |
1122 "r" (dest), "m" (dstW) | |
1123 : "%"REG_a, "%"REG_d, "%"REG_S | |
1124 ); | |
1125 } | |
1126 return; |
18861 | 1127 case IMGFMT_BGR24: |
1128 { | |
1129 asm volatile( | |
1130 YSCALEYUV2RGBX(YSCALEYUV2PACKEDX) |
18861 | 1131 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize |
1132 "add %4, %%"REG_b" \n\t" | |
1133 WRITEBGR24(%%REGb, %5, %%REGa) | |
1134 | |
1135 :: "r" (&c->redDither), | |
1136 "m" (dummy), "m" (dummy), "m" (dummy), | |
1137 "r" (dest), "m" (dstW) | |
1138 : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx | |
1139 ); | |
1140 } | |
1141 return; |
18861 | 1142 case IMGFMT_BGR15: |
1143 { | |
1144 asm volatile( | |
1145 YSCALEYUV2RGBX(YSCALEYUV2PACKEDX) |
18861 | 1146 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1147 #ifdef DITHER1XBPP | |
1148 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" | |
1149 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1150 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1151 #endif | |
1152 | |
1153 WRITEBGR15(%4, %5, %%REGa) | |
1154 | |
1155 :: "r" (&c->redDither), | |
1156 "m" (dummy), "m" (dummy), "m" (dummy), | |
1157 "r" (dest), "m" (dstW) | |
1158 : "%"REG_a, "%"REG_d, "%"REG_S | |
1159 ); | |
1160 } | |
1161 return; |
18861 | 1162 case IMGFMT_BGR16: |
1163 { | |
1164 asm volatile( | |
1165 YSCALEYUV2RGBX(YSCALEYUV2PACKEDX) |
18861 | 1166 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1167 #ifdef DITHER1XBPP | |
1168 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" | |
1169 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1170 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1171 #endif | |
1172 | |
1173 WRITEBGR16(%4, %5, %%REGa) | |
1174 | |
1175 :: "r" (&c->redDither), | |
1176 "m" (dummy), "m" (dummy), "m" (dummy), | |
1177 "r" (dest), "m" (dstW) | |
1178 : "%"REG_a, "%"REG_d, "%"REG_S | |
1179 ); | |
1180 } | |
1181 return; |
18861 | 1182 case IMGFMT_YUY2: |
1183 { | |
1184 asm volatile( | |
1185 YSCALEYUV2PACKEDX | |
1186 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ | |
1187 | |
1188 "psraw $3, %%mm3 \n\t" | |
1189 "psraw $3, %%mm4 \n\t" | |
1190 "psraw $3, %%mm1 \n\t" | |
1191 "psraw $3, %%mm7 \n\t" | |
1192 WRITEYUY2(%4, %5, %%REGa) | |
1193 | |
1194 :: "r" (&c->redDither), | |
1195 "m" (dummy), "m" (dummy), "m" (dummy), | |
1196 "r" (dest), "m" (dstW) | |
1197 : "%"REG_a, "%"REG_d, "%"REG_S | |
1198 ); | |
1199 } | |
1200 return; |
1201 } |
1202 } |
18861 | 1203 #endif |
1204 #ifdef HAVE_ALTIVEC | |
1205 /* The following list of supported dstFormat values should | |
1206 match what's found in the body of altivec_yuv2packedX() */ | |
1207 if(c->dstFormat==IMGFMT_ABGR || c->dstFormat==IMGFMT_BGRA || | |
1208 c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 || | |
1209 c->dstFormat==IMGFMT_RGBA || c->dstFormat==IMGFMT_ARGB) | |
1210 altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize, | |
1211 chrFilter, chrSrc, chrFilterSize, | |
1212 dest, dstW, dstY); | |
1213 else | |
1214 #endif | |
1215 yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize, | |
1216 chrFilter, chrSrc, chrFilterSize, | |
1217 dest, dstW, dstY); | |
1218 } | |
1219 | |
1220 /** | |
1221 * vertical bilinear scale YV12 to RGB | |
1222 */ | |
1223 static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1, | |
1224 uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) | |
1225 { | |
1226 int yalpha1=yalpha^4095; | |
1227 int uvalpha1=uvalpha^4095; | |
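/* yalpha and uvalpha are 12 bit blend weights (0..4095), so the XOR with 4095 is
   just 4095-yalpha: the complementary weight applied to the other source line */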
1228 int i; | |
1229 | |
1230 #if 0 //isn't used | |
1231 if(flags&SWS_FULL_CHR_H_INT) | |
1232 { | |
1233 switch(dstFormat) | |
1234 { | |
1235 #ifdef HAVE_MMX | |
1236 case IMGFMT_BGR32: | |
1237 asm volatile( | |
1238 | |
1239 | |
1240 FULL_YSCALEYUV2RGB | |
1241 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG | |
1242 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0 | |
1243 | |
1244 "movq %%mm3, %%mm1 \n\t" | |
1245 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0 | |
1246 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0 | |
1247 | |
1248 MOVNTQ(%%mm3, (%4, %%REGa, 4)) | |
1249 MOVNTQ(%%mm1, 8(%4, %%REGa, 4)) | |
1250 | |
1251 "add $4, %%"REG_a" \n\t" | |
1252 "cmp %5, %%"REG_a" \n\t" | |
1253 " jb 1b \n\t" | |
1254 | |
1255 | |
1256 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW), | |
1257 "m" (yalpha1), "m" (uvalpha1) | |
1258 : "%"REG_a | |
1259 ); | |
1260 break; | |
1261 case IMGFMT_BGR24: | |
1262 asm volatile( | |
1263 | |
1264 FULL_YSCALEYUV2RGB | |
1265 | |
1266 // lsb ... msb | |
1267 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG | |
1268 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0 | |
1269 | |
1270 "movq %%mm3, %%mm1 \n\t" | |
1271 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0 | |
1272 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0 | |
1273 | |
1274 "movq %%mm3, %%mm2 \n\t" // BGR0BGR0 | |
1275 "psrlq $8, %%mm3 \n\t" // GR0BGR00 | |
1276 "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000 | |
1277 "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00 | |
1278 "por %%mm2, %%mm3 \n\t" // BGRBGR00 | |
1279 "movq %%mm1, %%mm2 \n\t" | |
1280 "psllq $48, %%mm1 \n\t" // 000000BG | |
1281 "por %%mm1, %%mm3 \n\t" // BGRBGRBG | |
1282 | |
1283 "movq %%mm2, %%mm1 \n\t" // BGR0BGR0 | |
1284 "psrld $16, %%mm2 \n\t" // R000R000 | |
1285 "psrlq $24, %%mm1 \n\t" // 0BGR0000 | |
1286 "por %%mm2, %%mm1 \n\t" // RBGRR000 | |
1287 | |
1288 "mov %4, %%"REG_b" \n\t" | |
1289 "add %%"REG_a", %%"REG_b" \n\t" | |
1290 | |
1291 #ifdef HAVE_MMX2 | |
1292 //FIXME Alignment | |
1293 "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t" | |
1294 "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t" | |
1295 #else | |
1296 "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t" | |
1297 "psrlq $32, %%mm3 \n\t" | |
1298 "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t" | |
1299 "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t" | |
1300 #endif | |
1301 "add $4, %%"REG_a" \n\t" | |
1302 "cmp %5, %%"REG_a" \n\t" | |
1303 " jb 1b \n\t" | |
1304 | |
1305 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW), | |
1306 "m" (yalpha1), "m" (uvalpha1) | |
1307 : "%"REG_a, "%"REG_b | |
1308 ); | |
1309 break; | |
1310 case IMGFMT_BGR15: | |
1311 asm volatile( | |
1312 | |
1313 FULL_YSCALEYUV2RGB | |
1314 #ifdef DITHER1XBPP | |
1315 "paddusb "MANGLE(g5Dither)", %%mm1\n\t" | |
1316 "paddusb "MANGLE(r5Dither)", %%mm0\n\t" | |
1317 "paddusb "MANGLE(b5Dither)", %%mm3\n\t" | |
1318 #endif | |
1319 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G | |
1320 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B | |
1321 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R | |
1322 | |
1323 "psrlw $3, %%mm3 \n\t" | |
1324 "psllw $2, %%mm1 \n\t" | |
1325 "psllw $7, %%mm0 \n\t" | |
1326 "pand "MANGLE(g15Mask)", %%mm1 \n\t" | |
1327 "pand "MANGLE(r15Mask)", %%mm0 \n\t" | |
1328 | |
1329 "por %%mm3, %%mm1 \n\t" | |
1330 "por %%mm1, %%mm0 \n\t" | |
1331 | |
1332 MOVNTQ(%%mm0, (%4, %%REGa, 2)) | |
1333 | |
1334 "add $4, %%"REG_a" \n\t" | |
1335 "cmp %5, %%"REG_a" \n\t" | |
1336 " jb 1b \n\t" | |
1337 | |
1338 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), | |
1339 "m" (yalpha1), "m" (uvalpha1) | |
1340 : "%"REG_a | |
1341 ); | |
1342 break; | |
1343 case IMGFMT_BGR16: | |
1344 asm volatile( | |
1345 | |
1346 FULL_YSCALEYUV2RGB | |
1347 #ifdef DITHER1XBPP | |
1348 "paddusb "MANGLE(g6Dither)", %%mm1\n\t" | |
1349 "paddusb "MANGLE(r5Dither)", %%mm0\n\t" | |
1350 "paddusb "MANGLE(b5Dither)", %%mm3\n\t" | |
1351 #endif | |
1352 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G | |
1353 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B | |
1354 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R | |
1355 | |
1356 "psrlw $3, %%mm3 \n\t" | |
1357 "psllw $3, %%mm1 \n\t" | |
1358 "psllw $8, %%mm0 \n\t" | |
1359 "pand "MANGLE(g16Mask)", %%mm1 \n\t" | |
1360 "pand "MANGLE(r16Mask)", %%mm0 \n\t" | |
1361 | |
1362 "por %%mm3, %%mm1 \n\t" | |
1363 "por %%mm1, %%mm0 \n\t" | |
1364 | |
1365 MOVNTQ(%%mm0, (%4, %%REGa, 2)) | |
1366 | |
1367 "add $4, %%"REG_a" \n\t" | |
1368 "cmp %5, %%"REG_a" \n\t" | |
1369 " jb 1b \n\t" | |
1370 | |
1371 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), | |
1372 "m" (yalpha1), "m" (uvalpha1) | |
1373 : "%"REG_a | |
1374 ); | |
1375 break; | |
1376 #endif | |
1377 case IMGFMT_RGB32: | |
1378 #ifndef HAVE_MMX | |
1379 case IMGFMT_BGR32: | |
1380 #endif | |
1381 if(dstFormat==IMGFMT_BGR32) | |
1382 { | |
1383 int i; | |
1384 #ifdef WORDS_BIGENDIAN | |
1385 dest++; | |
1386 #endif | |
1387 for(i=0;i<dstW;i++){ | |
1388 // vertical linear interpolation && yuv2rgb in a single step: | |
1389 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; | |
1390 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); | |
1391 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); | |
1392 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)]; | |
1393 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)]; | |
1394 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)]; | |
1395 dest+= 4; | |
1396 } | |
1397 } | |
1398 else if(dstFormat==IMGFMT_BGR24) | |
1399 { | |
1400 int i; | |
1401 for(i=0;i<dstW;i++){ | |
1402 // vertical linear interpolation && yuv2rgb in a single step: | |
1403 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; | |
1404 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); | |
1405 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); | |
1406 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)]; | |
1407 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)]; | |
1408 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)]; | |
1409 dest+= 3; | |
1410 } | |
1411 } | |
1412 else if(dstFormat==IMGFMT_BGR16) | |
1413 { | |
1414 int i; | |
1415 for(i=0;i<dstW;i++){ | |
1416 // vertical linear interpolation && yuv2rgb in a single step: | |
1417 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; | |
1418 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); | |
1419 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); | |
1420 | |
1421 ((uint16_t*)dest)[i] = | |
1422 clip_table16b[(Y + yuvtab_40cf[U]) >>13] | | |
1423 clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] | | |
1424 clip_table16r[(Y + yuvtab_3343[V]) >>13]; | |
1425 } | |
1426 } | |
1427 else if(dstFormat==IMGFMT_BGR15) | |
1428 { | |
1429 int i; | |
1430 for(i=0;i<dstW;i++){ | |
1431 // vertical linear interpolation && yuv2rgb in a single step: | |
1432 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; | |
1433 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); | |
1434 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); | |
1435 | |
1436 ((uint16_t*)dest)[i] = | |
1437 clip_table15b[(Y + yuvtab_40cf[U]) >>13] | | |
1438 clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] | | |
1439 clip_table15r[(Y + yuvtab_3343[V]) >>13]; | |
1440 } | |
1441 } | |
1442 }//FULL_UV_IPOL | |
1443 else | |
1444 { | |
1445 #endif // if 0 | |
1446 #ifdef HAVE_MMX | |
1447 switch(c->dstFormat) | |
1448 { | |
1449 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( | |
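// The cases below need %ebx/%rbx and %ebp as extra pointers inside the loop, so
// %ebx is spilled into the context (the ESP_OFFSET slot relative to %5, which is
// &c->redDither) and %ebp is saved with push/pop around the scaler body; this is
// presumably also why buf0/buf1/uvbuf0/uvbuf1 are pinned to fixed registers in the
// constraints instead of being left to the register allocator.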
1450 case IMGFMT_BGR32: | |
1451 asm volatile( | |
1452 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1453 "mov %4, %%"REG_b" \n\t" | |
1454 "push %%"REG_BP" \n\t" | |
1455 YSCALEYUV2RGB(%%REGBP, %5) | |
1456 WRITEBGR32(%%REGb, 8280(%5), %%REGBP) | |
1457 "pop %%"REG_BP" \n\t" | |
1458 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1459 | |
1460 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1461 "a" (&c->redDither) | |
1462 ); | |
1463 return; | |
1464 case IMGFMT_BGR24: | |
1465 asm volatile( | |
1466 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1467 "mov %4, %%"REG_b" \n\t" | |
1468 "push %%"REG_BP" \n\t" | |
1469 YSCALEYUV2RGB(%%REGBP, %5) | |
1470 WRITEBGR24(%%REGb, 8280(%5), %%REGBP) | |
1471 "pop %%"REG_BP" \n\t" | |
1472 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1473 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1474 "a" (&c->redDither) | |
1475 ); | |
1476 return; | |
1477 case IMGFMT_BGR15: | |
1478 asm volatile( | |
1479 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1480 "mov %4, %%"REG_b" \n\t" | |
1481 "push %%"REG_BP" \n\t" | |
1482 YSCALEYUV2RGB(%%REGBP, %5) | |
1483 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ | |
1484 #ifdef DITHER1XBPP | |
1485 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" | |
1486 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1487 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1488 #endif | |
1489 | |
1490 WRITEBGR15(%%REGb, 8280(%5), %%REGBP) | |
1491 "pop %%"REG_BP" \n\t" | |
1492 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1493 | |
1494 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1495 "a" (&c->redDither) | |
1496 ); | |
1497 return; | |
1498 case IMGFMT_BGR16: | |
1499 asm volatile( | |
1500 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1501 "mov %4, %%"REG_b" \n\t" | |
1502 "push %%"REG_BP" \n\t" | |
1503 YSCALEYUV2RGB(%%REGBP, %5) | |
1504 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ | |
1505 #ifdef DITHER1XBPP | |
1506 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" | |
1507 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1508 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1509 #endif | |
1510 | |
1511 WRITEBGR16(%%REGb, 8280(%5), %%REGBP) | |
1512 "pop %%"REG_BP" \n\t" | |
1513 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1514 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1515 "a" (&c->redDither) | |
1516 ); | |
1517 return; | |
1518 case IMGFMT_YUY2: | |
1519 asm volatile( | |
1520 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1521 "mov %4, %%"REG_b" \n\t" | |
1522 "push %%"REG_BP" \n\t" | |
1523 YSCALEYUV2PACKED(%%REGBP, %5) | |
1524 WRITEYUY2(%%REGb, 8280(%5), %%REGBP) | |
1525 "pop %%"REG_BP" \n\t" | |
1526 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1527 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1528 "a" (&c->redDither) | |
1529 ); | |
1530 return; | |
1531 default: break; | |
1532 } | |
1533 #endif //HAVE_MMX | |
1534 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C) | |
1535 } | |
1536 | |
1537 /** | |
1538 * YV12 to RGB without scaling or interpolating | |
1539 */ | |
1540 static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1, | |
1541 uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y) | |
1542 { | |
1543 const int yalpha1=0; | |
1544 int i; | |
1545 | |
1546 uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1 | |
1547 const int yalpha= 4096; //FIXME ... | |
1548 | |
1549 if(flags&SWS_FULL_CHR_H_INT) | |
1550 { | |
1551 RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y); | |
1552 return; | |
1553 } | |
1554 | |
1555 #ifdef HAVE_MMX | |
1556 if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster | |
1557 { | |
1558 switch(dstFormat) | |
1559 { | |
1560 case IMGFMT_BGR32: | |
1561 asm volatile( | |
1562 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1563 "mov %4, %%"REG_b" \n\t" | |
1564 "push %%"REG_BP" \n\t" | |
1565 YSCALEYUV2RGB1(%%REGBP, %5) | |
1566 WRITEBGR32(%%REGb, 8280(%5), %%REGBP) | |
1567 "pop %%"REG_BP" \n\t" | |
1568 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1569 | |
1570 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1571 "a" (&c->redDither) | |
1572 ); | |
1573 return; | |
1574 case IMGFMT_BGR24: | |
1575 asm volatile( | |
1576 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1577 "mov %4, %%"REG_b" \n\t" | |
1578 "push %%"REG_BP" \n\t" | |
1579 YSCALEYUV2RGB1(%%REGBP, %5) | |
1580 WRITEBGR24(%%REGb, 8280(%5), %%REGBP) | |
1581 "pop %%"REG_BP" \n\t" | |
1582 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1583 | |
1584 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1585 "a" (&c->redDither) | |
1586 ); | |
1587 return; | |
1588 case IMGFMT_BGR15: | |
1589 asm volatile( | |
1590 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1591 "mov %4, %%"REG_b" \n\t" | |
1592 "push %%"REG_BP" \n\t" | |
1593 YSCALEYUV2RGB1(%%REGBP, %5) | |
1594 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ | |
1595 #ifdef DITHER1XBPP | |
1596 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" | |
1597 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1598 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1599 #endif | |
1600 WRITEBGR15(%%REGb, 8280(%5), %%REGBP) | |
1601 "pop %%"REG_BP" \n\t" | |
1602 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1603 | |
1604 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1605 "a" (&c->redDither) | |
1606 ); | |
1607 return; | |
1608 case IMGFMT_BGR16: | |
1609 asm volatile( | |
1610 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1611 "mov %4, %%"REG_b" \n\t" | |
1612 "push %%"REG_BP" \n\t" | |
1613 YSCALEYUV2RGB1(%%REGBP, %5) | |
1614 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ | |
1615 #ifdef DITHER1XBPP | |
1616 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" | |
1617 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1618 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1619 #endif | |
1620 | |
1621 WRITEBGR16(%%REGb, 8280(%5), %%REGBP) | |
1622 "pop %%"REG_BP" \n\t" | |
1623 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1624 | |
1625 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1626 "a" (&c->redDither) | |
1627 ); | |
1628 return; | |
1629 case IMGFMT_YUY2: | |
1630 asm volatile( | |
1631 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1632 "mov %4, %%"REG_b" \n\t" | |
1633 "push %%"REG_BP" \n\t" | |
1634 YSCALEYUV2PACKED1(%%REGBP, %5) | |
1635 WRITEYUY2(%%REGb, 8280(%5), %%REGBP) | |
1636 "pop %%"REG_BP" \n\t" | |
1637 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1638 | |
1639 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1640 "a" (&c->redDither) | |
1641 ); | |
1642 return; | |
1643 } | |
1644 } | |
1645 else | |
1646 { | |
1647 switch(dstFormat) | |
1648 { | |
1649 case IMGFMT_BGR32: | |
1650 asm volatile( | |
1651 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1652 "mov %4, %%"REG_b" \n\t" | |
1653 "push %%"REG_BP" \n\t" | |
1654 YSCALEYUV2RGB1b(%%REGBP, %5) | |
1655 WRITEBGR32(%%REGb, 8280(%5), %%REGBP) | |
1656 "pop %%"REG_BP" \n\t" | |
1657 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1658 | |
1659 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1660 "a" (&c->redDither) | |
1661 ); | |
1662 return; | |
1663 case IMGFMT_BGR24: | |
1664 asm volatile( | |
1665 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1666 "mov %4, %%"REG_b" \n\t" | |
1667 "push %%"REG_BP" \n\t" | |
1668 YSCALEYUV2RGB1b(%%REGBP, %5) | |
1669 WRITEBGR24(%%REGb, 8280(%5), %%REGBP) | |
1670 "pop %%"REG_BP" \n\t" | |
1671 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1672 | |
1673 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1674 "a" (&c->redDither) | |
1675 ); | |
1676 return; | |
1677 case IMGFMT_BGR15: | |
1678 asm volatile( | |
1679 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1680 "mov %4, %%"REG_b" \n\t" | |
1681 "push %%"REG_BP" \n\t" | |
1682 YSCALEYUV2RGB1b(%%REGBP, %5) | |
1683 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ | |
1684 #ifdef DITHER1XBPP | |
1685 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" | |
1686 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1687 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1688 #endif | |
1689 WRITEBGR15(%%REGb, 8280(%5), %%REGBP) | |
1690 "pop %%"REG_BP" \n\t" | |
1691 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1692 | |
1693 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1694 "a" (&c->redDither) | |
1695 ); | |
1696 return; | |
1697 case IMGFMT_BGR16: | |
1698 asm volatile( | |
1699 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1700 "mov %4, %%"REG_b" \n\t" | |
1701 "push %%"REG_BP" \n\t" | |
1702 YSCALEYUV2RGB1b(%%REGBP, %5) | |
1703 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ | |
1704 #ifdef DITHER1XBPP | |
1705 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" | |
1706 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1707 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1708 #endif | |
1709 | |
1710 WRITEBGR16(%%REGb, 8280(%5), %%REGBP) | |
1711 "pop %%"REG_BP" \n\t" | |
1712 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1713 | |
1714 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1715 "a" (&c->redDither) | |
1716 ); | |
1717 return; | |
1718 case IMGFMT_YUY2: | |
1719 asm volatile( | |
1720 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" | |
1721 "mov %4, %%"REG_b" \n\t" | |
1722 "push %%"REG_BP" \n\t" | |
1723 YSCALEYUV2PACKED1b(%%REGBP, %5) | |
1724 WRITEYUY2(%%REGb, 8280(%5), %%REGBP) | |
1725 "pop %%"REG_BP" \n\t" | |
1726 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1727 | |
1728 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1729 "a" (&c->redDither) | |
1730 ); | |
1731 return; | |
1732 } | |
1733 } | |
1734 #endif | |
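/* C fallback: uvalpha < 2048 presumably means the nearest chroma line is close
   enough, so the *1* variants read only uvbuf0, while the *1B* variants average
   uvbuf0 and uvbuf1 */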
1735 if( uvalpha < 2048 ) | |
1736 { | |
1737 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C) | |
1738 }else{ | |
1739 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C) | |
1740 } | |
1741 } | |
1742 | |
1743 //FIXME yuy2* can read up to 7 samples too much | |
1744 | |
1745 static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width) | |
1746 { | |
1747 #ifdef HAVE_MMX | |
1748 asm volatile( | |
1749 "movq "MANGLE(bm01010101)", %%mm2\n\t" | |
1750 "mov %0, %%"REG_a" \n\t" | |
1751 "1: \n\t" | |
1752 "movq (%1, %%"REG_a",2), %%mm0 \n\t" | |
1753 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" | |
1754 "pand %%mm2, %%mm0 \n\t" | |
1755 "pand %%mm2, %%mm1 \n\t" | |
1756 "packuswb %%mm1, %%mm0 \n\t" | |
1757 "movq %%mm0, (%2, %%"REG_a") \n\t" | |
1758 "add $8, %%"REG_a" \n\t" | |
1759 " js 1b \n\t" | |
1760 : : "g" (-width), "r" (src+width*2), "r" (dst+width) | |
1761 : "%"REG_a | |
1762 ); | |
1763 #else | |
1764 int i; | |
1765 for(i=0; i<width; i++) | |
1766 dst[i]= src[2*i]; | |
1767 #endif | |
1768 } | |
1769 | |
1770 static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width) | |
1771 { | |
1772 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) | |
1773 asm volatile( | |
1774 "movq "MANGLE(bm01010101)", %%mm4\n\t" | |
1775 "mov %0, %%"REG_a" \n\t" | |
1776 "1: \n\t" | |
1777 "movq (%1, %%"REG_a",4), %%mm0 \n\t" | |
1778 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t" | |
1779 "movq (%2, %%"REG_a",4), %%mm2 \n\t" | |
1780 "movq 8(%2, %%"REG_a",4), %%mm3 \n\t" | |
1781 PAVGB(%%mm2, %%mm0) | |
1782 PAVGB(%%mm3, %%mm1) | |
1783 "psrlw $8, %%mm0 \n\t" | |
1784 "psrlw $8, %%mm1 \n\t" | |
1785 "packuswb %%mm1, %%mm0 \n\t" | |
1786 "movq %%mm0, %%mm1 \n\t" | |
1787 "psrlw $8, %%mm0 \n\t" | |
1788 "pand %%mm4, %%mm1 \n\t" | |
1789 "packuswb %%mm0, %%mm0 \n\t" | |
1790 "packuswb %%mm1, %%mm1 \n\t" | |
1791 "movd %%mm0, (%4, %%"REG_a") \n\t" | |
1792 "movd %%mm1, (%3, %%"REG_a") \n\t" | |
1793 "add $4, %%"REG_a" \n\t" | |
1794 " js 1b \n\t" | |
1795 : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width) | |
1796 : "%"REG_a | |
1797 ); | |
1798 #else | |
1799 int i; | |
1800 for(i=0; i<width; i++) | |
1801 { | |
1802 dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1; | |
1803 dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1; | |
1804 } | |
1805 #endif | |
1806 } | |
1807 | |
1808 //this is almost identical to the previous one and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses | |
1809 static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width) | |
1810 { | |
1811 #ifdef HAVE_MMX | |
1812 asm volatile( | |
1813 "mov %0, %%"REG_a" \n\t" | |
1814 "1: \n\t" | |
1815 "movq (%1, %%"REG_a",2), %%mm0 \n\t" | |
1816 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" | |
1817 "psrlw $8, %%mm0 \n\t" | |
1818 "psrlw $8, %%mm1 \n\t" | |
1819 "packuswb %%mm1, %%mm0 \n\t" | |
1820 "movq %%mm0, (%2, %%"REG_a") \n\t" | |
1821 "add $8, %%"REG_a" \n\t" | |
1822 " js 1b \n\t" | |
1823 : : "g" (-width), "r" (src+width*2), "r" (dst+width) | |
1824 : "%"REG_a | |
1825 ); | |
1826 #else | |
1827 int i; | |
1828 for(i=0; i<width; i++) | |
1829 dst[i]= src[2*i+1]; | |
1830 #endif | |
1831 } | |
1832 | |
1833 static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width) | |
1834 { | |
1835 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) | |
1836 asm volatile( | |
1837 "movq "MANGLE(bm01010101)", %%mm4\n\t" | |
1838 "mov %0, %%"REG_a" \n\t" | |
1839 "1: \n\t" | |
1840 "movq (%1, %%"REG_a",4), %%mm0 \n\t" | |
1841 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t" | |
1842 "movq (%2, %%"REG_a",4), %%mm2 \n\t" | |
1843 "movq 8(%2, %%"REG_a",4), %%mm3 \n\t" | |
1844 PAVGB(%%mm2, %%mm0) | |
1845 PAVGB(%%mm3, %%mm1) | |
1846 "pand %%mm4, %%mm0 \n\t" | |
1847 "pand %%mm4, %%mm1 \n\t" | |
1848 "packuswb %%mm1, %%mm0 \n\t" | |
1849 "movq %%mm0, %%mm1 \n\t" | |
1850 "psrlw $8, %%mm0 \n\t" | |
1851 "pand %%mm4, %%mm1 \n\t" | |
1852 "packuswb %%mm0, %%mm0 \n\t" | |
1853 "packuswb %%mm1, %%mm1 \n\t" | |
1854 "movd %%mm0, (%4, %%"REG_a") \n\t" | |
1855 "movd %%mm1, (%3, %%"REG_a") \n\t" | |
1856 "add $4, %%"REG_a" \n\t" | |
1857 " js 1b \n\t" | |
1858 : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width) | |
1859 : "%"REG_a | |
1860 ); | |
1861 #else | |
1862 int i; | |
1863 for(i=0; i<width; i++) | |
1864 { | |
1865 dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1; | |
1866 dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1; | |
1867 } | |
1868 #endif | |
1869 } | |
1870 | |
1871 static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width) | |
1872 { | |
1873 int i; | |
1874 for(i=0; i<width; i++) | |
1875 { | |
1876 int b= ((uint32_t*)src)[i]&0xFF; | |
1877 int g= (((uint32_t*)src)[i]>>8)&0xFF; | |
1878 int r= (((uint32_t*)src)[i]>>16)&0xFF; | |
1879 | |
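/* 33<<(RGB2YUV_SHIFT-1) == 16.5*2^RGB2YUV_SHIFT, i.e. the +16 luma offset plus
   0.5 for rounding, folded into a single constant */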
1880 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT); | |
1881 } | |
1882 } | |
1883 | |
1884 static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1885 { | |
1886 int i; | |
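/* each chroma sample averages a 2x2 block of pixels; the four ARGB words are
   summed in packed form (l holds the B and R bytes masked with 0xFF00FF, h the
   G bytes), the per-channel sums stay below 4*255 so the fields cannot overflow
   into each other, and the extra factor of 4 is removed by the +2 in the shift */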
1887 for(i=0; i<width; i++) | |
1888 { | |
1889 const int a= ((uint32_t*)src1)[2*i+0]; | |
1890 const int e= ((uint32_t*)src1)[2*i+1]; | |
1891 const int c= ((uint32_t*)src2)[2*i+0]; | |
1892 const int d= ((uint32_t*)src2)[2*i+1]; | |
1893 const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF); | |
1894 const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00); | |
1895 const int b= l&0x3FF; | |
1896 const int g= h>>8; | |
1897 const int r= l>>16; | |
1898 | |
1899 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1900 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1901 } | |
1902 } | |
1903 | |
1904 static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width) | |
1905 { | |
1906 #ifdef HAVE_MMX | |
1907 asm volatile( | |
1908 "mov %2, %%"REG_a" \n\t" | |
1909 "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t" | |
1910 "movq "MANGLE(w1111)", %%mm5 \n\t" | |
1911 "pxor %%mm7, %%mm7 \n\t" | |
1912 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" | |
1913 ASMALIGN16 | |
1914 "1: \n\t" | |
1915 PREFETCH" 64(%0, %%"REG_b") \n\t" | |
1916 "movd (%0, %%"REG_b"), %%mm0 \n\t" | |
1917 "movd 3(%0, %%"REG_b"), %%mm1 \n\t" | |
1918 "punpcklbw %%mm7, %%mm0 \n\t" | |
1919 "punpcklbw %%mm7, %%mm1 \n\t" | |
1920 "movd 6(%0, %%"REG_b"), %%mm2 \n\t" | |
1921 "movd 9(%0, %%"REG_b"), %%mm3 \n\t" | |
1922 "punpcklbw %%mm7, %%mm2 \n\t" | |
1923 "punpcklbw %%mm7, %%mm3 \n\t" | |
1924 "pmaddwd %%mm6, %%mm0 \n\t" | |
1925 "pmaddwd %%mm6, %%mm1 \n\t" | |
1926 "pmaddwd %%mm6, %%mm2 \n\t" | |
1927 "pmaddwd %%mm6, %%mm3 \n\t" | |
1928 #ifndef FAST_BGR2YV12 | |
1929 "psrad $8, %%mm0 \n\t" | |
1930 "psrad $8, %%mm1 \n\t" | |
1931 "psrad $8, %%mm2 \n\t" | |
1932 "psrad $8, %%mm3 \n\t" | |
1933 #endif | |
1934 "packssdw %%mm1, %%mm0 \n\t" | |
1935 "packssdw %%mm3, %%mm2 \n\t" | |
1936 "pmaddwd %%mm5, %%mm0 \n\t" | |
1937 "pmaddwd %%mm5, %%mm2 \n\t" | |
1938 "packssdw %%mm2, %%mm0 \n\t" | |
1939 "psraw $7, %%mm0 \n\t" | |
1940 | |
1941 "movd 12(%0, %%"REG_b"), %%mm4 \n\t" | |
1942 "movd 15(%0, %%"REG_b"), %%mm1 \n\t" | |
1943 "punpcklbw %%mm7, %%mm4 \n\t" | |
1944 "punpcklbw %%mm7, %%mm1 \n\t" | |
1945 "movd 18(%0, %%"REG_b"), %%mm2 \n\t" | |
1946 "movd 21(%0, %%"REG_b"), %%mm3 \n\t" | |
1947 "punpcklbw %%mm7, %%mm2 \n\t" | |
1948 "punpcklbw %%mm7, %%mm3 \n\t" | |
1949 "pmaddwd %%mm6, %%mm4 \n\t" | |
1950 "pmaddwd %%mm6, %%mm1 \n\t" | |
1951 "pmaddwd %%mm6, %%mm2 \n\t" | |
1952 "pmaddwd %%mm6, %%mm3 \n\t" | |
1953 #ifndef FAST_BGR2YV12 | |
1954 "psrad $8, %%mm4 \n\t" | |
1955 "psrad $8, %%mm1 \n\t" | |
1956 "psrad $8, %%mm2 \n\t" | |
1957 "psrad $8, %%mm3 \n\t" | |
1958 #endif | |
1959 "packssdw %%mm1, %%mm4 \n\t" | |
1960 "packssdw %%mm3, %%mm2 \n\t" | |
1961 "pmaddwd %%mm5, %%mm4 \n\t" | |
1962 "pmaddwd %%mm5, %%mm2 \n\t" | |
1963 "add $24, %%"REG_b" \n\t" | |
1964 "packssdw %%mm2, %%mm4 \n\t" | |
1965 "psraw $7, %%mm4 \n\t" | |
1966 | |
1967 "packuswb %%mm4, %%mm0 \n\t" | |
1968 "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t" | |
1969 | |
1970 "movq %%mm0, (%1, %%"REG_a") \n\t" | |
1971 "add $8, %%"REG_a" \n\t" | |
1972 " js 1b \n\t" | |
1973 : : "r" (src+width*3), "r" (dst+width), "g" (-width) | |
1974 : "%"REG_a, "%"REG_b | |
1975 ); | |
1976 #else | |
1977 int i; | |
1978 for(i=0; i<width; i++) | |
1979 { | |
1980 int b= src[i*3+0]; | |
1981 int g= src[i*3+1]; | |
1982 int r= src[i*3+2]; | |
1983 | |
1984 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT); | |
1985 } | |
1986 #endif | |
1987 } | |
1988 | |
1989 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width) | |
1990 { | |
1991 #ifdef HAVE_MMX | |
1992 asm volatile( | |
1993 "mov %4, %%"REG_a" \n\t" | |
1994 "movq "MANGLE(w1111)", %%mm5 \n\t" | |
1995 "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t" | |
1996 "pxor %%mm7, %%mm7 \n\t" | |
1997 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b" \n\t" | |
1998 "add %%"REG_b", %%"REG_b" \n\t" | |
1999 ASMALIGN16 | |
2000 "1: \n\t" | |
2001 PREFETCH" 64(%0, %%"REG_b") \n\t" | |
2002 PREFETCH" 64(%1, %%"REG_b") \n\t" | |
2003 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) | |
2004 "movq (%0, %%"REG_b"), %%mm0 \n\t" | |
2005 "movq (%1, %%"REG_b"), %%mm1 \n\t" | |
2006 "movq 6(%0, %%"REG_b"), %%mm2 \n\t" | |
2007 "movq 6(%1, %%"REG_b"), %%mm3 \n\t" | |
2008 PAVGB(%%mm1, %%mm0) | |
2009 PAVGB(%%mm3, %%mm2) | |
2010 "movq %%mm0, %%mm1 \n\t" | |
2011 "movq %%mm2, %%mm3 \n\t" | |
2012 "psrlq $24, %%mm0 \n\t" | |
2013 "psrlq $24, %%mm2 \n\t" | |
2014 PAVGB(%%mm1, %%mm0) | |
2015 PAVGB(%%mm3, %%mm2) | |
2016 "punpcklbw %%mm7, %%mm0 \n\t" | |
2017 "punpcklbw %%mm7, %%mm2 \n\t" | |
2018 #else | |
2019 "movd (%0, %%"REG_b"), %%mm0 \n\t" | |
2020 "movd (%1, %%"REG_b"), %%mm1 \n\t" | |
2021 "movd 3(%0, %%"REG_b"), %%mm2 \n\t" | |
2022 "movd 3(%1, %%"REG_b"), %%mm3 \n\t" | |
2023 "punpcklbw %%mm7, %%mm0 \n\t" | |
2024 "punpcklbw %%mm7, %%mm1 \n\t" | |
2025 "punpcklbw %%mm7, %%mm2 \n\t" | |
2026 "punpcklbw %%mm7, %%mm3 \n\t" | |
2027 "paddw %%mm1, %%mm0 \n\t" | |
2028 "paddw %%mm3, %%mm2 \n\t" | |
2029 "paddw %%mm2, %%mm0 \n\t" | |
2030 "movd 6(%0, %%"REG_b"), %%mm4 \n\t" | |
2031 "movd 6(%1, %%"REG_b"), %%mm1 \n\t" | |
2032 "movd 9(%0, %%"REG_b"), %%mm2 \n\t" | |
2033 "movd 9(%1, %%"REG_b"), %%mm3 \n\t" | |
2034 "punpcklbw %%mm7, %%mm4 \n\t" | |
2035 "punpcklbw %%mm7, %%mm1 \n\t" | |
2036 "punpcklbw %%mm7, %%mm2 \n\t" | |
2037 "punpcklbw %%mm7, %%mm3 \n\t" | |
2038 "paddw %%mm1, %%mm4 \n\t" | |
2039 "paddw %%mm3, %%mm2 \n\t" | |
2040 "paddw %%mm4, %%mm2 \n\t" | |
2041 "psrlw $2, %%mm0 \n\t" | |
2042 "psrlw $2, %%mm2 \n\t" | |
2043 #endif | |
2044 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" | |
2045 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" | |
2046 | |
2047 "pmaddwd %%mm0, %%mm1 \n\t" | |
2048 "pmaddwd %%mm2, %%mm3 \n\t" | |
2049 "pmaddwd %%mm6, %%mm0 \n\t" | |
2050 "pmaddwd %%mm6, %%mm2 \n\t" | |
2051 #ifndef FAST_BGR2YV12 | |
2052 "psrad $8, %%mm0 \n\t" | |
2053 "psrad $8, %%mm1 \n\t" | |
2054 "psrad $8, %%mm2 \n\t" | |
2055 "psrad $8, %%mm3 \n\t" | |
2056 #endif | |
2057 "packssdw %%mm2, %%mm0 \n\t" | |
2058 "packssdw %%mm3, %%mm1 \n\t" | |
2059 "pmaddwd %%mm5, %%mm0 \n\t" | |
2060 "pmaddwd %%mm5, %%mm1 \n\t" | |
2061 "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0 | |
2062 "psraw $7, %%mm0 \n\t" | |
2063 | |
2064 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) | |
2065 "movq 12(%0, %%"REG_b"), %%mm4 \n\t" | |
2066 "movq 12(%1, %%"REG_b"), %%mm1 \n\t" | |
2067 "movq 18(%0, %%"REG_b"), %%mm2 \n\t" | |
2068 "movq 18(%1, %%"REG_b"), %%mm3 \n\t" | |
2069 PAVGB(%%mm1, %%mm4) | |
2070 PAVGB(%%mm3, %%mm2) | |
2071 "movq %%mm4, %%mm1 \n\t" | |
2072 "movq %%mm2, %%mm3 \n\t" | |
2073 "psrlq $24, %%mm4 \n\t" | |
2074 "psrlq $24, %%mm2 \n\t" | |
2075 PAVGB(%%mm1, %%mm4) | |
2076 PAVGB(%%mm3, %%mm2) | |
2077 "punpcklbw %%mm7, %%mm4 \n\t" | |
2078 "punpcklbw %%mm7, %%mm2 \n\t" | |
2079 #else | |
2080 "movd 12(%0, %%"REG_b"), %%mm4 \n\t" | |
2081 "movd 12(%1, %%"REG_b"), %%mm1 \n\t" | |
2082 "movd 15(%0, %%"REG_b"), %%mm2 \n\t" | |
2083 "movd 15(%1, %%"REG_b"), %%mm3 \n\t" | |
2084 "punpcklbw %%mm7, %%mm4 \n\t" | |
2085 "punpcklbw %%mm7, %%mm1 \n\t" | |
2086 "punpcklbw %%mm7, %%mm2 \n\t" | |
2087 "punpcklbw %%mm7, %%mm3 \n\t" | |
2088 "paddw %%mm1, %%mm4 \n\t" | |
2089 "paddw %%mm3, %%mm2 \n\t" | |
2090 "paddw %%mm2, %%mm4 \n\t" | |
2091 "movd 18(%0, %%"REG_b"), %%mm5 \n\t" | |
2092 "movd 18(%1, %%"REG_b"), %%mm1 \n\t" | |
2093 "movd 21(%0, %%"REG_b"), %%mm2 \n\t" | |
2094 "movd 21(%1, %%"REG_b"), %%mm3 \n\t" | |
2095 "punpcklbw %%mm7, %%mm5 \n\t" | |
2096 "punpcklbw %%mm7, %%mm1 \n\t" | |
2097 "punpcklbw %%mm7, %%mm2 \n\t" | |
2098 "punpcklbw %%mm7, %%mm3 \n\t" | |
2099 "paddw %%mm1, %%mm5 \n\t" | |
2100 "paddw %%mm3, %%mm2 \n\t" | |
2101 "paddw %%mm5, %%mm2 \n\t" | |
2102 "movq "MANGLE(w1111)", %%mm5 \n\t" | |
2103 "psrlw $2, %%mm4 \n\t" | |
2104 "psrlw $2, %%mm2 \n\t" | |
2105 #endif | |
2106 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" | |
2107 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" | |
2108 | |
2109 "pmaddwd %%mm4, %%mm1 \n\t" | |
2110 "pmaddwd %%mm2, %%mm3 \n\t" | |
2111 "pmaddwd %%mm6, %%mm4 \n\t" | |
2112 "pmaddwd %%mm6, %%mm2 \n\t" | |
2113 #ifndef FAST_BGR2YV12 | |
2114 "psrad $8, %%mm4 \n\t" | |
2115 "psrad $8, %%mm1 \n\t" | |
2116 "psrad $8, %%mm2 \n\t" | |
2117 "psrad $8, %%mm3 \n\t" | |
2118 #endif | |
2119 "packssdw %%mm2, %%mm4 \n\t" | |
2120 "packssdw %%mm3, %%mm1 \n\t" | |
2121 "pmaddwd %%mm5, %%mm4 \n\t" | |
2122 "pmaddwd %%mm5, %%mm1 \n\t" | |
2123 "add $24, %%"REG_b" \n\t" | |
2124 "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2 | |
2125 "psraw $7, %%mm4 \n\t" | |
2126 | |
2127 "movq %%mm0, %%mm1 \n\t" | |
2128 "punpckldq %%mm4, %%mm0 \n\t" | |
2129 "punpckhdq %%mm4, %%mm1 \n\t" | |
2130 "packsswb %%mm1, %%mm0 \n\t" | |
2131 "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t" | |
2132 | |
2133 "movd %%mm0, (%2, %%"REG_a") \n\t" | |
2134 "punpckhdq %%mm0, %%mm0 \n\t" | |
2135 "movd %%mm0, (%3, %%"REG_a") \n\t" | |
2136 "add $4, %%"REG_a" \n\t" | |
2137 " js 1b \n\t" | |
2138 : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width) | |
2139 : "%"REG_a, "%"REG_b | |
2140 ); | |
2141 #else | |
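/* C reference for the asm above: each chroma sample is computed from a 2x2
   block of BGR24 pixels (two adjacent pixels on each of the two source lines);
   the >>(RGB2YUV_SHIFT+2) below averages the four summed samples. */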
2142 int i; | |
2143 for(i=0; i<width; i++) | |
2144 { | |
2145 int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3]; | |
2146 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4]; | |
2147 int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5]; | |
2148 | |
2149 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2150 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2151 } | |
2152 #endif | |
2153 } | |
2154 | |
2155 static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width) | |
2156 { | |
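/* RGB565: b and r are 5 bits wide, g is 6. Using 2*RY/2*BY together with a
   shift of RGB2YUV_SHIFT-2 scales r and b by 8 and g by 4, roughly expanding
   the packed channels to 8-bit range before the usual luma weighting;
   +16 is the limited-range luma offset. */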
2157 int i; | |
2158 for(i=0; i<width; i++) | |
2159 { | |
2160 int d= ((uint16_t*)src)[i]; | |
2161 int b= d&0x1F; | |
2162 int g= (d>>5)&0x3F; | |
2163 int r= (d>>11)&0x1F; | |
2164 | |
2165 dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16; | |
2166 } | |
2167 } | |
2168 | |
2169 static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
2170 { | |
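/* SWAR trick: each 32-bit load covers two RGB565 pixels. The masks split the
   packed channels into separate bit fields, so the four samples per channel
   (two pixels from each of the two lines) can be summed without one field
   overflowing into the next; b, r and g below are thus sums of four samples. */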
2171 int i; | |
2172 for(i=0; i<width; i++) | |
2173 { | |
2174 int d0= ((uint32_t*)src1)[i]; | |
2175 int d1= ((uint32_t*)src2)[i]; | |
2176 | |
2177 int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F); | |
2178 int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F); | |
2179 | |
2180 int dh2= (dh>>11) + (dh<<21); | |
2181 int d= dh2 + dl; | |
2182 | |
2183 int b= d&0x7F; | |
2184 int r= (d>>11)&0x7F; | |
2185 int g= d>>21; | |
2186 dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128; | |
2187 dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128; | |
2188 } | |
2189 } | |
2190 | |
2191 static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width) | |
2192 { | |
2193 int i; | |
2194 for(i=0; i<width; i++) | |
2195 { | |
2196 int d= ((uint16_t*)src)[i]; | |
2197 int b= d&0x1F; | |
2198 int g= (d>>5)&0x1F; | |
2199 int r= (d>>10)&0x1F; | |
2200 | |
2201 dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16; | |
2202 } | |
2203 } | |
2204 | |
2205 static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
2206 { | |
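/* Same packed-sum trick as bgr16ToUV above, with RGB555 masks (5-bit green)
   and a shift of RGB2YUV_SHIFT+2-3: the -3 expands the 5-bit channels by 8,
   the +2 averages the four samples. */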
2207 int i; | |
2208 for(i=0; i<width; i++) | |
2209 { | |
2210 int d0= ((uint32_t*)src1)[i]; | |
2211 int d1= ((uint32_t*)src2)[i]; | |
2212 | |
2213 int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F); | |
2214 int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F); | |
2215 | |
2216 int dh2= (dh>>11) + (dh<<21); | |
2217 int d= dh2 + dl; | |
2218 | |
2219 int b= d&0x7F; | |
2220 int r= (d>>10)&0x7F; | |
2221 int g= d>>21; | |
2222 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128; | |
2223 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128; | |
2224 } | |
2225 } | |
2226 | |
2227 | |
2228 static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width) | |
2229 { | |
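/* 33<<(RGB2YUV_SHIFT-1) equals 16.5<<RGB2YUV_SHIFT, i.e. the constant folds
   the +16 luma offset and a half-LSB rounding term into the sum before the
   final shift (rgb24ToY below uses the same constant). */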
2230 int i; | |
2231 for(i=0; i<width; i++) | |
2232 { | |
2233 int r= ((uint32_t*)src)[i]&0xFF; | |
2234 int g= (((uint32_t*)src)[i]>>8)&0xFF; | |
2235 int b= (((uint32_t*)src)[i]>>16)&0xFF; | |
2236 | |
2237 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT); | |
2238 } | |
2239 } | |
2240 | |
2241 static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
2242 { | |
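/* Each chroma sample again comes from a 2x2 pixel block. l sums the red and
   blue bytes (mask 0xFF00FF) and h the green bytes of all four pixels; each
   per-channel sum (at most 4*255) stays inside its own bit field, hence
   r= l&0x3FF, b= l>>16, g= h>>8. */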
2243 int i; | |
2244 for(i=0; i<width; i++) | |
2245 { | |
2246 const int a= ((uint32_t*)src1)[2*i+0]; | |
2247 const int e= ((uint32_t*)src1)[2*i+1]; | |
2248 const int c= ((uint32_t*)src2)[2*i+0]; | |
2249 const int d= ((uint32_t*)src2)[2*i+1]; | |
2250 const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF); | |
2251 const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00); | |
2252 const int r= l&0x3FF; | |
2253 const int g= h>>8; | |
2254 const int b= l>>16; | |
2255 | |
2256 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2257 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2258 } | |
2259 } | |
2260 | |
2261 static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width) | |
2262 { | |
2263 int i; | |
2264 for(i=0; i<width; i++) | |
2265 { | |
2266 int r= src[i*3+0]; | |
2267 int g= src[i*3+1]; | |
2268 int b= src[i*3+2]; | |
2269 | |
2270 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT); | |
2271 } | |
2272 } | |
2273 | |
2274 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
2275 { | |
2276 int i; | |
2277 for(i=0; i<width; i++) | |
2278 { | |
2279 int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3]; | |
2280 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4]; | |
2281 int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5]; | |
2282 | |
2283 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2284 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2285 } | |
2286 } | |
2287 | |
2288 | |
2289 // Bilinear / Bicubic scaling | |
2290 static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, | |
2291 int16_t *filter, int16_t *filterPos, long filterSize) | |
2292 { | |
2293 #ifdef HAVE_MMX | |
2294 assert(filterSize % 4 == 0 && filterSize>0); | |
2295 if(filterSize==4) // always true for upscaling, sometimes for downscaling too | |
2296 { | |
2297 long counter= -2*dstW; | |
2298 filter-= counter*2; | |
2299 filterPos-= counter/2; | |
2300 dst-= counter/2; | |
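/* Negative-counter loop idiom: dst, filterPos and filter are biased past
   their ends and REG_BP holds a negative byte offset that is counted up
   towards zero, so a single add/jnc pair both advances the index and ends
   the loop; each iteration produces two output samples. */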
2301 asm volatile( | |
2302 "pxor %%mm7, %%mm7 \n\t" | |
2303 "movq "MANGLE(w02)", %%mm6 \n\t" | |
2304 "push %%"REG_BP" \n\t" // we use 7 regs here ... | |
2305 "mov %%"REG_a", %%"REG_BP" \n\t" | |
2306 ASMALIGN16 | |
2307 "1: \n\t" | |
2308 "movzwl (%2, %%"REG_BP"), %%eax \n\t" | |
2309 "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t" | |
2310 "movq (%1, %%"REG_BP", 4), %%mm1\n\t" | |
2311 "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t" | |
2312 "movd (%3, %%"REG_a"), %%mm0 \n\t" | |
2313 "movd (%3, %%"REG_b"), %%mm2 \n\t" | |
2314 "punpcklbw %%mm7, %%mm0 \n\t" | |
2315 "punpcklbw %%mm7, %%mm2 \n\t" | |
2316 "pmaddwd %%mm1, %%mm0 \n\t" | |
2317 "pmaddwd %%mm2, %%mm3 \n\t" | |
2318 "psrad $8, %%mm0 \n\t" | |
2319 "psrad $8, %%mm3 \n\t" | |
2320 "packssdw %%mm3, %%mm0 \n\t" | |
2321 "pmaddwd %%mm6, %%mm0 \n\t" | |
2322 "packssdw %%mm0, %%mm0 \n\t" | |
2323 "movd %%mm0, (%4, %%"REG_BP") \n\t" | |
2324 "add $4, %%"REG_BP" \n\t" | |
2325 " jnc 1b \n\t" | |
2326 | |
2327 "pop %%"REG_BP" \n\t" | |
2328 : "+a" (counter) | |
2329 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst) | |
2330 : "%"REG_b | |
2331 ); | |
2332 } | |
2333 else if(filterSize==8) | |
2334 { | |
2335 long counter= -2*dstW; | |
2336 filter-= counter*4; | |
2337 filterPos-= counter/2; | |
2338 dst-= counter/2; | |
2339 asm volatile( | |
2340 "pxor %%mm7, %%mm7 \n\t" | |
2341 "movq "MANGLE(w02)", %%mm6 \n\t" | |
2342 "push %%"REG_BP" \n\t" // we use 7 regs here ... | |
2343 "mov %%"REG_a", %%"REG_BP" \n\t" | |
2344 ASMALIGN16 | |
2345 "1: \n\t" | |
2346 "movzwl (%2, %%"REG_BP"), %%eax \n\t" | |
2347 "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t" | |
2348 "movq (%1, %%"REG_BP", 8), %%mm1\n\t" | |
2349 "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t" | |
2350 "movd (%3, %%"REG_a"), %%mm0 \n\t" | |
2351 "movd (%3, %%"REG_b"), %%mm2 \n\t" | |
2352 "punpcklbw %%mm7, %%mm0 \n\t" | |
2353 "punpcklbw %%mm7, %%mm2 \n\t" | |
2354 "pmaddwd %%mm1, %%mm0 \n\t" | |
2355 "pmaddwd %%mm2, %%mm3 \n\t" | |
2356 | |
2357 "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t" | |
2358 "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t" | |
2359 "movd 4(%3, %%"REG_a"), %%mm4 \n\t" | |
2360 "movd 4(%3, %%"REG_b"), %%mm2 \n\t" | |
2361 "punpcklbw %%mm7, %%mm4 \n\t" | |
2362 "punpcklbw %%mm7, %%mm2 \n\t" | |
2363 "pmaddwd %%mm1, %%mm4 \n\t" | |
2364 "pmaddwd %%mm2, %%mm5 \n\t" | |
2365 "paddd %%mm4, %%mm0 \n\t" | |
2366 "paddd %%mm5, %%mm3 \n\t" | |
2367 | |
2368 "psrad $8, %%mm0 \n\t" | |
2369 "psrad $8, %%mm3 \n\t" | |
2370 "packssdw %%mm3, %%mm0 \n\t" | |
2371 "pmaddwd %%mm6, %%mm0 \n\t" | |
2372 "packssdw %%mm0, %%mm0 \n\t" | |
2373 "movd %%mm0, (%4, %%"REG_BP") \n\t" | |
2374 "add $4, %%"REG_BP" \n\t" | |
2375 " jnc 1b \n\t" | |
2376 | |
2377 "pop %%"REG_BP" \n\t" | |
2378 : "+a" (counter) | |
2379 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst) | |
2380 : "%"REG_b | |
2381 ); | |
2382 } | |
2383 else | |
2384 { | |
2385 uint8_t *offset = src+filterSize; | |
2386 long counter= -2*dstW; | |
2387 // filter-= counter*filterSize/2; | |
2388 filterPos-= counter/2; | |
2389 dst-= counter/2; | |
2390 asm volatile( | |
2391 "pxor %%mm7, %%mm7 \n\t" | |
2392 "movq "MANGLE(w02)", %%mm6 \n\t" | |
2393 ASMALIGN16 | |
2394 "1: \n\t" | |
2395 "mov %2, %%"REG_c" \n\t" | |
2396 "movzwl (%%"REG_c", %0), %%eax \n\t" | |
2397 "movzwl 2(%%"REG_c", %0), %%ebx \n\t" | |
2398 "mov %5, %%"REG_c" \n\t" | |
2399 "pxor %%mm4, %%mm4 \n\t" | |
2400 "pxor %%mm5, %%mm5 \n\t" | |
2401 "2: \n\t" | |
2402 "movq (%1), %%mm1 \n\t" | |
2403 "movq (%1, %6), %%mm3 \n\t" | |
2404 "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t" | |
2405 "movd (%%"REG_c", %%"REG_b"), %%mm2\n\t" | |
2406 "punpcklbw %%mm7, %%mm0 \n\t" | |
2407 "punpcklbw %%mm7, %%mm2 \n\t" | |
2408 "pmaddwd %%mm1, %%mm0 \n\t" | |
2409 "pmaddwd %%mm2, %%mm3 \n\t" | |
2410 "paddd %%mm3, %%mm5 \n\t" | |
2411 "paddd %%mm0, %%mm4 \n\t" | |
2412 "add $8, %1 \n\t" | |
2413 "add $4, %%"REG_c" \n\t" | |
2414 "cmp %4, %%"REG_c" \n\t" | |
2415 " jb 2b \n\t" | |
2416 "add %6, %1 \n\t" | |
2417 "psrad $8, %%mm4 \n\t" | |
2418 "psrad $8, %%mm5 \n\t" | |
2419 "packssdw %%mm5, %%mm4 \n\t" | |
2420 "pmaddwd %%mm6, %%mm4 \n\t" | |
2421 "packssdw %%mm4, %%mm4 \n\t" | |
2422 "mov %3, %%"REG_a" \n\t" | |
2423 "movd %%mm4, (%%"REG_a", %0) \n\t" | |
2424 "add $4, %0 \n\t" | |
2425 " jnc 1b \n\t" | |
2426 | |
2427 : "+r" (counter), "+r" (filter) | |
2428 : "m" (filterPos), "m" (dst), "m"(offset), | |
2429 "m" (src), "r" (filterSize*2) | |
2430 : "%"REG_b, "%"REG_a, "%"REG_c | |
2431 ); | |
2432 } | |
2433 #else | |
2434 #ifdef HAVE_ALTIVEC | |
2435 hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize); | |
2436 #else | |
2437 int i; | |
2438 for(i=0; i<dstW; i++) | |
2439 { | |
2440 int j; | |
2441 int srcPos= filterPos[i]; | |
2442 int val=0; | |
2443 // printf("filterPos: %d\n", filterPos[i]); | |
2444 for(j=0; j<filterSize; j++) | |
2445 { | |
2446 // printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]); | |
2447 val += ((int)src[srcPos + j])*filter[filterSize*i + j]; | |
2448 } | |
2449 // filter += hFilterSize; | |
2450 dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ... | |
2451 // dst[i] = val>>7; | |
2452 } | |
2453 #endif | |
2454 #endif | |
2455 } | |
2456 // *** horizontal scale Y line to temp buffer | |
2457 static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc, | |
2458 int flags, int canMMX2BeUsed, int16_t *hLumFilter, | |
2459 int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode, | |
2460 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter, | |
2461 int32_t *mmx2FilterPos) | |
2462 { | |
2463 if(srcFormat==IMGFMT_YUY2) | |
2464 { | |
2465 RENAME(yuy2ToY)(formatConvBuffer, src, srcW); | |
2466 src= formatConvBuffer; | |
2467 } | |
2468 else if(srcFormat==IMGFMT_UYVY) | |
2469 { | |
2470 RENAME(uyvyToY)(formatConvBuffer, src, srcW); | |
2471 src= formatConvBuffer; | |
2472 } | |
2473 else if(srcFormat==IMGFMT_BGR32) | |
2474 { | |
2475 RENAME(bgr32ToY)(formatConvBuffer, src, srcW); | |
2476 src= formatConvBuffer; | |
2477 } | |
2478 else if(srcFormat==IMGFMT_BGR24) | |
2479 { | |
2480 RENAME(bgr24ToY)(formatConvBuffer, src, srcW); | |
2481 src= formatConvBuffer; | |
2482 } | |
2483 else if(srcFormat==IMGFMT_BGR16) | |
2484 { | |
2485 RENAME(bgr16ToY)(formatConvBuffer, src, srcW); | |
2486 src= formatConvBuffer; | |
2487 } | |
2488 else if(srcFormat==IMGFMT_BGR15) | |
2489 { | |
2490 RENAME(bgr15ToY)(formatConvBuffer, src, srcW); | |
2491 src= formatConvBuffer; | |
2492 } | |
2493 else if(srcFormat==IMGFMT_RGB32) | |
2494 { | |
2495 RENAME(rgb32ToY)(formatConvBuffer, src, srcW); | |
2496 src= formatConvBuffer; | |
2497 } | |
2498 else if(srcFormat==IMGFMT_RGB24) | |
2499 { | |
2500 RENAME(rgb24ToY)(formatConvBuffer, src, srcW); | |
2501 src= formatConvBuffer; | |
2502 } | |
2503 | |
2504 #ifdef HAVE_MMX | |
2505 // use the new MMX scaler if the MMX2 one can't be used (it's faster than the x86 asm one) | |
2506 if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) | |
2507 #else | |
2508 if(!(flags&SWS_FAST_BILINEAR)) | |
2509 #endif | |
2510 { | |
2511 RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize); | |
2512 } | |
2513 else // Fast Bilinear upscale / crap downscale | |
2514 { | |
2515 #if defined(ARCH_X86) || defined(ARCH_X86_64) | |
2516 #ifdef HAVE_MMX2 | |
2517 int i; | |
2518 if(canMMX2BeUsed) | |
2519 { | |
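/* funnyYCode points at horizontal-scaler code generated at run time (hence
   the indirect call); each FUNNY_Y_CODE block below runs it once and then
   advances the source/destination pointers to the next part of the line. */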
2520 asm volatile( | |
2521 "pxor %%mm7, %%mm7 \n\t" | |
2522 "mov %0, %%"REG_c" \n\t" | |
2523 "mov %1, %%"REG_D" \n\t" | |
2524 "mov %2, %%"REG_d" \n\t" | |
2525 "mov %3, %%"REG_b" \n\t" | |
2526 "xor %%"REG_a", %%"REG_a" \n\t" // i | |
2527 PREFETCH" (%%"REG_c") \n\t" | |
2528 PREFETCH" 32(%%"REG_c") \n\t" | |
2529 PREFETCH" 64(%%"REG_c") \n\t" | |
2530 | |
2531 #ifdef ARCH_X86_64 | |
2532 | |
2533 #define FUNNY_Y_CODE \ | |
2534 "movl (%%"REG_b"), %%esi \n\t"\ | |
2535 "call *%4 \n\t"\ | |
2536 "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\ | |
2537 "add %%"REG_S", %%"REG_c" \n\t"\ | |
2538 "add %%"REG_a", %%"REG_D" \n\t"\ | |
2539 "xor %%"REG_a", %%"REG_a" \n\t"\ | |
2540 | |
2541 #else | |
2542 | |
2543 #define FUNNY_Y_CODE \ | |
2544 "movl (%%"REG_b"), %%esi \n\t"\ | |
2545 "call *%4 \n\t"\ | |
2546 "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\ | |
2547 "add %%"REG_a", %%"REG_D" \n\t"\ | |
2548 "xor %%"REG_a", %%"REG_a" \n\t"\ | |
2549 | |
2550 #endif | |
2551 | |
2552 FUNNY_Y_CODE | |
2553 FUNNY_Y_CODE | |
2554 FUNNY_Y_CODE | |
2555 FUNNY_Y_CODE | |
2556 FUNNY_Y_CODE | |
2557 FUNNY_Y_CODE | |
2558 FUNNY_Y_CODE | |
2559 FUNNY_Y_CODE | |
2560 | |
2561 :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos), | |
2562 "m" (funnyYCode) | |
2563 : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D | |
2564 ); | |
2565 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; | |
2566 } | |
2567 else | |
2568 { | |
2569 #endif | |
2570 long xInc_shr16 = xInc >> 16; | |
2571 uint16_t xInc_mask = xInc & 0xffff; | |
2572 //NO MMX just normal asm ... | |
2573 asm volatile( | |
2574 "xor %%"REG_a", %%"REG_a" \n\t" // i | |
2575 "xor %%"REG_b", %%"REG_b" \n\t" // xx | |
2576 "xorl %%ecx, %%ecx \n\t" // 2*xalpha | |
2577 ASMALIGN16 | |
2578 "1: \n\t" | |
2579 "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx] | |
2580 "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1] | |
2581 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] | |
2582 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2583 "shll $16, %%edi \n\t" | |
2584 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
2585 "mov %1, %%"REG_D" \n\t" | |
2586 "shrl $9, %%esi \n\t" | |
2587 "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t" | |
2588 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF | |
2589 "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry | |
2590 | |
2591 "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx] | |
2592 "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1] | |
2593 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] | |
2594 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2595 "shll $16, %%edi \n\t" | |
2596 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
2597 "mov %1, %%"REG_D" \n\t" | |
2598 "shrl $9, %%esi \n\t" | |
2599 "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t" | |
2600 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF | |
2601 "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry | |
2602 | |
2603 | |
2604 "add $2, %%"REG_a" \n\t" | |
2605 "cmp %2, %%"REG_a" \n\t" | |
2606 " jb 1b \n\t" | |
2607 | |
2608 | |
2609 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask) | |
2610 : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi" | |
2611 ); | |
2612 #ifdef HAVE_MMX2 | |
2613 } //if MMX2 can't be used | |
2614 #endif | |
2615 #else | |
2616 int i; | |
2617 unsigned int xpos=0; | |
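// 16.16 fixed-point walk over the source line: xx is the integer position,
// xalpha the 7-bit fractional blend factor; results are kept in 15-bit precision.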
2618 for(i=0;i<dstWidth;i++) | |
2619 { | |
2620 register unsigned int xx=xpos>>16; | |
2621 register unsigned int xalpha=(xpos&0xFFFF)>>9; | |
2622 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha; | |
2623 xpos+=xInc; | |
2624 } | |
2625 #endif | |
2626 } | |
2627 } | |
2628 | |
2629 inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2, | |
2630 int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter, | |
2631 int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode, | |
2632 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter, | |
2633 int32_t *mmx2FilterPos) | |
2634 { | |
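/* U and V share one temporary buffer: U samples go to dst[0..] and V samples
   to dst[2048..] (the asm below addresses the same offset as 4096 bytes). */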
2635 if(srcFormat==IMGFMT_YUY2) | |
2636 { | |
2637 RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2638 src1= formatConvBuffer; | |
2639 src2= formatConvBuffer+2048; | |
2640 } | |
2641 else if(srcFormat==IMGFMT_UYVY) | |
2642 { | |
2643 RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2644 src1= formatConvBuffer; | |
2645 src2= formatConvBuffer+2048; | |
2646 } | |
2647 else if(srcFormat==IMGFMT_BGR32) | |
2648 { | |
2649 RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2650 src1= formatConvBuffer; | |
2651 src2= formatConvBuffer+2048; | |
2652 } | |
2653 else if(srcFormat==IMGFMT_BGR24) | |
2654 { | |
2655 RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2656 src1= formatConvBuffer; | |
2657 src2= formatConvBuffer+2048; | |
2658 } | |
2659 else if(srcFormat==IMGFMT_BGR16) | |
2660 { | |
2661 RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2662 src1= formatConvBuffer; | |
2663 src2= formatConvBuffer+2048; | |
2664 } | |
2665 else if(srcFormat==IMGFMT_BGR15) | |
2666 { | |
2667 RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2668 src1= formatConvBuffer; | |
2669 src2= formatConvBuffer+2048; | |
2670 } | |
2671 else if(srcFormat==IMGFMT_RGB32) | |
2672 { | |
2673 RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2674 src1= formatConvBuffer; | |
2675 src2= formatConvBuffer+2048; | |
2676 } | |
2677 else if(srcFormat==IMGFMT_RGB24) | |
2678 { | |
2679 RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2680 src1= formatConvBuffer; | |
2681 src2= formatConvBuffer+2048; | |
2682 } | |
2683 else if(isGray(srcFormat)) | |
2684 { | |
2685 return; | |
2686 } | |
2687 | |
2688 #ifdef HAVE_MMX | |
2689 // use the new MMX scaler if the MMX2 one can't be used (it's faster than the x86 asm one) | |
2690 if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) | |
2691 #else | |
2692 if(!(flags&SWS_FAST_BILINEAR)) | |
2693 #endif | |
2694 { | |
2695 RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); | |
2696 RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); | |
2697 } | |
2698 else // Fast Bilinear upscale / crap downscale | |
2699 { | |
2700 #if defined(ARCH_X86) || defined(ARCH_X86_64) | |
2701 #ifdef HAVE_MMX2 | |
2702 int i; | |
2703 if(canMMX2BeUsed) | |
2704 { | |
2705 asm volatile( | |
2706 "pxor %%mm7, %%mm7 \n\t" | |
2707 "mov %0, %%"REG_c" \n\t" | |
2708 "mov %1, %%"REG_D" \n\t" | |
2709 "mov %2, %%"REG_d" \n\t" | |
2710 "mov %3, %%"REG_b" \n\t" | |
2711 "xor %%"REG_a", %%"REG_a" \n\t" // i | |
2712 PREFETCH" (%%"REG_c") \n\t" | |
2713 PREFETCH" 32(%%"REG_c") \n\t" | |
2714 PREFETCH" 64(%%"REG_c") \n\t" | |
2715 | |
2716 #ifdef ARCH_X86_64 | |
2717 | |
2718 #define FUNNY_UV_CODE \ | |
2719 "movl (%%"REG_b"), %%esi \n\t"\ | |
2720 "call *%4 \n\t"\ | |
2721 "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\ | |
2722 "add %%"REG_S", %%"REG_c" \n\t"\ | |
2723 "add %%"REG_a", %%"REG_D" \n\t"\ | |
2724 "xor %%"REG_a", %%"REG_a" \n\t"\ | |
2725 | |
2726 #else | |
2727 | |
2728 #define FUNNY_UV_CODE \ | |
2729 "movl (%%"REG_b"), %%esi \n\t"\ | |
2730 "call *%4 \n\t"\ | |
2731 "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\ | |
2732 "add %%"REG_a", %%"REG_D" \n\t"\ | |
2733 "xor %%"REG_a", %%"REG_a" \n\t"\ | |
2734 | |
2735 #endif | |
2736 | |
2737 FUNNY_UV_CODE | |
2738 FUNNY_UV_CODE | |
2739 FUNNY_UV_CODE | |
2740 FUNNY_UV_CODE | |
2741 "xor %%"REG_a", %%"REG_a" \n\t" // i | |
2742 "mov %5, %%"REG_c" \n\t" // src | |
2743 "mov %1, %%"REG_D" \n\t" // buf1 | |
2744 "add $4096, %%"REG_D" \n\t" | |
2745 PREFETCH" (%%"REG_c") \n\t" | |
2746 PREFETCH" 32(%%"REG_c") \n\t" | |
2747 PREFETCH" 64(%%"REG_c") \n\t" | |
2748 | |
2749 FUNNY_UV_CODE | |
2750 FUNNY_UV_CODE | |
2751 FUNNY_UV_CODE | |
2752 FUNNY_UV_CODE | |
2753 | |
2754 :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos), | |
2755 "m" (funnyUVCode), "m" (src2) | |
2756 : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D | |
2757 ); | |
2758 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) | |
2759 { | |
2760 // printf("%d %d %d\n", dstWidth, i, srcW); | |
2761 dst[i] = src1[srcW-1]*128; | |
2762 dst[i+2048] = src2[srcW-1]*128; | |
2763 } | |
2764 } | |
2765 else | |
2766 { | |
2767 #endif | |
2768 long xInc_shr16 = (long) (xInc >> 16); | |
2769 uint16_t xInc_mask = xInc & 0xffff; | |
2770 asm volatile( | |
2771 "xor %%"REG_a", %%"REG_a" \n\t" // i | |
2772 "xor %%"REG_b", %%"REG_b" \n\t" // xx | |
2773 "xorl %%ecx, %%ecx \n\t" // 2*xalpha | |
2774 ASMALIGN16 | |
2775 "1: \n\t" | |
2776 "mov %0, %%"REG_S" \n\t" | |
2777 "movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx] | |
2778 "movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1] | |
2779 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] | |
2780 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2781 "shll $16, %%edi \n\t" | |
2782 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
2783 "mov %1, %%"REG_D" \n\t" | |
2784 "shrl $9, %%esi \n\t" | |
2785 "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t" | |
2786 | |
2787 "movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx] | |
2788 "movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1] | |
2789 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] | |
2790 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2791 "shll $16, %%edi \n\t" | |
2792 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
2793 "mov %1, %%"REG_D" \n\t" | |
2794 "shrl $9, %%esi \n\t" | |
2795 "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t" | |
2796 | |
2797 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF | |
2798 "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry | |
2799 "add $1, %%"REG_a" \n\t" | |
2800 "cmp %2, %%"REG_a" \n\t" | |
2801 " jb 1b \n\t" | |
2802 | |
2803 /* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here, | |
2804 which is needed to support GCC-4.0 */ | |
2805 #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4)) | |
2806 :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask), | |
2807 #else | |
2808 :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask), | |
2809 #endif | |
2810 "r" (src2) | |
2811 : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi" | |
2812 ); | |
2813 #ifdef HAVE_MMX2 | |
2814 } //if MMX2 can't be used | |
2815 #endif | |
2816 #else | |
2817 int i; | |
2818 unsigned int xpos=0; | |
2819 for(i=0;i<dstWidth;i++) | |
2820 { | |
2821 register unsigned int xx=xpos>>16; | |
2822 register unsigned int xalpha=(xpos&0xFFFF)>>9; | |
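// xalpha fits in 7 bits, so (xalpha^127) is the same as (127-xalpha) here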
2823 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha); | |
2824 dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha); | |
2825 /* slower | |
2826 dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha; | |
2827 dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha; | |
2828 */ | |
2829 xpos+=xInc; | |
2830 } | |
2831 #endif | |
2832 } | |
2833 } | |
2834 | |
2835 static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, | |
2836 int srcSliceH, uint8_t* dst[], int dstStride[]){ | |
2837 | |
2838 /* load a few things into local vars to make the code more readable and faster */ | |
2839 const int srcW= c->srcW; | |
2840 const int dstW= c->dstW; | |
2841 const int dstH= c->dstH; | |
2842 const int chrDstW= c->chrDstW; | |
2843 const int chrSrcW= c->chrSrcW; | |
2844 const int lumXInc= c->lumXInc; | |
2845 const int chrXInc= c->chrXInc; | |
2846 const int dstFormat= c->dstFormat; | |
2847 const int srcFormat= c->srcFormat; | |
2848 const int flags= c->flags; | |
2849 const int canMMX2BeUsed= c->canMMX2BeUsed; | |
2850 int16_t *vLumFilterPos= c->vLumFilterPos; | |
2851 int16_t *vChrFilterPos= c->vChrFilterPos; | |
2852 int16_t *hLumFilterPos= c->hLumFilterPos; | |
2853 int16_t *hChrFilterPos= c->hChrFilterPos; | |
2854 int16_t *vLumFilter= c->vLumFilter; | |
2855 int16_t *vChrFilter= c->vChrFilter; | |
2856 int16_t *hLumFilter= c->hLumFilter; | |
2857 int16_t *hChrFilter= c->hChrFilter; | |
2858 int32_t *lumMmxFilter= c->lumMmxFilter; | |
2859 int32_t *chrMmxFilter= c->chrMmxFilter; | |
2860 const int vLumFilterSize= c->vLumFilterSize; | |
2861 const int vChrFilterSize= c->vChrFilterSize; | |
2862 const int hLumFilterSize= c->hLumFilterSize; | |
2863 const int hChrFilterSize= c->hChrFilterSize; | |
2864 int16_t **lumPixBuf= c->lumPixBuf; | |
2865 int16_t **chrPixBuf= c->chrPixBuf; | |
2866 const int vLumBufSize= c->vLumBufSize; | |
2867 const int vChrBufSize= c->vChrBufSize; | |
2868 uint8_t *funnyYCode= c->funnyYCode; | |
2869 uint8_t *funnyUVCode= c->funnyUVCode; | |
2870 uint8_t *formatConvBuffer= c->formatConvBuffer; | |
2871 const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample; | |
2872 const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample); | |
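// -((-x)>>n) rounds upwards, i.e. chrSrcSliceH= ceil(srcSliceH / 2^chrSrcVSubSample)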
2873 int lastDstY; | |
2874 | |
2875 /* vars which will change and which we need to store back in the context */ | |
2876 int dstY= c->dstY; | |
2877 int lumBufIndex= c->lumBufIndex; | |
2878 int chrBufIndex= c->chrBufIndex; | |
2879 int lastInLumBuf= c->lastInLumBuf; | |
2880 int lastInChrBuf= c->lastInChrBuf; | |
2881 | |
2882 if(isPacked(c->srcFormat)){ | |
2883 src[0]= | |
2884 src[1]= | |
2885 src[2]= src[0]; | |
2886 srcStride[0]= | |
2887 srcStride[1]= | |
2888 srcStride[2]= srcStride[0]; | |
2889 } | |
2890 srcStride[1]<<= c->vChrDrop; | |
2891 srcStride[2]<<= c->vChrDrop; | |
2892 | |
2893 // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2], | |
2894 // (int)dst[0], (int)dst[1], (int)dst[2]); | |
2895 | |
2896 #if 0 //self test FIXME move to a vfilter or something | |
2897 { | |
2898 static volatile int i=0; | |
2899 i++; | |
2900 if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH) | |
2901 selfTest(src, srcStride, c->srcW, c->srcH); | |
2902 i--; | |
2903 } | |
2904 #endif | |
2905 | |
2906 //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2], | |
2907 //dstStride[0],dstStride[1],dstStride[2]); | |
2908 | |
2909 if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0) | |
2910 { | |
2911 static int firstTime=1; //FIXME move this into the context perhaps | |
2912 if(flags & SWS_PRINT_INFO && firstTime) | |
2913 { | |
2914 MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n" | |
2915 "SwScaler: ->cannot do aligned memory acesses anymore\n"); | |
2916 firstTime=0; | |
2917 } | |
2918 } | |
2919 | |
2920 /* Note: the user might start scaling in the middle of the picture, so this will not get executed; | |
2921 this is not really intended but works currently, so people might do it */ | |
2922 if(srcSliceY ==0){ | |
2923 lumBufIndex=0; | |
2924 chrBufIndex=0; | |
2925 dstY=0; | |
2926 lastInLumBuf= -1; | |
2927 lastInChrBuf= -1; | |
2928 } | |
2929 | |
2930 lastDstY= dstY; | |
2931 | |
2932 for(;dstY < dstH; dstY++){ | |
2933 unsigned char *dest =dst[0]+dstStride[0]*dstY; | |
2934 const int chrDstY= dstY>>c->chrDstVSubSample; | |
2935 unsigned char *uDest=dst[1]+dstStride[1]*chrDstY; | |
2936 unsigned char *vDest=dst[2]+dstStride[2]*chrDstY; | |
2937 | |
2938 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input | |
2939 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input | |
2940 const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input | |
2941 const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input | |
2942 | |
2943 //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n", | |
2944 // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample); | |
2945 //handle holes (FAST_BILINEAR & weird filters) | |
2946 if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1; | |
2947 if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1; | |
2948 //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize); | |
2949 ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1) | |
2950 ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1) | |
2951 | |
2952 // Do we have enough lines in this slice to output the dstY line? | |
2953 if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample)) | |
2954 { | |
2955 //Do horizontal scaling | |
2956 while(lastInLumBuf < lastLumSrcY) | |
2957 { | |
2958 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; | |
2959 lumBufIndex++; | |
2960 // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY); | |
2961 ASSERT(lumBufIndex < 2*vLumBufSize) | |
2962 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) | |
2963 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) | |
2964 // printf("%d %d\n", lumBufIndex, vLumBufSize); | |
2965 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, | |
2966 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, | |
2967 funnyYCode, c->srcFormat, formatConvBuffer, | |
2968 c->lumMmx2Filter, c->lumMmx2FilterPos); | |
2969 lastInLumBuf++; | |
2970 } | |
2971 while(lastInChrBuf < lastChrSrcY) | |
2972 { | |
2973 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; | |
2974 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; | |
2975 chrBufIndex++; | |
2976 ASSERT(chrBufIndex < 2*vChrBufSize) | |
2977 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH)) | |
2978 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0) | |
2979 //FIXME pass these parameters through the context struct (at least some of them) | |
2980 | |
2981 if(!(isGray(srcFormat) || isGray(dstFormat))) | |
2982 RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, | |
2983 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, | |
2984 funnyUVCode, c->srcFormat, formatConvBuffer, | |
2985 c->chrMmx2Filter, c->chrMmx2FilterPos); | |
2986 lastInChrBuf++; | |
2987 } | |
2988 //wrap buf index around to stay inside the ring buffer | |
2989 if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; | |
2990 if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; | |
2991 } | |
2992 else // not enough lines left in this slice -> load the rest in the buffer | |
2993 { | |
2994 /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n", | |
2995 firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY, | |
2996 lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize, | |
2997 vChrBufSize, vLumBufSize);*/ | |
2998 | |
2999 //Do horizontal scaling | |
3000 while(lastInLumBuf+1 < srcSliceY + srcSliceH) | |
3001 { | |
3002 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; | |
3003 lumBufIndex++; | |
3004 ASSERT(lumBufIndex < 2*vLumBufSize) | |
3005 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) | |
3006 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) | |
3007 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, | |
3008 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, | |
3009 funnyYCode, c->srcFormat, formatConvBuffer, | |
3010 c->lumMmx2Filter, c->lumMmx2FilterPos); | |
3011 lastInLumBuf++; | |
3012 } | |
3013 while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH)) | |
3014 { | |
3015 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; | |
3016 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; | |
3017 chrBufIndex++; | |
3018 ASSERT(chrBufIndex < 2*vChrBufSize) | |
3019 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH) | |
3020 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0) | |
3021 | |
3022 if(!(isGray(srcFormat) || isGray(dstFormat))) | |
3023 RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, | |
3024 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, | |
3025 funnyUVCode, c->srcFormat, formatConvBuffer, | |
3026 c->chrMmx2Filter, c->chrMmx2FilterPos); | |
3027 lastInChrBuf++; | |
3028 } | |
3029 //wrap buf index around to stay inside the ring buffer | |
3030 if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; | |
3031 if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; | |
3032 break; //we can't output a dstY line so let's try with the next slice | |
3033 } | |
3034 | |
3035 #ifdef HAVE_MMX | |
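// per-output-line dither offsets, used by the 15/16-bit RGB output paths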
3036 b5Dither= dither8[dstY&1]; | |
3037 g6Dither= dither4[dstY&1]; | |
3038 g5Dither= dither8[dstY&1]; | |
3039 r5Dither= dither8[(dstY+1)&1]; | |
3040 #endif | |
3041 if(dstY < dstH-2) | |
3042 { | |
3043 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; | |
3044 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; | |
3045 #ifdef HAVE_MMX | |
3046 int i; | |
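/* Two layouts of the per-line MMX filter tables are built here, matching what
   the vertical-scaling asm expects: with SWS_ACCURATE_RND each pair of taps
   stores two source-line pointers plus both 16-bit coefficients packed into
   one 32-bit word; otherwise each tap stores one line pointer and its
   coefficient replicated into both 16-bit halves (*0x10001). */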
19172 | 3047 if(flags & SWS_ACCURATE_RND){ |
3048 for(i=0; i<vLumFilterSize; i+=2){ |
3049 lumMmxFilter[2*i+0]= lumSrcPtr[i ]; |
3050 lumMmxFilter[2*i+1]= lumSrcPtr[i+(vLumFilterSize>1)]; |
3051 lumMmxFilter[2*i+2]= |
3052 lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i ] |
3053 + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0); |
3054 } |
3055 for(i=0; i<vChrFilterSize; i+=2){ |
3056 chrMmxFilter[2*i+0]= chrSrcPtr[i ]; |
3057 chrMmxFilter[2*i+1]= chrSrcPtr[i+(vChrFilterSize>1)]; |
3058 chrMmxFilter[2*i+2]= |
3059 chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i ] |
3060 + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0); |
3061 } |
3062 }else{ |
18861 | 3063 for(i=0; i<vLumFilterSize; i++) |
3064 { | |
3065 lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i]; | |
3066 lumMmxFilter[4*i+2]= | |
3067 lumMmxFilter[4*i+3]= | |
3068 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001; | |
3069 } | |
3070 for(i=0; i<vChrFilterSize; i++) | |
3071 { | |
3072 chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i]; | |
3073 chrMmxFilter[4*i+2]= | |
3074 chrMmxFilter[4*i+3]= | |
3075 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001; | |
3076 } | |
19172 | 3077 } |
18861 | 3078 #endif |
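/* Vertical scaling and output conversion: NV12/NV21, planar YUV (or grey),
   or packed output, chosen by dstFormat; the 1-tap and 2-tap cases take the
   specialized unscaled / bilinear paths. */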
3079 if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){ | |
3080 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; | |
3081 if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi | |
3082 RENAME(yuv2nv12X)(c, | |
3083 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, | |
3084 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
3085 dest, uDest, dstW, chrDstW, dstFormat); | |
3086 } | |
3087 else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like | |
3088 { | |
3089 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; | |
3090 if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi | |
3091 if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12 | |
3092 { | |
3093 int16_t *lumBuf = lumPixBuf[0]; | |
3094 int16_t *chrBuf= chrPixBuf[0]; | |
3095 RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW); | |
3096 } | |
3097 else //General YV12 | |
3098 { | |
3099 RENAME(yuv2yuvX)(c, | |
3100 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, | |
3101 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
3102 dest, uDest, vDest, dstW, chrDstW); | |
3103 } | |
3104 } | |
3105 else | |
3106 { | |
3107 ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); | |
3108 ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); | |
3109 if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB | |
3110 { | |
3111 int chrAlpha= vChrFilter[2*dstY+1]; | |
3112 RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1), | |
3113 dest, dstW, chrAlpha, dstFormat, flags, dstY); | |
3114 } | |
3115 else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB | |
3116 { | |
3117 int lumAlpha= vLumFilter[2*dstY+1]; | |
3118 int chrAlpha= vChrFilter[2*dstY+1]; | |
19172 | 3119 lumMmxFilter[2]= |
3120 lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001; |
3121 chrMmxFilter[2]= |
3122 chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001; |
18861 | 3123 RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1), |
3124 dest, dstW, lumAlpha, chrAlpha, dstY); | |
3125 } | |
3126 else //General RGB | |
3127 { | |
3128 RENAME(yuv2packedX)(c, | |
3129 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, | |
3130 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
3131 dest, dstW, dstY); | |
3132 } | |
3133 } | |
3134 } | |
3135 else // hmm looks like we can't use MMX here without overwriting this array's tail | |
3136 { | |
3137 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; | |
3138 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; | |
3139 if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){ | |
3140 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; | |
3141 if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi | |
3142 yuv2nv12XinC( | |
3143 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, | |
3144 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
3145 dest, uDest, dstW, chrDstW, dstFormat); | |
3146 } | |
3147 else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 | |
3148 { | |
3149 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; | |
3150 if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi | |
3151 yuv2yuvXinC( | |
3152 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, | |
3153 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
3154 dest, uDest, vDest, dstW, chrDstW); | |
3155 } | |
3156 else | |
3157 { | |
3158 ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); | |
3159 ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); | |
3160 yuv2packedXinC(c, | |
3161 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, | |
3162 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
3163 dest, dstW, dstY); | |
3164 } | |
3165 } | |
3166 } | |
3167 | |
3168 #ifdef HAVE_MMX | |
3169 __asm __volatile(SFENCE:::"memory"); | |
3170 __asm __volatile(EMMS:::"memory"); | |
3171 #endif | |
3172 /* store changed local vars back in the context */ | |
3173 c->dstY= dstY; | |
3174 c->lumBufIndex= lumBufIndex; | |
3175 c->chrBufIndex= chrBufIndex; | |
3176 c->lastInLumBuf= lastInLumBuf; | |
3177 c->lastInChrBuf= lastInChrBuf; | |
3178 | |
3179 return dstY - lastDstY; | |
3180 } |