Mercurial repository mplayer.hg — annotate postproc/swscale_template.c @ 18813:53a08a2754b5
changeset: "reword some of the german translations of the mga message strings"
author:    attila
date:      Sun, 25 Jun 2006 16:09:57 +0000
parents:   e00cea3e1732
/*
    Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include "asmalign.h"

#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW
#undef EMMS
#undef SFENCE

#ifdef HAVE_3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped on emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif

#ifdef HAVE_3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
#elif defined ( HAVE_MMX2 )
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
#define PREFETCH "/nop"
#define PREFETCHW "/nop"
#endif

#ifdef HAVE_MMX2
#define SFENCE "sfence"
#else
#define SFENCE "/nop"
#endif

#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)

#ifdef HAVE_ALTIVEC
#include "swscale_altivec_template.c"
#endif

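/* Vertical scaler for one planar output line (summary inferred from the code
   below): %0 is the filter/context block, %1 the destination line, %2 the
   width counter. The loop walks a list of (coefficient, source-line pointer)
   pairs, accumulates the pmulhw products on top of the rounder constant,
   shifts the sums right by 3, packs them to unsigned bytes and stores
   8 pixels per iteration with MOVNTQ. */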
#define YSCALEYUV2YV12X(x, offset) \
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ASMALIGN16 /* FIXME Unroll? */\
    "1: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
    "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    " jnz 1b \n\t"\
    "psraw $3, %%mm3 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "packuswb %%mm4, %%mm3 \n\t"\
    MOVNTQ(%%mm3, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"

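/* Unscaled (1:1) vertical case: a single 16-bit source line is simply shifted
   down to 8 bits, packed and stored; no filtering is applied. */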
#define YSCALEYUV2YV121 \
    "mov %2, %%"REG_a" \n\t"\
    ASMALIGN16 /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"

/*
    :: "m" (-lumFilterSize), "m" (-chrFilterSize),
    "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
    "r" (dest), "m" (dstW),
    "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
    : "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
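/* Same accumulation scheme as YSCALEYUV2YV12X, but producing data for packed
   output: the first inner loop ("2:") sums the chroma filter taps (U into
   %%mm3, V into %%mm4; the V samples apparently sit 4096 bytes after the U
   samples), the second inner loop sums two groups of four luma samples into
   %%mm1/%%mm7. */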
#define YSCALEYUV2PACKEDX \
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN16\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    ASMALIGN16\
    "2: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
    "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"\
    \
    "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    ASMALIGN16\
    "2: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm1 \n\t"\
    "paddw %%mm5, %%mm7 \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"\


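/* YUV -> RGB on top of YSCALEYUV2PACKEDX: subtract the U/V/Y offsets, apply
   the colorspace coefficients stored in the context (%0), and assemble B/G/R
   byte vectors for 8 pixels (leaving mm2=B, mm4=G, mm5=R, mm7=0 for the
   WRITE* macros further below). */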
#define YSCALEYUV2RGBX \
    YSCALEYUV2PACKEDX\
    "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
    "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"
#if 0
#define FULL_YSCALEYUV2RGB \
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %6, %%mm6 \n\t" /*yalpha1*/\
    "punpcklwd %%mm6, %%mm6 \n\t"\
    "punpcklwd %%mm6, %%mm6 \n\t"\
    "movd %7, %%mm5 \n\t" /*uvalpha1*/\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN16\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
    "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
    \
    \
    "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
    "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
    "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
    \
    \
    "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
    "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
    "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
    "paddw %%mm1, %%mm3 \n\t" /* B*/\
    "paddw %%mm1, %%mm0 \n\t" /* R*/\
    "packuswb %%mm3, %%mm3 \n\t"\
    \
    "packuswb %%mm0, %%mm0 \n\t"\
    "paddw %%mm4, %%mm2 \n\t"\
    "paddw %%mm2, %%mm1 \n\t" /* G*/\
    \
    "packuswb %%mm1, %%mm1 \n\t"
#endif

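/* Two-line vertical blend for packed YUV output: buf0/buf1 (luma) and
   uvbuf0/uvbuf1 (chroma) are interpolated with the weights kept at
   LUM_MMX_FILTER_OFFSET+8 / CHR_MMX_FILTER_OFFSET+8 in the context "c"
   (the weights are pre-shifted by 3 at the top of the macro). */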
#define REAL_YSCALEYUV2PACKED(index, c) \
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
    "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
    "psraw $3, %%mm0 \n\t"\
    "psraw $3, %%mm1 \n\t"\
    "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
    "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
    "xor "#index", "#index" \n\t"\
    ASMALIGN16\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)

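/* Same two-line blend as YSCALEYUV2PACKED, followed by the YUV -> RGB matrix
   using the per-context offsets and coefficients (U_OFFSET, UG_COEFF, ...). */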
#define REAL_YSCALEYUV2RGB(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN16\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
    "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)

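/* "1" variants: only one source line per plane is available, so no vertical
   blending is done; the samples are just shifted down and (for the RGB case)
   run through the same colour matrix. */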
#define REAL_YSCALEYUV2PACKED1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN16\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $7, %%mm3 \n\t" \
    "psraw $7, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t" \

#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)

#define REAL_YSCALEYUV2RGB1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN16\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)

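/* "1b" variants: luma still comes from a single line, but the two chroma lines
   are averaged (the vertical chrominance interpolation case noted below). */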
#define REAL_YSCALEYUV2PACKED1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN16\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $8, %%mm3 \n\t" \
    "psrlw $8, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)

// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN16\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
    "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)

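/* Output writers: they take the packed B/G/R byte vectors left in mm2/mm4/mm5
   (mm7 = 0) and interleave them into the destination format, advancing "index"
   by 8 pixels per loop. WRITEBGR32 stores 4 bytes per pixel (0RGB);
   WRITEBGR16/WRITEBGR15 mask, shift and OR the components down to 5-6-5 /
   5-5-5 bits. */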
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
495 #define REAL_WRITEBGR32(dst, dstw, index) \ |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
496 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
497 "movq %%mm2, %%mm1 \n\t" /* B */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
498 "movq %%mm5, %%mm6 \n\t" /* R */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
499 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
500 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
501 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
502 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
503 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
504 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
505 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
506 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
507 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
508 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\ |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
509 \ |
9414 | 510 MOVNTQ(%%mm0, (dst, index, 4))\ |
511 MOVNTQ(%%mm2, 8(dst, index, 4))\ | |
512 MOVNTQ(%%mm1, 16(dst, index, 4))\ | |
513 MOVNTQ(%%mm3, 24(dst, index, 4))\ | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
514 \ |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
515 "add $8, "#index" \n\t"\ |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
516 "cmp "#dstw", "#index" \n\t"\ |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
517 " jb 1b \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
518 #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index) |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
519 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
#define REAL_WRITEBGR16(dst, dstw, index) \
        "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
        "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
        "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
        "psrlq $3, %%mm2           \n\t"\
\
        "movq %%mm2, %%mm1         \n\t"\
        "movq %%mm4, %%mm3         \n\t"\
\
        "punpcklbw %%mm7, %%mm3    \n\t"\
        "punpcklbw %%mm5, %%mm2    \n\t"\
        "punpckhbw %%mm7, %%mm4    \n\t"\
        "punpckhbw %%mm5, %%mm1    \n\t"\
\
        "psllq $3, %%mm3           \n\t"\
        "psllq $3, %%mm4           \n\t"\
\
        "por %%mm3, %%mm2          \n\t"\
        "por %%mm4, %%mm1          \n\t"\
\
        MOVNTQ(%%mm2, (dst, index, 2))\
        MOVNTQ(%%mm1, 8(dst, index, 2))\
\
        "add $8, "#index"          \n\t"\
        "cmp "#dstw", "#index"     \n\t"\
        " jb 1b                    \n\t"
#define WRITEBGR16(dst, dstw, index)  REAL_WRITEBGR16(dst, dstw, index)

#define REAL_WRITEBGR15(dst, dstw, index) \
        "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
        "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
        "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
        "psrlq $3, %%mm2           \n\t"\
        "psrlq $1, %%mm5           \n\t"\
\
        "movq %%mm2, %%mm1         \n\t"\
        "movq %%mm4, %%mm3         \n\t"\
\
        "punpcklbw %%mm7, %%mm3    \n\t"\
        "punpcklbw %%mm5, %%mm2    \n\t"\
        "punpckhbw %%mm7, %%mm4    \n\t"\
        "punpckhbw %%mm5, %%mm1    \n\t"\
\
        "psllq $2, %%mm3           \n\t"\
        "psllq $2, %%mm4           \n\t"\
\
        "por %%mm3, %%mm2          \n\t"\
        "por %%mm4, %%mm1          \n\t"\
\
        MOVNTQ(%%mm2, (dst, index, 2))\
        MOVNTQ(%%mm1, 8(dst, index, 2))\
\
        "add $8, "#index"          \n\t"\
        "cmp "#dstw", "#index"     \n\t"\
        " jb 1b                    \n\t"
#define WRITEBGR15(dst, dstw, index)  REAL_WRITEBGR15(dst, dstw, index)

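/* WRITEBGR16 and WRITEBGR15 above reduce the 8-bit B/G/R values to 5-6-5 and
   5-5-5 bit fields and merge them into one 16-bit word per pixel. The same
   arithmetic in plain C, as an illustrative sketch only; the helper names are
   ours, and the field placement (R in the top bits, B in the low bits) is what
   the shifts and the g15Mask/r15Mask/g16Mask/r16Mask uses elsewhere in this
   file suggest: */
static inline uint16_t ref_pack_rgb565_c(int r, int g, int b)
{
    return (uint16_t)(((r>>3)<<11) | ((g>>2)<<5) | (b>>3)); // 5-6-5
}
static inline uint16_t ref_pack_rgb555_c(int r, int g, int b)
{
    return (uint16_t)(((r>>3)<<10) | ((g>>3)<<5) | (b>>3)); // 5-5-5
}
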
#define WRITEBGR24OLD(dst, dstw, index) \
        /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
        "movq %%mm2, %%mm1         \n\t" /* B */\
        "movq %%mm5, %%mm6         \n\t" /* R */\
        "punpcklbw %%mm4, %%mm2    \n\t" /* GBGBGBGB 0 */\
        "punpcklbw %%mm7, %%mm5    \n\t" /* 0R0R0R0R 0 */\
        "punpckhbw %%mm4, %%mm1    \n\t" /* GBGBGBGB 2 */\
        "punpckhbw %%mm7, %%mm6    \n\t" /* 0R0R0R0R 2 */\
        "movq %%mm2, %%mm0         \n\t" /* GBGBGBGB 0 */\
        "movq %%mm1, %%mm3         \n\t" /* GBGBGBGB 2 */\
        "punpcklwd %%mm5, %%mm0    \n\t" /* 0RGB0RGB 0 */\
        "punpckhwd %%mm5, %%mm2    \n\t" /* 0RGB0RGB 1 */\
        "punpcklwd %%mm6, %%mm1    \n\t" /* 0RGB0RGB 2 */\
        "punpckhwd %%mm6, %%mm3    \n\t" /* 0RGB0RGB 3 */\
\
        "movq %%mm0, %%mm4         \n\t" /* 0RGB0RGB 0 */\
        "psrlq $8, %%mm0           \n\t" /* 00RGB0RG 0 */\
        "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
        "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
        "por %%mm4, %%mm0          \n\t" /* 00RGBRGB 0 */\
        "movq %%mm2, %%mm4         \n\t" /* 0RGB0RGB 1 */\
        "psllq $48, %%mm2          \n\t" /* GB000000 1 */\
        "por %%mm2, %%mm0          \n\t" /* GBRGBRGB 0 */\
\
        "movq %%mm4, %%mm2         \n\t" /* 0RGB0RGB 1 */\
        "psrld $16, %%mm4          \n\t" /* 000R000R 1 */\
        "psrlq $24, %%mm2          \n\t" /* 0000RGB0 1.5 */\
        "por %%mm4, %%mm2          \n\t" /* 000RRGBR 1 */\
        "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
        "movq %%mm1, %%mm4         \n\t" /* 0RGB0RGB 2 */\
        "psrlq $8, %%mm1           \n\t" /* 00RGB0RG 2 */\
        "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
        "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
        "por %%mm4, %%mm1          \n\t" /* 00RGBRGB 2 */\
        "movq %%mm1, %%mm4         \n\t" /* 00RGBRGB 2 */\
        "psllq $32, %%mm1          \n\t" /* BRGB0000 2 */\
        "por %%mm1, %%mm2          \n\t" /* BRGBRGBR 1 */\
\
        "psrlq $32, %%mm4          \n\t" /* 000000RG 2.5 */\
        "movq %%mm3, %%mm5         \n\t" /* 0RGB0RGB 3 */\
        "psrlq $8, %%mm3           \n\t" /* 00RGB0RG 3 */\
        "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
        "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
        "por %%mm5, %%mm3          \n\t" /* 00RGBRGB 3 */\
        "psllq $16, %%mm3          \n\t" /* RGBRGB00 3 */\
        "por %%mm4, %%mm3          \n\t" /* RGBRGBRG 2.5 */\
\
        MOVNTQ(%%mm0, (dst))\
        MOVNTQ(%%mm2, 8(dst))\
        MOVNTQ(%%mm3, 16(dst))\
        "add $24, "#dst"           \n\t"\
\
        "add $8, "#index"          \n\t"\
        "cmp "#dstw", "#index"     \n\t"\
        " jb 1b                    \n\t"

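/* WRITEBGR24OLD above builds the 3-byte-per-pixel stream purely with shifts,
   masks and ors; the WRITEBGR24MMX/WRITEBGR24MMX2 variants below do the same
   job and are the ones actually selected by the WRITEBGR24 define further down,
   so this version appears to be kept only for reference. */
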
#define WRITEBGR24MMX(dst, dstw, index) \
        /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
        "movq %%mm2, %%mm1         \n\t" /* B */\
        "movq %%mm5, %%mm6         \n\t" /* R */\
        "punpcklbw %%mm4, %%mm2    \n\t" /* GBGBGBGB 0 */\
        "punpcklbw %%mm7, %%mm5    \n\t" /* 0R0R0R0R 0 */\
        "punpckhbw %%mm4, %%mm1    \n\t" /* GBGBGBGB 2 */\
        "punpckhbw %%mm7, %%mm6    \n\t" /* 0R0R0R0R 2 */\
        "movq %%mm2, %%mm0         \n\t" /* GBGBGBGB 0 */\
        "movq %%mm1, %%mm3         \n\t" /* GBGBGBGB 2 */\
        "punpcklwd %%mm5, %%mm0    \n\t" /* 0RGB0RGB 0 */\
        "punpckhwd %%mm5, %%mm2    \n\t" /* 0RGB0RGB 1 */\
        "punpcklwd %%mm6, %%mm1    \n\t" /* 0RGB0RGB 2 */\
        "punpckhwd %%mm6, %%mm3    \n\t" /* 0RGB0RGB 3 */\
\
        "movq %%mm0, %%mm4         \n\t" /* 0RGB0RGB 0 */\
        "movq %%mm2, %%mm6         \n\t" /* 0RGB0RGB 1 */\
        "movq %%mm1, %%mm5         \n\t" /* 0RGB0RGB 2 */\
        "movq %%mm3, %%mm7         \n\t" /* 0RGB0RGB 3 */\
\
        "psllq $40, %%mm0          \n\t" /* RGB00000 0 */\
        "psllq $40, %%mm2          \n\t" /* RGB00000 1 */\
        "psllq $40, %%mm1          \n\t" /* RGB00000 2 */\
        "psllq $40, %%mm3          \n\t" /* RGB00000 3 */\
\
        "punpckhdq %%mm4, %%mm0    \n\t" /* 0RGBRGB0 0 */\
        "punpckhdq %%mm6, %%mm2    \n\t" /* 0RGBRGB0 1 */\
        "punpckhdq %%mm5, %%mm1    \n\t" /* 0RGBRGB0 2 */\
        "punpckhdq %%mm7, %%mm3    \n\t" /* 0RGBRGB0 3 */\
\
        "psrlq $8, %%mm0           \n\t" /* 00RGBRGB 0 */\
        "movq %%mm2, %%mm6         \n\t" /* 0RGBRGB0 1 */\
        "psllq $40, %%mm2          \n\t" /* GB000000 1 */\
        "por %%mm2, %%mm0          \n\t" /* GBRGBRGB 0 */\
        MOVNTQ(%%mm0, (dst))\
\
        "psrlq $24, %%mm6          \n\t" /* 0000RGBR 1 */\
        "movq %%mm1, %%mm5         \n\t" /* 0RGBRGB0 2 */\
        "psllq $24, %%mm1          \n\t" /* BRGB0000 2 */\
        "por %%mm1, %%mm6          \n\t" /* BRGBRGBR 1 */\
        MOVNTQ(%%mm6, 8(dst))\
\
        "psrlq $40, %%mm5          \n\t" /* 000000RG 2 */\
        "psllq $8, %%mm3           \n\t" /* RGBRGB00 3 */\
        "por %%mm3, %%mm5          \n\t" /* RGBRGBRG 2 */\
        MOVNTQ(%%mm5, 16(dst))\
\
        "add $24, "#dst"           \n\t"\
\
        "add $8, "#index"          \n\t"\
        "cmp "#dstw", "#index"     \n\t"\
        " jb 1b                    \n\t"

#define WRITEBGR24MMX2(dst, dstw, index) \
        /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
        "movq "MANGLE(M24A)", %%mm0 \n\t"\
        "movq "MANGLE(M24C)", %%mm7 \n\t"\
        "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2  B1 B0 B1 B0 */\
        "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2  G1 G0 G1 G0 */\
        "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0  R1 R0 R1 R0 */\
\
        "pand %%mm0, %%mm1         \n\t" /*    B2        B1       B0 */\
        "pand %%mm0, %%mm3         \n\t" /*    G2        G1       G0 */\
        "pand %%mm7, %%mm6         \n\t" /*       R1        R0       */\
\
        "psllq $8, %%mm3           \n\t" /* G2        G1       G0    */\
        "por %%mm1, %%mm6          \n\t"\
        "por %%mm3, %%mm6          \n\t"\
        MOVNTQ(%%mm6, (dst))\
\
        "psrlq $8, %%mm4           \n\t" /* 00 G7 G6 G5  G4 G3 G2 G1 */\
        "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4  B3 B2 B3 B2 */\
        "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3  G4 G3 G4 G3 */\
        "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4  R3 R2 R3 R2 */\
\
        "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5       B4        B3    */\
        "pand %%mm7, %%mm3         \n\t" /*       G4        G3       */\
        "pand %%mm0, %%mm6         \n\t" /*    R4        R3       R2 */\
\
        "por %%mm1, %%mm3          \n\t" /* B5 G4 B4     G3 B3       */\
        "por %%mm3, %%mm6          \n\t"\
        MOVNTQ(%%mm6, 8(dst))\
\
        "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6  B7 B6 B6 B7 */\
        "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7  G6 G5 G6 G5 */\
        "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6  R5 R4 R5 R4 */\
\
        "pand %%mm7, %%mm1         \n\t" /*       B7        B6       */\
        "pand %%mm0, %%mm3         \n\t" /*    G7        G6       G5 */\
        "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7       R6        R5    */\
\
        "por %%mm1, %%mm3          \n\t"\
        "por %%mm3, %%mm6          \n\t"\
        MOVNTQ(%%mm6, 16(dst))\
\
        "add $24, "#dst"           \n\t"\
\
        "add $8, "#index"          \n\t"\
        "cmp "#dstw", "#index"     \n\t"\
        " jb 1b                    \n\t"

#ifdef HAVE_MMX2
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
#endif

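/* Whichever variant WRITEBGR24 resolves to, the net effect is the same: four
   0RGB dwords are squeezed into twelve consecutive bytes, three per pixel.
   A plain-C sketch of that packing, illustrative only; the helper name is ours
   and it assumes the usual little-endian B,G,R memory order that the BGR24
   name and the 0RGB register comments above imply: */
static inline void ref_write_bgr24_c(uint8_t *dst, const uint8_t *b, const uint8_t *g,
                                     const uint8_t *r, int width)
{
    int i;
    for(i=0; i<width; i++){
        dst[3*i+0]= b[i];
        dst[3*i+1]= g[i];
        dst[3*i+2]= r[i];
    }
}
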
#define REAL_WRITEYUY2(dst, dstw, index) \
        "packuswb %%mm3, %%mm3     \n\t"\
        "packuswb %%mm4, %%mm4     \n\t"\
        "packuswb %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm4, %%mm3    \n\t"\
        "movq %%mm1, %%mm7         \n\t"\
        "punpcklbw %%mm3, %%mm1    \n\t"\
        "punpckhbw %%mm3, %%mm7    \n\t"\
\
        MOVNTQ(%%mm1, (dst, index, 2))\
        MOVNTQ(%%mm7, 8(dst, index, 2))\
\
        "add $8, "#index"          \n\t"\
        "cmp "#dstw", "#index"     \n\t"\
        " jb 1b                    \n\t"
#define WRITEYUY2(dst, dstw, index)  REAL_WRITEYUY2(dst, dstw, index)


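/* WRITEYUY2 packs the 16-bit luma and chroma words back to bytes and interleaves
   them into the YUY2 byte order, Y0 U0 Y1 V0 for each pair of pixels (chroma is
   shared horizontally). A plain-C sketch of that layout, illustrative only; the
   helper name is ours and the mapping of mm3/mm4 to U/V is an assumption: */
static inline void ref_write_yuy2_c(uint8_t *dst, const uint8_t *y,
                                    const uint8_t *u, const uint8_t *v, int width)
{
    int i;
    for(i=0; i<width/2; i++){
        dst[4*i+0]= y[2*i];   // Y of the even pixel
        dst[4*i+1]= u[i];     // shared U
        dst[4*i+2]= y[2*i+1]; // Y of the odd pixel
        dst[4*i+3]= v[i];     // shared V
    }
}
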
static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                    int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
    if(uDest != NULL)
    {
        asm volatile(
                YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
                :: "r" (&c->redDither),
                   "r" (uDest), "p" (chrDstW)
                : "%"REG_a, "%"REG_d, "%"REG_S
            );

        asm volatile(
                YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
                :: "r" (&c->redDither),
                   "r" (vDest), "p" (chrDstW)
                : "%"REG_a, "%"REG_d, "%"REG_S
            );
    }

    asm volatile(
            YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
            :: "r" (&c->redDither),
               "r" (dest), "p" (dstW)
            : "%"REG_a, "%"REG_d, "%"REG_S
        );
#else
#ifdef HAVE_ALTIVEC
    yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
                          chrFilter, chrSrc, chrFilterSize,
                          dest, uDest, vDest, dstW, chrDstW);
#else //HAVE_ALTIVEC
    yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
                chrFilter, chrSrc, chrFilterSize,
                dest, uDest, vDest, dstW, chrDstW);
#endif //!HAVE_ALTIVEC
#endif
}

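/* The MMX path above (YSCALEYUV2YV12X) is a vertical FIR filter: each output
   pixel is a weighted sum of lumFilterSize (resp. chrFilterSize) input lines,
   rounded back to 8 bits. A plain-C sketch of that inner loop follows. It is
   illustrative only and is not the code that is actually called; it assumes
   12-bit fixed-point coefficients and the 15-bit intermediate samples used
   elsewhere in this file (the plain >>7 in yuv2yuv1 below matches a unity
   coefficient of 4096), and the helper name is ours: */
static inline void ref_vertical_filter_c(uint8_t *dst, int16_t **src,
                                         int16_t *filter, int filterSize, int width)
{
    int i, j;
    for(i=0; i<width; i++){
        int val= 1<<18;                  // rounding for the >>19 below
        for(j=0; j<filterSize; j++)
            val += src[j][i]*filter[j];  // (8-bit value)<<7 times coefficient<<12
        val >>= 19;                      // back to the 8-bit range
        if(val < 0)   val= 0;            // clip: sharp filters may over/undershoot
        if(val > 255) val= 255;
        dst[i]= val;
    }
}
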
static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                     int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                     uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
{
    yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
                 chrFilter, chrSrc, chrFilterSize,
                 dest, uDest, dstW, chrDstW, dstFormat);
}

static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
    if(uDest != NULL)
    {
        asm volatile(
                YSCALEYUV2YV121
                :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
                   "g" (-chrDstW)
                : "%"REG_a
            );

        asm volatile(
                YSCALEYUV2YV121
                :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
                   "g" (-chrDstW)
                : "%"REG_a
            );
    }

    asm volatile(
            YSCALEYUV2YV121
            :: "r" (lumSrc + dstW), "r" (dest + dstW),
               "g" (-dstW)
            : "%"REG_a
        );
#else
    int i;
    for(i=0; i<dstW; i++)
    {
        int val= lumSrc[i]>>7;

        if(val&256){
            if(val<0) val=0;
            else val=255;
        }

        dest[i]= val;
    }

    if(uDest != NULL)
        for(i=0; i<chrDstW; i++)
        {
            int u=chrSrc[i]>>7;
            int v=chrSrc[i + 2048]>>7;

            if((u|v)&256){
                if(u<0) u=0;
                else if (u>255) u=255;
                if(v<0) v=0;
                else if (v>255) v=255;
            }

            uDest[i]= u;
            vDest[i]= v;
        }
#endif
}


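/* Note on RENAME(yuv2yuv1) above: it is the unscaled, single-line case, so the
   C fallback only converts the 15-bit intermediate back to 8 bits with >>7.
   The "&256" tests are a cheap range check: after the shift the value lies
   roughly in -256..511, and bit 8 is set exactly when it fell outside 0..255,
   so one test decides whether any clamping is needed before the inner branches
   pick 0 or 255. */
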
/**
 * vertical scale YV12 to RGB
 */
static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                       int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                       uint8_t *dest, long dstW, long dstY)
{
    long dummy=0;
    switch(c->dstFormat)
    {
#ifdef HAVE_MMX
    case IMGFMT_BGR32:
        {
            asm volatile(
                YSCALEYUV2RGBX
                WRITEBGR32(%4, %5, %%REGa)

            :: "r" (&c->redDither),
               "m" (dummy), "m" (dummy), "m" (dummy),
               "r" (dest), "m" (dstW)
            : "%"REG_a, "%"REG_d, "%"REG_S
            );
        }
        break;
    case IMGFMT_BGR24:
        {
            asm volatile(
                YSCALEYUV2RGBX
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
                "add %4, %%"REG_b"         \n\t"
                WRITEBGR24(%%REGb, %5, %%REGa)

            :: "r" (&c->redDither),
               "m" (dummy), "m" (dummy), "m" (dummy),
               "r" (dest), "m" (dstW)
            : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
            );
        }
        break;
    case IMGFMT_BGR15:
        {
            asm volatile(
                YSCALEYUV2RGBX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif

                WRITEBGR15(%4, %5, %%REGa)

            :: "r" (&c->redDither),
               "m" (dummy), "m" (dummy), "m" (dummy),
               "r" (dest), "m" (dstW)
            : "%"REG_a, "%"REG_d, "%"REG_S
            );
        }
        break;
    case IMGFMT_BGR16:
        {
            asm volatile(
                YSCALEYUV2RGBX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif

                WRITEBGR16(%4, %5, %%REGa)

            :: "r" (&c->redDither),
               "m" (dummy), "m" (dummy), "m" (dummy),
               "r" (dest), "m" (dstW)
            : "%"REG_a, "%"REG_d, "%"REG_S
            );
        }
        break;
    case IMGFMT_YUY2:
        {
            asm volatile(
                YSCALEYUV2PACKEDX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */

                "psraw $3, %%mm3           \n\t"
                "psraw $3, %%mm4           \n\t"
                "psraw $3, %%mm1           \n\t"
                "psraw $3, %%mm7           \n\t"
                WRITEYUY2(%4, %5, %%REGa)

            :: "r" (&c->redDither),
               "m" (dummy), "m" (dummy), "m" (dummy),
               "r" (dest), "m" (dstW)
            : "%"REG_a, "%"REG_d, "%"REG_S
            );
        }
        break;
#endif
    default:
#ifdef HAVE_ALTIVEC
        /* The following list of supported dstFormat values should
           match what's found in the body of altivec_yuv2packedX() */
        if(c->dstFormat==IMGFMT_ABGR  || c->dstFormat==IMGFMT_BGRA  ||
           c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 ||
           c->dstFormat==IMGFMT_RGBA  || c->dstFormat==IMGFMT_ARGB)
            altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
                                 chrFilter, chrSrc, chrFilterSize,
                                 dest, dstW, dstY);
        else
#endif
            yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
                           chrFilter, chrSrc, chrFilterSize,
                           dest, dstW, dstY);
        break;
    }
}

/**
 * vertical bilinear scale YV12 to RGB
 */
static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                                       uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
    int yalpha1=yalpha^4095;
    int uvalpha1=uvalpha^4095;
    int i;

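    /* yalpha/uvalpha are 12-bit blend weights between the two source lines, so
       x^4095 is simply 4095-x: buf0 is weighted with yalpha1 and buf1 with
       yalpha, as in the C paths below, e.g.
           Y = (buf0[i]*yalpha1 + buf1[i]*yalpha) >> 19
       which takes the 15-bit intermediate samples back down to a table index. */
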
11000 | 999 #if 0 //isn't used |
4467 | 1000 if(flags&SWS_FULL_CHR_H_INT) |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1001 { |
6578 | 1002 switch(dstFormat) |
1003 { | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1004 #ifdef HAVE_MMX |
6578 | 1005 case IMGFMT_BGR32: |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1006 asm volatile( |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1007 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1008 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1009 FULL_YSCALEYUV2RGB |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1010 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1011 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1012 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1013 "movq %%mm3, %%mm1 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1014 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1015 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1016 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1017 MOVNTQ(%%mm3, (%4, %%REGa, 4)) |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1018 MOVNTQ(%%mm1, 8(%4, %%REGa, 4)) |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1019 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1020 "add $4, %%"REG_a" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1021 "cmp %5, %%"REG_a" \n\t" |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1022 " jb 1b \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1023 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1024 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1025 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW), |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1026 "m" (yalpha1), "m" (uvalpha1) |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1027 : "%"REG_a |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1028 ); |
6578 | 1029 break; |
1030 case IMGFMT_BGR24: | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1031 asm volatile( |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1032 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1033 FULL_YSCALEYUV2RGB |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1034 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1035 // lsb ... msb |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1036 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1037 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1038 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1039 "movq %%mm3, %%mm1 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1040 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1041 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1042 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1043 "movq %%mm3, %%mm2 \n\t" // BGR0BGR0 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1044 "psrlq $8, %%mm3 \n\t" // GR0BGR00 |
4248 | 1045 "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000 |
1046 "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00 | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1047 "por %%mm2, %%mm3 \n\t" // BGRBGR00 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1048 "movq %%mm1, %%mm2 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1049 "psllq $48, %%mm1 \n\t" // 000000BG |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1050 "por %%mm1, %%mm3 \n\t" // BGRBGRBG |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1051 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1052 "movq %%mm2, %%mm1 \n\t" // BGR0BGR0 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1053 "psrld $16, %%mm2 \n\t" // R000R000 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1054 "psrlq $24, %%mm1 \n\t" // 0BGR0000 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1055 "por %%mm2, %%mm1 \n\t" // RBGRR000 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1056 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1057 "mov %4, %%"REG_b" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1058 "add %%"REG_a", %%"REG_b" \n\t" |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1059 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1060 #ifdef HAVE_MMX2 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1061 //FIXME Alignment |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1062 "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1063 "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t" |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1064 #else |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1065 "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t" |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1066 "psrlq $32, %%mm3 \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1067 "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1068 "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t" |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1069 #endif |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1070 "add $4, %%"REG_a" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1071 "cmp %5, %%"REG_a" \n\t" |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1072 " jb 1b \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1073 |
3209 | 1074 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW), |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1075 "m" (yalpha1), "m" (uvalpha1) |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1076 : "%"REG_a, "%"REG_b |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1077 ); |
6578 | 1078 break; |
1079 case IMGFMT_BGR15: | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1080 asm volatile( |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1081 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1082 FULL_YSCALEYUV2RGB |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1083 #ifdef DITHER1XBPP |
4248 | 1084 "paddusb "MANGLE(g5Dither)", %%mm1\n\t" |
1085 "paddusb "MANGLE(r5Dither)", %%mm0\n\t" | |
1086 "paddusb "MANGLE(b5Dither)", %%mm3\n\t" | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1087 #endif |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1088 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1089 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1090 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1091 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1092 "psrlw $3, %%mm3 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1093 "psllw $2, %%mm1 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1094 "psllw $7, %%mm0 \n\t" |
4248 | 1095 "pand "MANGLE(g15Mask)", %%mm1 \n\t" |
1096 "pand "MANGLE(r15Mask)", %%mm0 \n\t" | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1097 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1098 "por %%mm3, %%mm1 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1099 "por %%mm1, %%mm0 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1100 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1101 MOVNTQ(%%mm0, (%4, %%REGa, 2)) |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1102 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1103 "add $4, %%"REG_a" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1104 "cmp %5, %%"REG_a" \n\t" |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1105 " jb 1b \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1106 |
3209 | 1107 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1108 "m" (yalpha1), "m" (uvalpha1) |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1109 : "%"REG_a |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1110 ); |
6578 | 1111 break; |
1112 case IMGFMT_BGR16: | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1113 asm volatile( |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1114 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1115 FULL_YSCALEYUV2RGB |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1116 #ifdef DITHER1XBPP |
4248 | 1117 "paddusb "MANGLE(g6Dither)", %%mm1\n\t" |
1118 "paddusb "MANGLE(r5Dither)", %%mm0\n\t" | |
1119 "paddusb "MANGLE(b5Dither)", %%mm3\n\t" | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1120 #endif |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1121 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1122 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1123 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1124 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1125 "psrlw $3, %%mm3 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1126 "psllw $3, %%mm1 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1127 "psllw $8, %%mm0 \n\t" |
4248 | 1128 "pand "MANGLE(g16Mask)", %%mm1 \n\t" |
1129 "pand "MANGLE(r16Mask)", %%mm0 \n\t" | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1130 |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1131 "por %%mm3, %%mm1 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1132 "por %%mm1, %%mm0 \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1133 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1134 MOVNTQ(%%mm0, (%4, %%REGa, 2)) |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1135 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1136 "add $4, %%"REG_a" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1137 "cmp %5, %%"REG_a" \n\t" |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1138 " jb 1b \n\t" |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1139 |
3209 | 1140 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW), |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1141 "m" (yalpha1), "m" (uvalpha1) |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
1142 : "%"REG_a |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1143 ); |
6578 | 1144 break; |
1145 #endif | |
1146 case IMGFMT_RGB32: | |
1147 #ifndef HAVE_MMX | |
1148 case IMGFMT_BGR32: | |
1149 #endif | |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1150 if(dstFormat==IMGFMT_BGR32) |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1151 { |
4794 | 1152 int i; |
4793 | 1153 #ifdef WORDS_BIGENDIAN |
1154 dest++; | |
1155 #endif | |
3209 | 1156 for(i=0;i<dstW;i++){ |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1157 // vertical linear interpolation && yuv2rgb in a single step: |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1158 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1159 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1160 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); |
2503 | 1161 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)]; |
1162 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)]; | |
1163 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)]; | |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1164 dest+= 4; |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1165 } |
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1166 } |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1167 else if(dstFormat==IMGFMT_BGR24) |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1168 { |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1169 int i; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1170 for(i=0;i<dstW;i++){ |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1171 // vertical linear interpolation && yuv2rgb in a single step: |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1172 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1173 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1174 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1175 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)]; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1176 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)]; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1177 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)]; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1178 dest+= 3; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1179 } |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1180 } |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
1181 else if(dstFormat==IMGFMT_BGR16) |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
1182 { |
2671 | 1183 int i; |
3209 | 1184 for(i=0;i<dstW;i++){ |
1185 // vertical linear interpolation && yuv2rgb in a single step: |
1186 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; |
1187 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); |
1188 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); |
1189 |
1190 ((uint16_t*)dest)[i] = |
2584 | 1191 clip_table16b[(Y + yuvtab_40cf[U]) >>13] | |
1192 clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] | | |
1193 clip_table16r[(Y + yuvtab_3343[V]) >>13]; | |
1194 } |
1195 } |
1196 else if(dstFormat==IMGFMT_BGR15) |
1197 { |
2671 | 1198 int i; |
3209 | 1199 for(i=0;i<dstW;i++){ |
1200 // vertical linear interpolation && yuv2rgb in a single step: |
1201 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)]; |
1202 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19); |
1203 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19); |
1204 |
1205 ((uint16_t*)dest)[i] = |
2584 | 1206 clip_table15b[(Y + yuvtab_40cf[U]) >>13] | |
1207 clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] | | |
1208 clip_table15r[(Y + yuvtab_3343[V]) >>13]; | |
1209 } |
1210 } |
1211 }//FULL_UV_IPOL |
1212 else |
1213 { |
6578 | 1214 #endif // if 0 |
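/* Note (added): the disabled #if 0 block above is the old full-chroma C path: it
   blends buf0/buf1 and uvbuf0/uvbuf1 with the yalpha/uvalpha weights (the >>19
   fixed-point blend) and maps the result through the yuvtab_* / clip_table lookups
   straight to packed RGB. The live code below does the same job through the MMX
   YSCALEYUV2RGB/WRITE* macros where available and falls back to
   YSCALE_YUV_2_ANYRGB_C otherwise. */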
1215 #ifdef HAVE_MMX |
6578 | 1216 switch(c->dstFormat) |
1217 { | |
11000 | 1218 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( |
6578 | 1219 case IMGFMT_BGR32: |
1220 asm volatile( |
18392 | 1221 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1222 "mov %4, %%"REG_b" \n\t" | |
1223 "push %%"REG_BP" \n\t" | |
1224 YSCALEYUV2RGB(%%REGBP, %5) | |
1225 WRITEBGR32(%%REGb, 8280(%5), %%REGBP) | |
1226 "pop %%"REG_BP" \n\t" | |
1227 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1228 |
18392 | 1229 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1230 "a" (&c->redDither) | |
1231 ); |
6578 | 1232 return; |
1233 case IMGFMT_BGR24: | |
1234 asm volatile( |
18392 | 1235 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1236 "mov %4, %%"REG_b" \n\t" | |
1237 "push %%"REG_BP" \n\t" | |
1238 YSCALEYUV2RGB(%%REGBP, %5) | |
1239 WRITEBGR24(%%REGb, 8280(%5), %%REGBP) | |
1240 "pop %%"REG_BP" \n\t" | |
1241 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1242 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1243 "a" (&c->redDither) | |
1244 ); |
6578 | 1245 return; |
1246 case IMGFMT_BGR15: | |
1247 asm volatile( |
18392 | 1248 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1249 "mov %4, %%"REG_b" \n\t" | |
1250 "push %%"REG_BP" \n\t" | |
1251 YSCALEYUV2RGB(%%REGBP, %5) | |
1252 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1253 #ifdef DITHER1XBPP |
4248 | 1254 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1255 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1256 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1257 #endif |
1258 |
18392 | 1259 WRITEBGR15(%%REGb, 8280(%5), %%REGBP) |
1260 "pop %%"REG_BP" \n\t" | |
1261 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1262 |
18392 | 1263 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1264 "a" (&c->redDither) | |
1265 ); |
6578 | 1266 return; |
1267 case IMGFMT_BGR16: | |
1268 asm volatile( |
18392 | 1269 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1270 "mov %4, %%"REG_b" \n\t" | |
1271 "push %%"REG_BP" \n\t" | |
1272 YSCALEYUV2RGB(%%REGBP, %5) | |
1273 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1274 #ifdef DITHER1XBPP |
4248 | 1275 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1276 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1277 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1278 #endif |
1279 |
18392 | 1280 WRITEBGR16(%%REGb, 8280(%5), %%REGBP) |
1281 "pop %%"REG_BP" \n\t" | |
1282 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1283 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1284 "a" (&c->redDither) | |
1285 ); |
6578 | 1286 return; |
7723 | 1287 case IMGFMT_YUY2: |
1288 asm volatile( | |
18392 | 1289 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1290 "mov %4, %%"REG_b" \n\t" | |
1291 "push %%"REG_BP" \n\t" | |
1292 YSCALEYUV2PACKED(%%REGBP, %5) | |
1293 WRITEYUY2(%%REGb, 8280(%5), %%REGBP) | |
1294 "pop %%"REG_BP" \n\t" | |
1295 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
1296 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), | |
1297 "a" (&c->redDither) | |
7723 | 1298 ); |
1299 return; | |
6578 | 1300 default: break; |
1301 } | |
1302 #endif //HAVE_MMX | |
7723 | 1303 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C) |
1304 } |
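/* Illustrative sketch (added, not part of the original file): the scalar shape of
   what yuv2packed2 computes for one BGR24 pixel -- blend two luma lines and two
   chroma lines with the yalpha/uvalpha weights exactly as the disabled C code
   above does (the >>19 blend), then convert the blended YUV to RGB. The real code
   uses lookup tables / SwsContext coefficients for the second step; the integer
   coefficients below are assumed BT.601-style values used purely for illustration. */
static inline void RENAME(example_yuv2bgr24_pixel)(uint16_t *buf0, uint16_t *buf1,
		uint16_t *uvbuf0, uint16_t *uvbuf1,
		int yalpha1, int yalpha, int uvalpha1, int uvalpha,
		int i, uint8_t *dest)
{
	/* vertical linear interpolation, same form as the C code above */
	int Y= (buf0  [i     ]*yalpha1  + buf1  [i     ]*yalpha  )>>19;
	int U= (uvbuf0[i     ]*uvalpha1 + uvbuf1[i     ]*uvalpha )>>19;
	int V= (uvbuf0[i+2048]*uvalpha1 + uvbuf1[i+2048]*uvalpha )>>19;

	/* assumed illustration-only YUV->RGB step (the original uses tables here) */
	int B= Y + ((454*(U-128))>>8);
	int G= Y - (( 88*(U-128) + 183*(V-128))>>8);
	int R= Y + ((359*(V-128))>>8);

	dest[0]= B<0 ? 0 : B>255 ? 255 : B;
	dest[1]= G<0 ? 0 : G>255 ? 255 : G;
	dest[2]= R<0 ? 0 : R>255 ? 255 : R;
}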
1305 |
1306 /** |
1307 * YV12 to RGB without scaling or interpolating |
1308 */ |
7723 | 1309 static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1, |
6578 | 1310 uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y) |
1311 { |
3344 | 1312 const int yalpha1=0; |
6578 | 1313 int i; |
1314 | |
1315 uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1 | |
1316 const int yalpha= 4096; //FIXME ... | |
2671 | 1317 |
4467 | 1318 if(flags&SWS_FULL_CHR_H_INT) |
1319 { |
7723 | 1320 RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y); |
1321 return; |
1322 } |
2576 | 1323 |
1324 #ifdef HAVE_MMX |
1325 if( uvalpha < 2048 ) // note this is not correct (it shifts chrominance by 0.5 pixels) but it is a bit faster |
1326 { |
6578 | 1327 switch(dstFormat) |
1328 { |
6578 | 1329 case IMGFMT_BGR32: |
1330 asm volatile( |
18392 | 1331 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1332 "mov %4, %%"REG_b" \n\t" | |
1333 "push %%"REG_BP" \n\t" | |
1334 YSCALEYUV2RGB1(%%REGBP, %5) | |
1335 WRITEBGR32(%%REGb, 8280(%5), %%REGBP) | |
1336 "pop %%"REG_BP" \n\t" | |
1337 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1338 |
18392 | 1339 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1340 "a" (&c->redDither) | |
1341 ); |
6578 | 1342 return; |
1343 case IMGFMT_BGR24: | |
1344 asm volatile( |
18392 | 1345 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1346 "mov %4, %%"REG_b" \n\t" | |
1347 "push %%"REG_BP" \n\t" | |
1348 YSCALEYUV2RGB1(%%REGBP, %5) | |
1349 WRITEBGR24(%%REGb, 8280(%5), %%REGBP) | |
1350 "pop %%"REG_BP" \n\t" | |
1351 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1352 |
18392 | 1353 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1354 "a" (&c->redDither) | |
1355 ); |
6578 | 1356 return; |
1357 case IMGFMT_BGR15: | |
1358 asm volatile( |
18392 | 1359 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1360 "mov %4, %%"REG_b" \n\t" | |
1361 "push %%"REG_BP" \n\t" | |
1362 YSCALEYUV2RGB1(%%REGBP, %5) | |
1363 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1364 #ifdef DITHER1XBPP |
4248 | 1365 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1366 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1367 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1368 #endif |
18392 | 1369 WRITEBGR15(%%REGb, 8280(%5), %%REGBP) |
1370 "pop %%"REG_BP" \n\t" | |
1371 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1372 |
18392 | 1373 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1374 "a" (&c->redDither) | |
1375 ); |
6578 | 1376 return; |
1377 case IMGFMT_BGR16: | |
1378 asm volatile( |
18392 | 1379 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1380 "mov %4, %%"REG_b" \n\t" | |
1381 "push %%"REG_BP" \n\t" | |
1382 YSCALEYUV2RGB1(%%REGBP, %5) | |
1383 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1384 #ifdef DITHER1XBPP |
4248 | 1385 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1386 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1387 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1388 #endif |
1389 |
18392 | 1390 WRITEBGR16(%%REGb, 8280(%5), %%REGBP) |
1391 "pop %%"REG_BP" \n\t" | |
1392 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1393 |
18392 | 1394 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1395 "a" (&c->redDither) | |
1396 ); |
6578 | 1397 return; |
7723 | 1398 case IMGFMT_YUY2: |
1399 asm volatile( | |
18392 | 1400 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1401 "mov %4, %%"REG_b" \n\t" | |
1402 "push %%"REG_BP" \n\t" | |
1403 YSCALEYUV2PACKED1(%%REGBP, %5) | |
1404 WRITEYUY2(%%REGb, 8280(%5), %%REGBP) | |
1405 "pop %%"REG_BP" \n\t" | |
1406 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1407 |
18392 | 1408 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1409 "a" (&c->redDither) | |
7723 | 1410 ); |
1411 return; | |
1412 } |
1413 } |
1414 else |
1415 { |
6578 | 1416 switch(dstFormat) |
1417 { |
6578 | 1418 case IMGFMT_BGR32: |
1419 asm volatile( |
18392 | 1420 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1421 "mov %4, %%"REG_b" \n\t" | |
1422 "push %%"REG_BP" \n\t" | |
1423 YSCALEYUV2RGB1b(%%REGBP, %5) | |
1424 WRITEBGR32(%%REGb, 8280(%5), %%REGBP) | |
1425 "pop %%"REG_BP" \n\t" | |
1426 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1427 |
18392 | 1428 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1429 "a" (&c->redDither) | |
1430 ); |
6578 | 1431 return; |
1432 case IMGFMT_BGR24: | |
1433 asm volatile( |
18392 | 1434 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1435 "mov %4, %%"REG_b" \n\t" | |
1436 "push %%"REG_BP" \n\t" | |
1437 YSCALEYUV2RGB1b(%%REGBP, %5) | |
1438 WRITEBGR24(%%REGb, 8280(%5), %%REGBP) | |
1439 "pop %%"REG_BP" \n\t" | |
1440 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1441 |
18392 | 1442 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1443 "a" (&c->redDither) | |
1444 ); |
6578 | 1445 return; |
1446 case IMGFMT_BGR15: | |
1447 asm volatile( |
18392 | 1448 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1449 "mov %4, %%"REG_b" \n\t" | |
1450 "push %%"REG_BP" \n\t" | |
1451 YSCALEYUV2RGB1b(%%REGBP, %5) | |
1452 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1453 #ifdef DITHER1XBPP |
4248 | 1454 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1455 "paddusb "MANGLE(g5Dither)", %%mm4\n\t" | |
1456 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1457 #endif |
18392 | 1458 WRITEBGR15(%%REGb, 8280(%5), %%REGBP) |
1459 "pop %%"REG_BP" \n\t" | |
1460 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1461 |
18392 | 1462 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1463 "a" (&c->redDither) | |
1464 ); |
6578 | 1465 return; |
1466 case IMGFMT_BGR16: | |
1467 asm volatile( |
18392 | 1468 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1469 "mov %4, %%"REG_b" \n\t" | |
1470 "push %%"REG_BP" \n\t" | |
1471 YSCALEYUV2RGB1b(%%REGBP, %5) | |
1472 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ |
1473 #ifdef DITHER1XBPP |
4248 | 1474 "paddusb "MANGLE(b5Dither)", %%mm2\n\t" |
1475 "paddusb "MANGLE(g6Dither)", %%mm4\n\t" | |
1476 "paddusb "MANGLE(r5Dither)", %%mm5\n\t" | |
1477 #endif |
1478 |
18392 | 1479 WRITEBGR16(%%REGb, 8280(%5), %%REGBP) |
1480 "pop %%"REG_BP" \n\t" | |
1481 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1482 |
18392 | 1483 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1484 "a" (&c->redDither) | |
1485 ); |
6578 | 1486 return; |
7723 | 1487 case IMGFMT_YUY2: |
1488 asm volatile( | |
18392 | 1489 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" |
1490 "mov %4, %%"REG_b" \n\t" | |
1491 "push %%"REG_BP" \n\t" | |
1492 YSCALEYUV2PACKED1b(%%REGBP, %5) | |
1493 WRITEYUY2(%%REGb, 8280(%5), %%REGBP) | |
1494 "pop %%"REG_BP" \n\t" | |
1495 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" | |
9417 | 1496 |
18392 | 1497 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest), |
1498 "a" (&c->redDither) | |
7723 | 1499 ); |
1500 return; | |
1501 } |
1502 } |
1503 #endif |
6578 | 1504 if( uvalpha < 2048 ) |
1505 { | |
7723 | 1506 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C) |
6578 | 1507 }else{ |
7723 | 1508 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C) |
6578 | 1509 } |
1510 } |
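/* Illustrative sketch (added): how yuv2packed1 above chooses its chroma source in
   the unscaled case. For uvalpha < 2048 the *1 variants read only uvbuf0 (cheaper,
   but as the comment in the function notes this effectively shifts chroma by half
   a line); otherwise the *1b/*1B variants blend the two chroma buffers. The real
   macros keep everything in fixed point -- the exact shifts are omitted here. */
static inline int RENAME(example_unscaled_chroma)(uint16_t *uvbuf0, uint16_t *uvbuf1,
		int uvalpha, int i)
{
	if(uvalpha < 2048)
		return uvbuf0[i];                  /* single-line variant */
	return (uvbuf0[i] + uvbuf1[i])>>1;         /* two-line average variant */
}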
1511 |
4481 | 1512 //FIXME yuy2* can read up to 7 samples too many |
1513 | |
1514 static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width) |
4467 | 1515 { |
4481 | 1516 #ifdef HAVE_MMX |
1517 asm volatile( | |
1518 "movq "MANGLE(bm01010101)", %%mm2\n\t" | |
1519 "mov %0, %%"REG_a" \n\t" |
4481 | 1520 "1: \n\t" |
1521 "movq (%1, %%"REG_a",2), %%mm0 \n\t" |
1522 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" |
4481 | 1523 "pand %%mm2, %%mm0 \n\t" |
1524 "pand %%mm2, %%mm1 \n\t" | |
1525 "packuswb %%mm1, %%mm0 \n\t" | |
1526 "movq %%mm0, (%2, %%"REG_a") \n\t" |
1527 "add $8, %%"REG_a" \n\t" |
4481 | 1528 " js 1b \n\t" |
1529 : : "g" (-width), "r" (src+width*2), "r" (dst+width) |
1530 : "%"REG_a |
4481 | 1531 ); |
4467 | 1532 #else |
1533 int i; | |
1534 for(i=0; i<width; i++) | |
1535 dst[i]= src[2*i]; | |
1536 #endif | |
1537 } | |
1538 | |
1539 static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width) |
4467 | 1540 { |
4481 | 1541 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) |
1542 asm volatile( | |
1543 "movq "MANGLE(bm01010101)", %%mm4\n\t" | |
1544 "mov %0, %%"REG_a" \n\t" |
4481 | 1545 "1: \n\t" |
1546 "movq (%1, %%"REG_a",4), %%mm0 \n\t" |
1547 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t" |
1548 "movq (%2, %%"REG_a",4), %%mm2 \n\t" |
1549 "movq 8(%2, %%"REG_a",4), %%mm3 \n\t" |
4481 | 1550 PAVGB(%%mm2, %%mm0) |
1551 PAVGB(%%mm3, %%mm1) | |
1552 "psrlw $8, %%mm0 \n\t" | |
1553 "psrlw $8, %%mm1 \n\t" | |
1554 "packuswb %%mm1, %%mm0 \n\t" | |
1555 "movq %%mm0, %%mm1 \n\t" | |
1556 "psrlw $8, %%mm0 \n\t" | |
1557 "pand %%mm4, %%mm1 \n\t" | |
1558 "packuswb %%mm0, %%mm0 \n\t" | |
1559 "packuswb %%mm1, %%mm1 \n\t" | |
1560 "movd %%mm0, (%4, %%"REG_a") \n\t" |
1561 "movd %%mm1, (%3, %%"REG_a") \n\t" |
1562 "add $4, %%"REG_a" \n\t" |
4481 | 1563 " js 1b \n\t" |
1564 : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width) |
1565 : "%"REG_a |
4481 | 1566 ); |
4467 | 1567 #else |
1568 int i; | |
1569 for(i=0; i<width; i++) | |
1570 { | |
1571 dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1; | |
1572 dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1; | |
1573 } | |
1574 #endif | |
1575 } | |
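/* Illustrative note (added): YUY2 stores pixel pairs as Y0 U Y1 V, which is why
   yuy2ToY above copies every second byte and yuy2ToUV takes bytes 1 and 3 of each
   4-byte group, averaging the two source lines into one chroma line. A plain
   scalar version of both steps for one pixel pair: */
static inline void RENAME(example_yuy2_unpack_pair)(const uint8_t *line0, const uint8_t *line1,
		int i, uint8_t *Y0, uint8_t *Y1, uint8_t *U, uint8_t *V)
{
	*Y0= line0[4*i + 0];
	*Y1= line0[4*i + 2];
	*U = (line0[4*i + 1] + line1[4*i + 1])>>1; /* chroma averaged over two lines */
	*V = (line0[4*i + 3] + line1[4*i + 3])>>1;
}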
1576 | |
9071 | 1577 //this is almost identical to the previous one, and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses |
1578 static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width) |
9071 | 1579 { |
1580 #ifdef HAVE_MMX | |
1581 asm volatile( | |
1582 "mov %0, %%"REG_a" \n\t" |
9071 | 1583 "1: \n\t" |
1584 "movq (%1, %%"REG_a",2), %%mm0 \n\t" |
1585 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" |
9071 | 1586 "psrlw $8, %%mm0 \n\t" |
1587 "psrlw $8, %%mm1 \n\t" | |
1588 "packuswb %%mm1, %%mm0 \n\t" | |
1589 "movq %%mm0, (%2, %%"REG_a") \n\t" |
1590 "add $8, %%"REG_a" \n\t" |
9071 | 1591 " js 1b \n\t" |
1592 : : "g" (-width), "r" (src+width*2), "r" (dst+width) |
1593 : "%"REG_a |
9071 | 1594 ); |
1595 #else | |
1596 int i; | |
1597 for(i=0; i<width; i++) | |
1598 dst[i]= src[2*i+1]; | |
1599 #endif | |
1600 } | |
1601 | |
1602 static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width) |
9071 | 1603 { |
1604 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) | |
1605 asm volatile( | |
1606 "movq "MANGLE(bm01010101)", %%mm4\n\t" | |
1607 "mov %0, %%"REG_a" \n\t" |
9071 | 1608 "1: \n\t" |
1609 "movq (%1, %%"REG_a",4), %%mm0 \n\t" |
1610 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t" |
1611 "movq (%2, %%"REG_a",4), %%mm2 \n\t" |
1612 "movq 8(%2, %%"REG_a",4), %%mm3 \n\t" |
9071 | 1613 PAVGB(%%mm2, %%mm0) |
1614 PAVGB(%%mm3, %%mm1) | |
1615 "pand %%mm4, %%mm0 \n\t" | |
1616 "pand %%mm4, %%mm1 \n\t" | |
1617 "packuswb %%mm1, %%mm0 \n\t" | |
1618 "movq %%mm0, %%mm1 \n\t" | |
1619 "psrlw $8, %%mm0 \n\t" | |
1620 "pand %%mm4, %%mm1 \n\t" | |
1621 "packuswb %%mm0, %%mm0 \n\t" | |
1622 "packuswb %%mm1, %%mm1 \n\t" | |
1623 "movd %%mm0, (%4, %%"REG_a") \n\t" |
1624 "movd %%mm1, (%3, %%"REG_a") \n\t" |
1625 "add $4, %%"REG_a" \n\t" |
9071 | 1626 " js 1b \n\t" |
1627 : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width) |
1628 : "%"REG_a |
9071 | 1629 ); |
1630 #else | |
1631 int i; | |
1632 for(i=0; i<width; i++) | |
1633 { | |
1634 dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1; | |
1635 dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1; | |
1636 } | |
1637 #endif | |
1638 } | |
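/* Descriptive note (added): UYVY is the byte-swapped sibling of YUY2 (U Y0 V Y1),
   so uyvyToY/uyvyToUV above are the same as the yuy2 versions with the luma and
   chroma offsets exchanged -- luma at the odd bytes, chroma at bytes 0 and 2 of
   each 4-byte group. */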
1639 | |
4467 | 1640 static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width) |
1641 { | |
1642 int i; | |
1643 for(i=0; i<width; i++) | |
1644 { | |
9433 | 1645 int b= ((uint32_t*)src)[i]&0xFF; |
1646 int g= (((uint32_t*)src)[i]>>8)&0xFF; | |
9499 | 1647 int r= (((uint32_t*)src)[i]>>16)&0xFF; |
4467 | 1648 |
9433 | 1649 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT); |
4467 | 1650 } |
1651 } | |
1652 | |
1653 static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1654 { | |
1655 int i; | |
1656 for(i=0; i<width; i++) | |
1657 { | |
9433 | 1658 const int a= ((uint32_t*)src1)[2*i+0]; |
1659 const int e= ((uint32_t*)src1)[2*i+1]; | |
1660 const int c= ((uint32_t*)src2)[2*i+0]; | |
1661 const int d= ((uint32_t*)src2)[2*i+1]; | |
1662 const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF); | |
1663 const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00); | |
1664 const int b= l&0x3FF; | |
1665 const int g= h>>8; | |
1666 const int r= l>>16; | |
4467 | 1667 |
1668 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1669 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1670 } | |
1671 } | |
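/* Illustrative sketch (added): bgr32ToUV above sums a 2x2 block of pixels while
   they are still packed (the 0xFF00FF / 0x00FF00 masks keep the blue+red and green
   sums in separate fields), and the "+2" folded into RGB2YUV_SHIFT turns the
   four-pixel sum back into an average. The same result written out naively, one
   2x2 block per call: */
static inline void RENAME(example_bgr32_block_to_uv)(uint8_t *dstU, uint8_t *dstV,
		const uint32_t *row0, const uint32_t *row1, int i)
{
	int p, b=0, g=0, r=0;
	for(p=0; p<4; p++){
		uint32_t px= (p<2 ? row0 : row1)[2*i + (p&1)];
		b+=  px      & 0xFF;
		g+= (px>> 8) & 0xFF;
		r+= (px>>16) & 0xFF;
	}
	dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
	dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}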
1672 | |
1673 static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width) |
4467 | 1674 { |
4612 | 1675 #ifdef HAVE_MMX |
1676 asm volatile( | |
1677 "mov %2, %%"REG_a" \n\t" |
4923 | 1678 "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t" |
1679 "movq "MANGLE(w1111)", %%mm5 \n\t" | |
4612 | 1680 "pxor %%mm7, %%mm7 \n\t" |
1681 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" |
1682 ASMALIGN16 |
4612 | 1683 "1: \n\t" |
1684 PREFETCH" 64(%0, %%"REG_b") \n\t" |
1685 "movd (%0, %%"REG_b"), %%mm0 \n\t" |
1686 "movd 3(%0, %%"REG_b"), %%mm1 \n\t" |
4612 | 1687 "punpcklbw %%mm7, %%mm0 \n\t" |
1688 "punpcklbw %%mm7, %%mm1 \n\t" | |
1689 "movd 6(%0, %%"REG_b"), %%mm2 \n\t" |
1690 "movd 9(%0, %%"REG_b"), %%mm3 \n\t" |
4612 | 1691 "punpcklbw %%mm7, %%mm2 \n\t" |
1692 "punpcklbw %%mm7, %%mm3 \n\t" | |
1693 "pmaddwd %%mm6, %%mm0 \n\t" | |
1694 "pmaddwd %%mm6, %%mm1 \n\t" | |
1695 "pmaddwd %%mm6, %%mm2 \n\t" | |
1696 "pmaddwd %%mm6, %%mm3 \n\t" | |
1697 #ifndef FAST_BGR2YV12 | |
1698 "psrad $8, %%mm0 \n\t" | |
1699 "psrad $8, %%mm1 \n\t" | |
1700 "psrad $8, %%mm2 \n\t" | |
1701 "psrad $8, %%mm3 \n\t" | |
1702 #endif | |
1703 "packssdw %%mm1, %%mm0 \n\t" | |
1704 "packssdw %%mm3, %%mm2 \n\t" | |
1705 "pmaddwd %%mm5, %%mm0 \n\t" | |
1706 "pmaddwd %%mm5, %%mm2 \n\t" | |
1707 "packssdw %%mm2, %%mm0 \n\t" | |
1708 "psraw $7, %%mm0 \n\t" | |
1709 | |
1710 "movd 12(%0, %%"REG_b"), %%mm4 \n\t" |
1711 "movd 15(%0, %%"REG_b"), %%mm1 \n\t" |
4612 | 1712 "punpcklbw %%mm7, %%mm4 \n\t" |
1713 "punpcklbw %%mm7, %%mm1 \n\t" | |
1714 "movd 18(%0, %%"REG_b"), %%mm2 \n\t" |
1715 "movd 21(%0, %%"REG_b"), %%mm3 \n\t" |
4612 | 1716 "punpcklbw %%mm7, %%mm2 \n\t" |
1717 "punpcklbw %%mm7, %%mm3 \n\t" | |
1718 "pmaddwd %%mm6, %%mm4 \n\t" | |
1719 "pmaddwd %%mm6, %%mm1 \n\t" | |
1720 "pmaddwd %%mm6, %%mm2 \n\t" | |
1721 "pmaddwd %%mm6, %%mm3 \n\t" | |
1722 #ifndef FAST_BGR2YV12 | |
1723 "psrad $8, %%mm4 \n\t" | |
1724 "psrad $8, %%mm1 \n\t" | |
1725 "psrad $8, %%mm2 \n\t" | |
1726 "psrad $8, %%mm3 \n\t" | |
1727 #endif | |
1728 "packssdw %%mm1, %%mm4 \n\t" | |
1729 "packssdw %%mm3, %%mm2 \n\t" | |
1730 "pmaddwd %%mm5, %%mm4 \n\t" | |
1731 "pmaddwd %%mm5, %%mm2 \n\t" | |
1732 "add $24, %%"REG_b" \n\t" |
4612 | 1733 "packssdw %%mm2, %%mm4 \n\t" |
1734 "psraw $7, %%mm4 \n\t" | |
1735 | |
1736 "packuswb %%mm4, %%mm0 \n\t" | |
4923 | 1737 "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t" |
4612 | 1738 |
1739 "movq %%mm0, (%1, %%"REG_a") \n\t" |
1740 "add $8, %%"REG_a" \n\t" |
4612 | 1741 " js 1b \n\t" |
1742 : : "r" (src+width*3), "r" (dst+width), "g" (-width) |
1743 : "%"REG_a, "%"REG_b |
4612 | 1744 ); |
4467 | 1745 #else |
1746 int i; | |
1747 for(i=0; i<width; i++) | |
1748 { | |
1749 int b= src[i*3+0]; | |
1750 int g= src[i*3+1]; | |
1751 int r= src[i*3+2]; | |
1752 | |
9434 | 1753 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT); |
4467 | 1754 } |
1755 #endif | |
1756 } | |
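/* Descriptive note (added): the MMX path of bgr24ToY above converts 8 packed BGR24
   pixels per loop iteration -- the movd loads at offsets 0,3,6,... each fetch one
   3-byte pixel, pmaddwd with bgr2YCoeff forms the weighted RGB sums, the psrad $8
   steps (skipped when FAST_BGR2YV12 is defined) keep extra intermediate precision,
   and the result is biased with bgr2YOffset before the movq store of 8 luma bytes.
   The #else branch inside the function is the scalar reference formula. */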
1757 | |
1758 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width) |
4467 | 1759 { |
4619 | 1760 #ifdef HAVE_MMX |
1761 asm volatile( | |
1762 "mov %4, %%"REG_a" \n\t" |
4923 | 1763 "movq "MANGLE(w1111)", %%mm5 \n\t" |
1764 "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t" | |
4619 | 1765 "pxor %%mm7, %%mm7 \n\t" |
1766 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b" \n\t" |
1767 "add %%"REG_b", %%"REG_b" \n\t" |
1768 ASMALIGN16 |
4619 | 1769 "1: \n\t" |
1770 PREFETCH" 64(%0, %%"REG_b") \n\t" |
1771 PREFETCH" 64(%1, %%"REG_b") \n\t" |
4619 | 1772 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) |
1773 "movq (%0, %%"REG_b"), %%mm0 \n\t" |
1774 "movq (%1, %%"REG_b"), %%mm1 \n\t" |
1775 "movq 6(%0, %%"REG_b"), %%mm2 \n\t" |
1776 "movq 6(%1, %%"REG_b"), %%mm3 \n\t" |
4619 | 1777 PAVGB(%%mm1, %%mm0) |
1778 PAVGB(%%mm3, %%mm2) | |
1779 "movq %%mm0, %%mm1 \n\t" | |
1780 "movq %%mm2, %%mm3 \n\t" | |
1781 "psrlq $24, %%mm0 \n\t" | |
1782 "psrlq $24, %%mm2 \n\t" | |
1783 PAVGB(%%mm1, %%mm0) | |
1784 PAVGB(%%mm3, %%mm2) | |
1785 "punpcklbw %%mm7, %%mm0 \n\t" | |
1786 "punpcklbw %%mm7, %%mm2 \n\t" | |
1787 #else | |
1788 "movd (%0, %%"REG_b"), %%mm0 \n\t" |
1789 "movd (%1, %%"REG_b"), %%mm1 \n\t" |
1790 "movd 3(%0, %%"REG_b"), %%mm2 \n\t" |
1791 "movd 3(%1, %%"REG_b"), %%mm3 \n\t" |
4619 | 1792 "punpcklbw %%mm7, %%mm0 \n\t" |
1793 "punpcklbw %%mm7, %%mm1 \n\t" | |
1794 "punpcklbw %%mm7, %%mm2 \n\t" | |
1795 "punpcklbw %%mm7, %%mm3 \n\t" | |
1796 "paddw %%mm1, %%mm0 \n\t" | |
1797 "paddw %%mm3, %%mm2 \n\t" | |
1798 "paddw %%mm2, %%mm0 \n\t" | |
1799 "movd 6(%0, %%"REG_b"), %%mm4 \n\t" |
1800 "movd 6(%1, %%"REG_b"), %%mm1 \n\t" |
1801 "movd 9(%0, %%"REG_b"), %%mm2 \n\t" |
1802 "movd 9(%1, %%"REG_b"), %%mm3 \n\t" |
4619 | 1803 "punpcklbw %%mm7, %%mm4 \n\t" |
1804 "punpcklbw %%mm7, %%mm1 \n\t" | |
1805 "punpcklbw %%mm7, %%mm2 \n\t" | |
1806 "punpcklbw %%mm7, %%mm3 \n\t" | |
1807 "paddw %%mm1, %%mm4 \n\t" | |
1808 "paddw %%mm3, %%mm2 \n\t" | |
1809 "paddw %%mm4, %%mm2 \n\t" | |
1810 "psrlw $2, %%mm0 \n\t" | |
1811 "psrlw $2, %%mm2 \n\t" | |
1812 #endif | |
4923 | 1813 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" |
1814 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" | |
4619 | 1815 |
1816 "pmaddwd %%mm0, %%mm1 \n\t" | |
1817 "pmaddwd %%mm2, %%mm3 \n\t" | |
1818 "pmaddwd %%mm6, %%mm0 \n\t" | |
1819 "pmaddwd %%mm6, %%mm2 \n\t" | |
1820 #ifndef FAST_BGR2YV12 | |
1821 "psrad $8, %%mm0 \n\t" | |
1822 "psrad $8, %%mm1 \n\t" | |
1823 "psrad $8, %%mm2 \n\t" | |
1824 "psrad $8, %%mm3 \n\t" | |
1825 #endif | |
1826 "packssdw %%mm2, %%mm0 \n\t" | |
1827 "packssdw %%mm3, %%mm1 \n\t" | |
1828 "pmaddwd %%mm5, %%mm0 \n\t" | |
1829 "pmaddwd %%mm5, %%mm1 \n\t" | |
1830 "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0 | |
1831 "psraw $7, %%mm0 \n\t" | |
1832 | |
1833 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) | |
1834 "movq 12(%0, %%"REG_b"), %%mm4 \n\t" |
1835 "movq 12(%1, %%"REG_b"), %%mm1 \n\t" |
1836 "movq 18(%0, %%"REG_b"), %%mm2 \n\t" |
1837 "movq 18(%1, %%"REG_b"), %%mm3 \n\t" |
4619 | 1838 PAVGB(%%mm1, %%mm4) |
1839 PAVGB(%%mm3, %%mm2) | |
1840 "movq %%mm4, %%mm1 \n\t" | |
1841 "movq %%mm2, %%mm3 \n\t" | |
1842 "psrlq $24, %%mm4 \n\t" | |
1843 "psrlq $24, %%mm2 \n\t" | |
1844 PAVGB(%%mm1, %%mm4) | |
1845 PAVGB(%%mm3, %%mm2) | |
1846 "punpcklbw %%mm7, %%mm4 \n\t" | |
1847 "punpcklbw %%mm7, %%mm2 \n\t" | |
1848 #else | |
1849 "movd 12(%0, %%"REG_b"), %%mm4 \n\t" |
1850 "movd 12(%1, %%"REG_b"), %%mm1 \n\t" |
1851 "movd 15(%0, %%"REG_b"), %%mm2 \n\t" |
1852 "movd 15(%1, %%"REG_b"), %%mm3 \n\t" |
4619 | 1853 "punpcklbw %%mm7, %%mm4 \n\t" |
1854 "punpcklbw %%mm7, %%mm1 \n\t" | |
1855 "punpcklbw %%mm7, %%mm2 \n\t" | |
1856 "punpcklbw %%mm7, %%mm3 \n\t" | |
1857 "paddw %%mm1, %%mm4 \n\t" | |
1858 "paddw %%mm3, %%mm2 \n\t" | |
1859 "paddw %%mm2, %%mm4 \n\t" | |
1860 "movd 18(%0, %%"REG_b"), %%mm5 \n\t" |
1861 "movd 18(%1, %%"REG_b"), %%mm1 \n\t" |
1862 "movd 21(%0, %%"REG_b"), %%mm2 \n\t" |
1863 "movd 21(%1, %%"REG_b"), %%mm3 \n\t" |
4619 | 1864 "punpcklbw %%mm7, %%mm5 \n\t" |
1865 "punpcklbw %%mm7, %%mm1 \n\t" | |
1866 "punpcklbw %%mm7, %%mm2 \n\t" | |
1867 "punpcklbw %%mm7, %%mm3 \n\t" | |
1868 "paddw %%mm1, %%mm5 \n\t" | |
1869 "paddw %%mm3, %%mm2 \n\t" | |
1870 "paddw %%mm5, %%mm2 \n\t" | |
4923 | 1871 "movq "MANGLE(w1111)", %%mm5 \n\t" |
4619 | 1872 "psrlw $2, %%mm4 \n\t" |
1873 "psrlw $2, %%mm2 \n\t" | |
1874 #endif | |
4923 | 1875 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" |
1876 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" | |
4619 | 1877 |
1878 "pmaddwd %%mm4, %%mm1 \n\t" | |
1879 "pmaddwd %%mm2, %%mm3 \n\t" | |
1880 "pmaddwd %%mm6, %%mm4 \n\t" | |
1881 "pmaddwd %%mm6, %%mm2 \n\t" | |
1882 #ifndef FAST_BGR2YV12 | |
1883 "psrad $8, %%mm4 \n\t" | |
1884 "psrad $8, %%mm1 \n\t" | |
1885 "psrad $8, %%mm2 \n\t" | |
1886 "psrad $8, %%mm3 \n\t" | |
1887 #endif | |
1888 "packssdw %%mm2, %%mm4 \n\t" | |
1889 "packssdw %%mm3, %%mm1 \n\t" | |
1890 "pmaddwd %%mm5, %%mm4 \n\t" | |
1891 "pmaddwd %%mm5, %%mm1 \n\t" | |
1892 "add $24, %%"REG_b" \n\t" |
4619 | 1893 "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2 |
1894 "psraw $7, %%mm4 \n\t" | |
1895 | |
1896 "movq %%mm0, %%mm1 \n\t" | |
1897 "punpckldq %%mm4, %%mm0 \n\t" | |
1898 "punpckhdq %%mm4, %%mm1 \n\t" | |
1899 "packsswb %%mm1, %%mm0 \n\t" | |
4923 | 1900 "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t" |
4619 | 1901 |
1902 "movd %%mm0, (%2, %%"REG_a") \n\t" |
4619 | 1903 "punpckhdq %%mm0, %%mm0 \n\t" |
1904 "movd %%mm0, (%3, %%"REG_a") \n\t" |
1905 "add $4, %%"REG_a" \n\t" |
4619 | 1906 " js 1b \n\t" |
1907 : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width) |
1908 : "%"REG_a, "%"REG_b |
4619 | 1909 ); |
4467 | 1910 #else |
1911 int i; | |
1912 for(i=0; i<width; i++) | |
1913 { | |
1914 int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3]; | |
1915 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4]; | |
1916 int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5]; | |
1917 | |
1918 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1919 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
1920 } | |
1921 #endif | |
1922 } | |
1923 | |
4578 | 1924 static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width) |
1925 { | |
1926 int i; | |
1927 for(i=0; i<width; i++) | |
1928 { | |
9433 | 1929 int d= ((uint16_t*)src)[i]; |
4578 | 1930 int b= d&0x1F; |
1931 int g= (d>>5)&0x3F; | |
1932 int r= (d>>11)&0x1F; | |
1933 | |
1934 dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16; | |
1935 } | |
1936 } | |
1937 | |
1938 static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1939 { | |
1940 int i; | |
1941 for(i=0; i<width; i++) | |
1942 { | |
9433 | 1943 int d0= ((uint32_t*)src1)[i]; |
1944 int d1= ((uint32_t*)src2)[i]; | |
4579 | 1945 |
1946 int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F); | |
1947 int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F); | |
1948 | |
1949 int dh2= (dh>>11) + (dh<<21); | |
1950 int d= dh2 + dl; | |
1951 | |
1952 int b= d&0x7F; | |
1953 int r= (d>>11)&0x7F; | |
1954 int g= d>>21; | |
4578 | 1955 dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128; |
1956 dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128; | |
1957 } | |
1958 } | |
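/* Illustrative sketch (added): bgr16ToUV above keeps the four RGB565 pixels of a
   2x2 block packed while summing them (the 0x07E0F81F / 0x07C0F83F masks separate
   the 5- and 6-bit fields), then compensates for both the four-pixel sum and the
   reduced channel depth in the final shift (the 2* factors and the "-2"). The
   naive per-block equivalent, decoding each pixel first: */
static inline void RENAME(example_bgr16_block_to_uv)(uint8_t *dstU, uint8_t *dstV,
		const uint16_t *row0, const uint16_t *row1, int i)
{
	int p0= row0[2*i], p1= row0[2*i+1], p2= row1[2*i], p3= row1[2*i+1];
	int b= ( p0      &0x1F) + ( p1      &0x1F) + ( p2      &0x1F) + ( p3      &0x1F);
	int g= ((p0>> 5) &0x3F) + ((p1>> 5) &0x3F) + ((p2>> 5) &0x3F) + ((p3>> 5) &0x3F);
	int r= ((p0>>11) &0x1F) + ((p1>>11) &0x1F) + ((p2>>11) &0x1F) + ((p3>>11) &0x1F);

	dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
	dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
}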
1959 | |
4580 | 1960 static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width) |
1961 { | |
1962 int i; | |
1963 for(i=0; i<width; i++) | |
1964 { | |
9433 | 1965 int d= ((uint16_t*)src)[i]; |
4580 | 1966 int b= d&0x1F; |
1967 int g= (d>>5)&0x1F; | |
1968 int r= (d>>10)&0x1F; | |
1969 | |
1970 dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16; | |
1971 } | |
1972 } | |
1973 | |
1974 static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
1975 { | |
1976 int i; | |
1977 for(i=0; i<width; i++) | |
1978 { | |
9433 | 1979 int d0= ((uint32_t*)src1)[i]; |
1980 int d1= ((uint32_t*)src2)[i]; | |
4580 | 1981 |
1982 int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F); | |
1983 int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F); | |
1984 | |
1985 int dh2= (dh>>11) + (dh<<21); | |
1986 int d= dh2 + dl; | |
1987 | |
1988 int b= d&0x7F; | |
1989 int r= (d>>10)&0x7F; | |
1990 int g= d>>21; | |
1991 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128; | |
1992 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128; | |
1993 } | |
1994 } | |
1995 | |
1996 | |
4558 | 1997 static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width) |
1998 { | |
1999 int i; | |
2000 for(i=0; i<width; i++) | |
2001 { | |
9433 | 2002 int r= ((uint32_t*)src)[i]&0xFF; |
2003 int g= (((uint32_t*)src)[i]>>8)&0xFF; | |
9499 | 2004 int b= (((uint32_t*)src)[i]>>16)&0xFF; |
4558 | 2005 |
9433 | 2006 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT); |
4558 | 2007 } |
2008 } | |
2009 | |
2010 static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
2011 { | |
2012 int i; | |
2013 for(i=0; i<width; i++) | |
2014 { | |
9433 | 2015 const int a= ((uint32_t*)src1)[2*i+0]; |
2016 const int e= ((uint32_t*)src1)[2*i+1]; | |
2017 const int c= ((uint32_t*)src2)[2*i+0]; | |
2018 const int d= ((uint32_t*)src2)[2*i+1]; | |
2019 const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF); | |
2020 const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00); | |
2021 const int r= l&0x3FF; | |
2022 const int g= h>>8; | |
2023 const int b= l>>16; | |
4558 | 2024 |
2025 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2026 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2027 } | |
2028 } | |
2029 | |
2030 static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width) | |
2031 { | |
2032 int i; | |
2033 for(i=0; i<width; i++) | |
2034 { | |
2035 int r= src[i*3+0]; | |
2036 int g= src[i*3+1]; | |
2037 int b= src[i*3+2]; | |
2038 | |
9433 | 2039 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT); |
4558 | 2040 } |
2041 } | |
2042 | |
2043 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) | |
2044 { | |
2045 int i; | |
2046 for(i=0; i<width; i++) | |
2047 { | |
2048 int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3]; | |
2049 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4]; | |
2050 int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5]; | |
2051 | |
2052 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2053 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128; | |
2054 } | |
2055 } | |
2056 | |
4467 | 2057 |
3272 | 2058 // Bilinear / Bicubic scaling |
2059 static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, | |
2060 int16_t *filter, int16_t *filterPos, long filterSize) |
3272 | 2061 { |
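	/* Descriptive note (added): for each output sample i this is a dot product of
	   filterSize source pixels starting at filterPos[i] with the coefficients at
	   filter[filterSize*i], written out as a 16-bit intermediate line. The MMX
	   paths below unroll the common filterSize==4 and ==8 cases and produce two
	   output samples per loop iteration. */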
2062 #ifdef HAVE_MMX | |
2063 assert(filterSize % 4 == 0 && filterSize>0); |
3272 | 2064 if(filterSize==4) // always true for upscaling, sometimes for downscaling too |
2065 { | |
2066 long counter= -2*dstW; |
3272 | 2067 filter-= counter*2; |
2068 filterPos-= counter/2; | |
2069 dst-= counter/2; | |
2070 asm volatile( | |
2071 "pxor %%mm7, %%mm7 \n\t" | |
4248 | 2072 "movq "MANGLE(w02)", %%mm6 \n\t" |
2073 "push %%"REG_BP" \n\t" // we use 7 regs here ... |
2074 "mov %%"REG_a", %%"REG_BP" \n\t" |
2075 ASMALIGN16 |
3272 | 2076 "1: \n\t" |
2077 "movzwl (%2, %%"REG_BP"), %%eax \n\t" |
2078 "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t" |
2079 "movq (%1, %%"REG_BP", 4), %%mm1\n\t" |
2080 "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t" |
2081 "movd (%3, %%"REG_a"), %%mm0 \n\t" |
2082 "movd (%3, %%"REG_b"), %%mm2 \n\t" |
3272 | 2083 "punpcklbw %%mm7, %%mm0 \n\t" |
2084 "punpcklbw %%mm7, %%mm2 \n\t" | |
2085 "pmaddwd %%mm1, %%mm0 \n\t" | |
2086 "pmaddwd %%mm2, %%mm3 \n\t" | |
2087 "psrad $8, %%mm0 \n\t" | |
2088 "psrad $8, %%mm3 \n\t" | |
2089 "packssdw %%mm3, %%mm0 \n\t" | |
2090 "pmaddwd %%mm6, %%mm0 \n\t" | |
2091 "packssdw %%mm0, %%mm0 \n\t" | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2092 "movd %%mm0, (%4, %%"REG_BP") \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2093 "add $4, %%"REG_BP" \n\t" |
3272 | 2094 " jnc 1b \n\t" |
3352 | 2095 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2096 "pop %%"REG_BP" \n\t" |
3272 | 2097 : "+a" (counter) |
2098 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst) | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2099 : "%"REG_b |
3272 | 2100 ); |
2101 } | |
2102 else if(filterSize==8) | |
2103 { | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2104 long counter= -2*dstW; |
3272 | 2105 filter-= counter*4; |
2106 filterPos-= counter/2; | |
2107 dst-= counter/2; | |
2108 asm volatile( | |
2109 "pxor %%mm7, %%mm7 \n\t" | |
4248 | 2110 "movq "MANGLE(w02)", %%mm6 \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2111 "push %%"REG_BP" \n\t" // we use 7 regs here ... |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2112 "mov %%"REG_a", %%"REG_BP" \n\t" |
18104
7b408d60de9e
add support for intel mac. mp3lib is not fixed yet.
nplourde
parents:
17641
diff
changeset
|
2113 ASMALIGN16 |
3272 | 2114 "1: \n\t" |
13733
c45cf718dfe8
10000l : fix a crash on x86 due to an horrible mistake in my x86_64 patch
aurel
parents:
13720
diff
changeset
|
2115 "movzwl (%2, %%"REG_BP"), %%eax \n\t" |
c45cf718dfe8
10000l : fix a crash on x86 due to an horrible mistake in my x86_64 patch
aurel
parents:
13720
diff
changeset
|
2116 "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2117 "movq (%1, %%"REG_BP", 8), %%mm1\n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2118 "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2119 "movd (%3, %%"REG_a"), %%mm0 \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2120 "movd (%3, %%"REG_b"), %%mm2 \n\t" |
3272 | 2121 "punpcklbw %%mm7, %%mm0 \n\t" |
2122 "punpcklbw %%mm7, %%mm2 \n\t" | |
2123 "pmaddwd %%mm1, %%mm0 \n\t" | |
2124 "pmaddwd %%mm2, %%mm3 \n\t" | |
2316
bcb229557e9b
fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents:
2297
diff
changeset
|
2125 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2126 "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2127 "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2128 "movd 4(%3, %%"REG_a"), %%mm4 \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2129 "movd 4(%3, %%"REG_b"), %%mm2 \n\t" |
3272 | 2130 "punpcklbw %%mm7, %%mm4 \n\t" |
2131 "punpcklbw %%mm7, %%mm2 \n\t" | |
2132 "pmaddwd %%mm1, %%mm4 \n\t" | |
2133 "pmaddwd %%mm2, %%mm5 \n\t" | |
2134 "paddd %%mm4, %%mm0 \n\t" | |
2135 "paddd %%mm5, %%mm3 \n\t" | |
2136 | |
2137 "psrad $8, %%mm0 \n\t" | |
2138 "psrad $8, %%mm3 \n\t" | |
2139 "packssdw %%mm3, %%mm0 \n\t" | |
2140 "pmaddwd %%mm6, %%mm0 \n\t" | |
2141 "packssdw %%mm0, %%mm0 \n\t" | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2142 "movd %%mm0, (%4, %%"REG_BP") \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2143 "add $4, %%"REG_BP" \n\t" |
3272 | 2144 " jnc 1b \n\t" |
3344 | 2145 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2146 "pop %%"REG_BP" \n\t" |
3272 | 2147 : "+a" (counter) |
2148 : "c" (filter), "d" (filterPos), "S" (src), "D" (dst) | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2149 : "%"REG_b |
3272 | 2150 ); |
2151 } | |
2152 else | |
2153 { | |
15617 | 2154 uint8_t *offset = src+filterSize;
13720 | 2155 long counter= -2*dstW;
3272 | 2156 // filter-= counter*filterSize/2; |
2157 filterPos-= counter/2; | |
2158 dst-= counter/2; | |
2159 asm volatile( | |
2160 "pxor %%mm7, %%mm7 \n\t" | |
4248 | 2161 "movq "MANGLE(w02)", %%mm6 \n\t" |
18104
7b408d60de9e
add support for intel mac. mp3lib is not fixed yet.
nplourde
parents:
17641
diff
changeset
|
2162 ASMALIGN16 |
3272 | 2163 "1: \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2164 "mov %2, %%"REG_c" \n\t" |
13733
c45cf718dfe8
10000l : fix a crash on x86 due to an horrible mistake in my x86_64 patch
aurel
parents:
13720
diff
changeset
|
2165 "movzwl (%%"REG_c", %0), %%eax \n\t" |
c45cf718dfe8
10000l : fix a crash on x86 due to an horrible mistake in my x86_64 patch
aurel
parents:
13720
diff
changeset
|
2166 "movzwl 2(%%"REG_c", %0), %%ebx \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2167 "mov %5, %%"REG_c" \n\t" |
3272 | 2168 "pxor %%mm4, %%mm4 \n\t" |
2169 "pxor %%mm5, %%mm5 \n\t" | |
2170 "2: \n\t" | |
2171 "movq (%1), %%mm1 \n\t" | |
2172 "movq (%1, %6), %%mm3 \n\t" | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2173 "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2174 "movd (%%"REG_c", %%"REG_b"), %%mm2\n\t" |
3272 | 2175 "punpcklbw %%mm7, %%mm0 \n\t" |
2176 "punpcklbw %%mm7, %%mm2 \n\t" | |
2177 "pmaddwd %%mm1, %%mm0 \n\t" | |
2178 "pmaddwd %%mm2, %%mm3 \n\t" | |
2179 "paddd %%mm3, %%mm5 \n\t" | |
2180 "paddd %%mm0, %%mm4 \n\t" | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2181 "add $8, %1 \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2182 "add $4, %%"REG_c" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2183 "cmp %4, %%"REG_c" \n\t" |
3272 | 2184 " jb 2b \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2185 "add %6, %1 \n\t" |
3272 | 2186 "psrad $8, %%mm4 \n\t" |
2187 "psrad $8, %%mm5 \n\t" | |
2188 "packssdw %%mm5, %%mm4 \n\t" | |
2189 "pmaddwd %%mm6, %%mm4 \n\t" | |
2190 "packssdw %%mm4, %%mm4 \n\t" | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2191 "mov %3, %%"REG_a" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2192 "movd %%mm4, (%%"REG_a", %0) \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2193 "add $4, %0 \n\t" |
3272 | 2194 " jnc 1b \n\t" |
3344 | 2195 |
3641 | 2196 : "+r" (counter), "+r" (filter) |
15617
130dd060f723
one bugfix and a few gcc4 bug workaorunds by (Gianluigi Tiesi: mplayer, netfarm it)
michael
parents:
15295
diff
changeset
|
2197 : "m" (filterPos), "m" (dst), "m"(offset), |
16739
e91f944f6ed9
Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents:
15972
diff
changeset
|
2198 "m" (src), "r" (filterSize*2) |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2199 : "%"REG_b, "%"REG_a, "%"REG_c |
3272 | 2200 ); |
2201 } | |
2202 #else | |
12130 | 2203 #ifdef HAVE_ALTIVEC
2204 hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
2205 #else
3272 | 2206 int i; |
2207 for(i=0; i<dstW; i++) | |
2208 { | |
2209 int j; | |
2210 int srcPos= filterPos[i]; | |
2211 int val=0; | |
3344 | 2212 // printf("filterPos: %d\n", filterPos[i]); |
3272 | 2213 for(j=0; j<filterSize; j++) |
2214 { | |
2215 // printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]); | |
2216 val += ((int)src[srcPos + j])*filter[filterSize*i + j]; | |
2217 } | |
2218 // filter += hFilterSize; | |
2219 dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic filter can overflow the 15 bit range, so clip
2220 // dst[i] = val>>7; | |
2221 } | |
2222 #endif | |
12130 | 2223 #endif
3272 | 2224 } |
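/* Illustrative only (not part of the original file): a minimal sketch of how the filter tables
   for RENAME(hScale) are laid out, here for a plain 1:1 copy with filterSize==4.  The 1<<14
   "unity" coefficient is an assumption inferred from the val>>7 / 15 bit clipping in the C path
   above; the helper name is made up and the block is left disabled. */
#if 0
static void RENAME(hScale_identity_demo)(void)
{
	enum { W=16 };
	uint8_t src[W+8];      // a few spare bytes, the 4 tap filter reads past the last pixel
	int16_t dst[W];
	int16_t filter[4*W];
	int16_t filterPos[W];
	int i;
	for(i=0; i<W+8; i++) src[i]= i;
	for(i=0; i<W; i++)
	{
		filterPos[i] = i;      // start reading at src[i]
		filter[4*i+0]= 1<<14;  // full weight on the first tap
		filter[4*i+1]= 0;
		filter[4*i+2]= 0;
		filter[4*i+3]= 0;
	}
	RENAME(hScale)(dst, W, src, W, 1<<16, filter, filterPos, 4);
	// dst[i] is now src[i]<<7, the 15 bit intermediate format
}
#endif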
2225 // *** horizontal scale Y line to temp buffer | |
18575 | 2226 static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
4276 | 2227 int flags, int canMMX2BeUsed, int16_t *hLumFilter,
4467 | 2228 int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode, |
5452 | 2229 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter, |
2230 int32_t *mmx2FilterPos) | |
2469 | 2231 { |
4467 | 2232 if(srcFormat==IMGFMT_YUY2) |
2233 { | |
2234 RENAME(yuy2ToY)(formatConvBuffer, src, srcW); | |
2235 src= formatConvBuffer; | |
2236 } | |
9071 | 2237 else if(srcFormat==IMGFMT_UYVY) |
2238 { | |
2239 RENAME(uyvyToY)(formatConvBuffer, src, srcW); | |
2240 src= formatConvBuffer; | |
2241 } | |
4467 | 2242 else if(srcFormat==IMGFMT_BGR32) |
2243 { | |
2244 RENAME(bgr32ToY)(formatConvBuffer, src, srcW); | |
2245 src= formatConvBuffer; | |
2246 } | |
2247 else if(srcFormat==IMGFMT_BGR24) | |
2248 { | |
2249 RENAME(bgr24ToY)(formatConvBuffer, src, srcW); | |
2250 src= formatConvBuffer; | |
2251 } | |
4578 | 2252 else if(srcFormat==IMGFMT_BGR16) |
2253 { | |
2254 RENAME(bgr16ToY)(formatConvBuffer, src, srcW); | |
2255 src= formatConvBuffer; | |
2256 } | |
4580 | 2257 else if(srcFormat==IMGFMT_BGR15) |
2258 { | |
2259 RENAME(bgr15ToY)(formatConvBuffer, src, srcW); | |
2260 src= formatConvBuffer; | |
2261 } | |
4558 | 2262 else if(srcFormat==IMGFMT_RGB32) |
2263 { | |
2264 RENAME(rgb32ToY)(formatConvBuffer, src, srcW); | |
2265 src= formatConvBuffer; | |
2266 } | |
2267 else if(srcFormat==IMGFMT_RGB24) | |
2268 { | |
2269 RENAME(rgb24ToY)(formatConvBuffer, src, srcW); | |
2270 src= formatConvBuffer; | |
2271 } | |
4467 | 2272 |
3352 | 2273 #ifdef HAVE_MMX |
11000 | 2274 // use the new MMX scaler if the MMX2 one can't be used (it's faster than the x86 asm one)
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2275 if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) |
3352 | 2276 #else |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2277 if(!(flags&SWS_FAST_BILINEAR)) |
3352 | 2278 #endif |
3272 | 2279 { |
2280 RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize); | |
2281 } | |
2282 else // Fast Bilinear upscale / crap downscale | |
2283 { | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2284 #if defined(ARCH_X86) || defined(ARCH_X86_64) |
2469 | 2285 #ifdef HAVE_MMX2 |
2671 | 2286 int i; |
2469 | 2287 if(canMMX2BeUsed) |
2288 { | |
2289 asm volatile( | |
2290 "pxor %%mm7, %%mm7 \n\t" | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2291 "mov %0, %%"REG_c" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2292 "mov %1, %%"REG_D" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2293 "mov %2, %%"REG_d" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2294 "mov %3, %%"REG_b" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2295 "xor %%"REG_a", %%"REG_a" \n\t" // i |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2296 PREFETCH" (%%"REG_c") \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2297 PREFETCH" 32(%%"REG_c") \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2298 PREFETCH" 64(%%"REG_c") \n\t" |
2520 | 2299 |
14556 | 2300 #ifdef ARCH_X86_64 |
2301 | |
2469 | 2302 #define FUNNY_Y_CODE \ |
14556 | 2303 "movl (%%"REG_b"), %%esi \n\t"\ |
5452 | 2304 "call *%4 \n\t"\ |
14556 | 2305 "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\ |
2306 "add %%"REG_S", %%"REG_c" \n\t"\ | |
14536
6f13379b1464
100l, fix broken AMD64 patch. To whoever applied it: Did you actually _try_
reimar
parents:
13733
diff
changeset
|
2307 "add %%"REG_a", %%"REG_D" \n\t"\ |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2308 "xor %%"REG_a", %%"REG_a" \n\t"\ |
2520 | 2309 |
14556 | 2310 #else |
2311 | |
2312 #define FUNNY_Y_CODE \ | |
2313 "movl (%%"REG_b"), %%esi \n\t"\ | |
2314 "call *%4 \n\t"\ | |
2315 "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\ | |
2316 "add %%"REG_a", %%"REG_D" \n\t"\ | |
2317 "xor %%"REG_a", %%"REG_a" \n\t"\ | |
2318 | |
2319 #endif | |
2320 | |
2469 | 2321 FUNNY_Y_CODE |
2322 FUNNY_Y_CODE | |
2323 FUNNY_Y_CODE | |
2324 FUNNY_Y_CODE | |
2325 FUNNY_Y_CODE | |
2326 FUNNY_Y_CODE | |
2327 FUNNY_Y_CODE | |
2328 FUNNY_Y_CODE | |
2329 | |
5452 | 2330 :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos), |
2331 "m" (funnyYCode) | |
14536
6f13379b1464
100l, fix broken AMD64 patch. To whoever applied it: Did you actually _try_
reimar
parents:
13733
diff
changeset
|
2332 : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D |
2469 | 2333 ); |
3215 | 2334 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; |
2469 | 2335 } |
2336 else | |
2337 { | |
2338 #endif | |
18575
e00cea3e1732
fix variable type used via "m" asm constraint to match size used in asm.
reimar
parents:
18392
diff
changeset
|
2339 long xInc_shr16 = xInc >> 16; |
e00cea3e1732
fix variable type used via "m" asm constraint to match size used in asm.
reimar
parents:
18392
diff
changeset
|
2340 uint16_t xInc_mask = xInc & 0xffff; |
2469 | 2341 //NO MMX just normal asm ... |
2342 asm volatile( | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2343 "xor %%"REG_a", %%"REG_a" \n\t" // i |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2344 "xor %%"REG_b", %%"REG_b" \n\t" // xx |
2469 | 2345 "xorl %%ecx, %%ecx \n\t" // 2*xalpha |
18104
7b408d60de9e
add support for intel mac. mp3lib is not fixed yet.
nplourde
parents:
17641
diff
changeset
|
2346 ASMALIGN16 |
2469 | 2347 "1: \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2348 "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx] |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2349 "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1] |
2469 | 2350 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] |
2351 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2352 "shll $16, %%edi \n\t" | |
2353 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2354 "mov %1, %%"REG_D" \n\t" |
2469 | 2355 "shrl $9, %%esi \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2356 "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t" |
2469 | 2357 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2358 "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry |
2469 | 2359 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2360 "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx] |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2361 "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1] |
2469 | 2362 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] |
2363 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2364 "shll $16, %%edi \n\t" | |
2365 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2366 "mov %1, %%"REG_D" \n\t" |
2469 | 2367 "shrl $9, %%esi \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2368 "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t" |
2469 | 2369 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2370 "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry |
2469 | 2371 |
2372 | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2373 "add $2, %%"REG_a" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2374 "cmp %2, %%"REG_a" \n\t" |
2469 | 2375 " jb 1b \n\t" |
2376 | |
2377 | |
15617
130dd060f723
one bugfix and a few gcc4 bug workaorunds by (Gianluigi Tiesi: mplayer, netfarm it)
michael
parents:
15295
diff
changeset
|
2378 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask) |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2379 : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi" |
2469 | 2380 ); |
2381 #ifdef HAVE_MMX2 | |
11000 | 2382 } //if MMX2 can't be used |
2469 | 2383 #endif |
2384 #else | |
2671 | 2385 int i; |
2386 unsigned int xpos=0; | |
2387 for(i=0;i<dstWidth;i++) | |
2388 { | |
2389 register unsigned int xx=xpos>>16; | |
2390 register unsigned int xalpha=(xpos&0xFFFF)>>9; | |
2391 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha; | |
2392 xpos+=xInc; | |
2393 } | |
2469 | 2394 #endif |
3272 | 2395 } |
2469 | 2396 } |
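/* In the plain C fallback of the fast bilinear path above, xpos walks the source position in
   16.16 fixed point (xInc is the source step per output pixel), xalpha= (xpos&0xFFFF)>>9 is a
   7 bit blend factor, and dst[i] is kept as a 15 bit value: src[xx]<<7 plus the weighted
   difference to src[xx+1].  The x86 asm variant keeps the fraction in %ecx and computes the
   same thing.  For example, a 2x upscale has xInc around 0x8000, so every second output pixel
   lands exactly on a source pixel (xalpha==0). */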
2397 | |
16739 | 2398 inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
4276 | 2399 int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
4467 | 2400 int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode, |
5452 | 2401 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter, |
2402 int32_t *mmx2FilterPos) | |
2469 | 2403 { |
4467 | 2404 if(srcFormat==IMGFMT_YUY2) |
2405 { | |
2406 RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2407 src1= formatConvBuffer; | |
2408 src2= formatConvBuffer+2048; | |
2409 } | |
9071 | 2410 else if(srcFormat==IMGFMT_UYVY) |
2411 { | |
2412 RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2413 src1= formatConvBuffer; | |
2414 src2= formatConvBuffer+2048; | |
2415 } | |
4467 | 2416 else if(srcFormat==IMGFMT_BGR32) |
2417 { | |
2418 RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2419 src1= formatConvBuffer; | |
2420 src2= formatConvBuffer+2048; | |
2421 } | |
2422 else if(srcFormat==IMGFMT_BGR24) | |
2423 { | |
2424 RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2425 src1= formatConvBuffer; | |
2426 src2= formatConvBuffer+2048; | |
2427 } | |
4578 | 2428 else if(srcFormat==IMGFMT_BGR16) |
2429 { | |
2430 RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2431 src1= formatConvBuffer; | |
2432 src2= formatConvBuffer+2048; | |
2433 } | |
4580 | 2434 else if(srcFormat==IMGFMT_BGR15) |
2435 { | |
2436 RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2437 src1= formatConvBuffer; | |
2438 src2= formatConvBuffer+2048; | |
2439 } | |
4558 | 2440 else if(srcFormat==IMGFMT_RGB32) |
2441 { | |
2442 RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2443 src1= formatConvBuffer; | |
2444 src2= formatConvBuffer+2048; | |
2445 } | |
2446 else if(srcFormat==IMGFMT_RGB24) | |
2447 { | |
2448 RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW); | |
2449 src1= formatConvBuffer; | |
2450 src2= formatConvBuffer+2048; | |
2451 } | |
4481 | 2452 else if(isGray(srcFormat)) |
2453 { | |
2454 return; | |
2455 } | |
4467 | 2456 |
3352 | 2457 #ifdef HAVE_MMX |
11000 | 2458 // use the new MMX scaler if the MMX2 one can't be used (it's faster than the x86 asm one)
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2459 if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed)) |
3352 | 2460 #else |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2461 if(!(flags&SWS_FAST_BILINEAR)) |
3352 | 2462 #endif |
3272 | 2463 { |
2464 RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); | |
2465 RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize); | |
2466 } | |
2467 else // Fast Bilinear upscale / crap downscale | |
2468 { | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2469 #if defined(ARCH_X86) || defined(ARCH_X86_64) |
2469 | 2470 #ifdef HAVE_MMX2 |
2671 | 2471 int i; |
2469 | 2472 if(canMMX2BeUsed) |
2473 { | |
2474 asm volatile( | |
5452 | 2475 "pxor %%mm7, %%mm7 \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2476 "mov %0, %%"REG_c" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2477 "mov %1, %%"REG_D" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2478 "mov %2, %%"REG_d" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2479 "mov %3, %%"REG_b" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2480 "xor %%"REG_a", %%"REG_a" \n\t" // i |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2481 PREFETCH" (%%"REG_c") \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2482 PREFETCH" 32(%%"REG_c") \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2483 PREFETCH" 64(%%"REG_c") \n\t" |
5452 | 2484 |
14556 | 2485 #ifdef ARCH_X86_64 |
2486 | |
5452 | 2487 #define FUNNY_UV_CODE \ |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2488 "movl (%%"REG_b"), %%esi \n\t"\ |
5452 | 2489 "call *%4 \n\t"\ |
14556 | 2490 "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\ |
2491 "add %%"REG_S", %%"REG_c" \n\t"\ | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2492 "add %%"REG_a", %%"REG_D" \n\t"\ |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2493 "xor %%"REG_a", %%"REG_a" \n\t"\ |
2469 | 2494 |
14556 | 2495 #else |
2496 | |
2497 #define FUNNY_UV_CODE \ | |
2498 "movl (%%"REG_b"), %%esi \n\t"\ | |
2499 "call *%4 \n\t"\ | |
2500 "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\ | |
2501 "add %%"REG_a", %%"REG_D" \n\t"\ | |
2502 "xor %%"REG_a", %%"REG_a" \n\t"\ | |
2503 | |
2504 #endif | |
2505 | |
5452 | 2506 FUNNY_UV_CODE |
2507 FUNNY_UV_CODE | |
2508 FUNNY_UV_CODE | |
2509 FUNNY_UV_CODE | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2510 "xor %%"REG_a", %%"REG_a" \n\t" // i |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2511 "mov %5, %%"REG_c" \n\t" // src |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2512 "mov %1, %%"REG_D" \n\t" // buf1 |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2513 "add $4096, %%"REG_D" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2514 PREFETCH" (%%"REG_c") \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2515 PREFETCH" 32(%%"REG_c") \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2516 PREFETCH" 64(%%"REG_c") \n\t" |
2469 | 2517 |
5452 | 2518 FUNNY_UV_CODE |
2519 FUNNY_UV_CODE | |
2520 FUNNY_UV_CODE | |
2521 FUNNY_UV_CODE | |
2469 | 2522 |
5452 | 2523 :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos), |
2524 "m" (funnyUVCode), "m" (src2) | |
14556 | 2525 : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D |
5452 | 2526 ); |
3344 | 2527 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) |
2469 | 2528 { |
3344 | 2529 // printf("%d %d %d\n", dstWidth, i, srcW); |
2530 dst[i] = src1[srcW-1]*128; | |
2531 dst[i+2048] = src2[srcW-1]*128; | |
2469 | 2532 } |
2533 } | |
2534 else | |
2535 { | |
2536 #endif | |
15617
130dd060f723
one bugfix and a few gcc4 bug workaorunds by (Gianluigi Tiesi: mplayer, netfarm it)
michael
parents:
15295
diff
changeset
|
2537 long xInc_shr16 = (long) (xInc >> 16); |
18575
e00cea3e1732
fix variable type used via "m" asm constraint to match size used in asm.
reimar
parents:
18392
diff
changeset
|
2538 uint16_t xInc_mask = xInc & 0xffff; |
2469 | 2539 asm volatile( |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2540 "xor %%"REG_a", %%"REG_a" \n\t" // i |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2541 "xor %%"REG_b", %%"REG_b" \n\t" // xx |
2469 | 2542 "xorl %%ecx, %%ecx \n\t" // 2*xalpha |
18104
7b408d60de9e
add support for intel mac. mp3lib is not fixed yet.
nplourde
parents:
17641
diff
changeset
|
2543 ASMALIGN16 |
2469 | 2544 "1: \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2545 "mov %0, %%"REG_S" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2546 "movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx] |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2547 "movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1] |
2469 | 2548 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] |
2549 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2550 "shll $16, %%edi \n\t" | |
2551 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2552 "mov %1, %%"REG_D" \n\t" |
2469 | 2553 "shrl $9, %%esi \n\t" |
15845 | 2554 "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t" |
2469 | 2555 |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2556 "movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx] |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2557 "movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1] |
2469 | 2558 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx] |
2559 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha | |
2560 "shll $16, %%edi \n\t" | |
2561 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha) | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2562 "mov %1, %%"REG_D" \n\t" |
2469 | 2563 "shrl $9, %%esi \n\t" |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2564 "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t" |
2469 | 2565 |
2566 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF | |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2567 "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2568 "add $1, %%"REG_a" \n\t" |
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2569 "cmp %2, %%"REG_a" \n\t" |
2469 | 2570 " jb 1b \n\t" |
2571 | |
15972
e4360060b79a
Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents:
15858
diff
changeset
|
2572 /* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here, |
e4360060b79a
Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents:
15858
diff
changeset
|
2573 which is needed to support GCC-4.0 */ |
e4360060b79a
Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents:
15858
diff
changeset
|
2574 #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4)) |
e4360060b79a
Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents:
15858
diff
changeset
|
2575 :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask), |
e4360060b79a
Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents:
15858
diff
changeset
|
2576 #else |
15858
045f91e5e67d
Reverts GCC-4.0 "fixe" which broke GCC-3.3 and maybe others
gpoirier
parents:
15845
diff
changeset
|
2577 :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask), |
15972
e4360060b79a
Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents:
15858
diff
changeset
|
2578 #endif |
2469 | 2579 "r" (src2) |
13720
821f464b4d90
adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents:
12698
diff
changeset
|
2580 : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi" |
2469 | 2581 ); |
2582 #ifdef HAVE_MMX2 | |
11000 | 2583 } //if MMX2 can't be used |
2469 | 2584 #endif |
2585 #else | |
2671 | 2586 int i; |
2587 unsigned int xpos=0; | |
2588 for(i=0;i<dstWidth;i++) | |
2589 { | |
2590 register unsigned int xx=xpos>>16; | |
2591 register unsigned int xalpha=(xpos&0xFFFF)>>9; | |
2592 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha); | |
2593 dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha); | |
2566 | 2594 /* slower |
2595 dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha; | |
2596 dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha; | |
2597 */ | |
2671 | 2598 xpos+=xInc; |
2599 } | |
2469 | 2600 #endif |
3272 | 2601 } |
2602 } | |
2603 | |
9499 | 2604 static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, |
2605 int srcSliceH, uint8_t* dst[], int dstStride[]){ | |
3344 | 2606 |
4276 | 2607 /* load a few things into local vars to make the code more readable and faster */
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2608 const int srcW= c->srcW; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2609 const int dstW= c->dstW; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2610 const int dstH= c->dstH; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2611 const int chrDstW= c->chrDstW; |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2612 const int chrSrcW= c->chrSrcW; |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2613 const int lumXInc= c->lumXInc; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2614 const int chrXInc= c->chrXInc; |
4295 | 2615 const int dstFormat= c->dstFormat; |
6503 | 2616 const int srcFormat= c->srcFormat; |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2617 const int flags= c->flags; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2618 const int canMMX2BeUsed= c->canMMX2BeUsed; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2619 int16_t *vLumFilterPos= c->vLumFilterPos; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2620 int16_t *vChrFilterPos= c->vChrFilterPos; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2621 int16_t *hLumFilterPos= c->hLumFilterPos; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2622 int16_t *hChrFilterPos= c->hChrFilterPos; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2623 int16_t *vLumFilter= c->vLumFilter; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2624 int16_t *vChrFilter= c->vChrFilter; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2625 int16_t *hLumFilter= c->hLumFilter; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2626 int16_t *hChrFilter= c->hChrFilter; |
9413 | 2627 int32_t *lumMmxFilter= c->lumMmxFilter; |
2628 int32_t *chrMmxFilter= c->chrMmxFilter; | |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2629 const int vLumFilterSize= c->vLumFilterSize; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2630 const int vChrFilterSize= c->vChrFilterSize; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2631 const int hLumFilterSize= c->hLumFilterSize; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2632 const int hChrFilterSize= c->hChrFilterSize; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2633 int16_t **lumPixBuf= c->lumPixBuf; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2634 int16_t **chrPixBuf= c->chrPixBuf; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2635 const int vLumBufSize= c->vLumBufSize; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2636 const int vChrBufSize= c->vChrBufSize; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2637 uint8_t *funnyYCode= c->funnyYCode; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2638 uint8_t *funnyUVCode= c->funnyUVCode; |
4467 | 2639 uint8_t *formatConvBuffer= c->formatConvBuffer; |
6532 | 2640 const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
2641 const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
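/* -((-srcSliceH) >> c->chrSrcVSubSample) is a rounding-up (ceiling) shift, so a slice with an
   odd number of luma lines still covers its last chroma line: e.g. srcSliceH==5 with 2:1
   vertical subsampling gives 3 chroma lines rather than 2. */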
9494 | 2642 int lastDstY;
3344 | 2643 |
4276 | 2644 /* vars which will change and which we need to store back in the context */
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2645 int dstY= c->dstY; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2646 int lumBufIndex= c->lumBufIndex; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2647 int chrBufIndex= c->chrBufIndex; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2648 int lastInLumBuf= c->lastInLumBuf; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2649 int lastInChrBuf= c->lastInChrBuf; |
6540 | 2650 |
2651 if(isPacked(c->srcFormat)){ | |
4467 | 2652 src[0]= |
2653 src[1]= | |
9499 | 2654 src[2]= src[0]; |
6540 | 2655 srcStride[0]= |
4467 | 2656 srcStride[1]= |
9499 | 2657 srcStride[2]= srcStride[0]; |
4467 | 2658 } |
6540 | 2659 srcStride[1]<<= c->vChrDrop; |
2660 srcStride[2]<<= c->vChrDrop; | |
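/* Shifting the chroma strides left by vChrDrop makes the code below read only every
   (1<<vChrDrop)-th chroma input line; that is how the vertical chroma drop setting stored in
   c->vChrDrop takes effect. */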
4419 | 2661 |
6517 | 2662 // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2], |
2663 // (int)dst[0], (int)dst[1], (int)dst[2]); | |
2664 | |
2665 #if 0 //self test FIXME move to a vfilter or something | |
2666 { | |
2667 static volatile int i=0; | |
2668 i++; | |
2669 if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH) | |
2670 selfTest(src, srcStride, c->srcW, c->srcH); | |
2671 i--; | |
2672 } | |
2673 #endif | |
4554 | 2674 |
2675 //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2], | |
2676 //dstStride[0],dstStride[1],dstStride[2]); | |
4419 | 2677 |
2678 if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0) | |
2679 { | |
2680 static int firstTime=1; //FIXME move this into the context perhaps | |
2681 if(flags & SWS_PRINT_INFO && firstTime) | |
2682 { | |
9970 | 2683 MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n" |
4419 | 2684 "SwScaler: ->cannot do aligned memory accesses anymore\n");
2685 firstTime=0; | |
2686 } | |
2687 } | |
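/* Illustrative only (not part of the scaler): one way a caller can avoid the warning above is
   to round the destination stride up to a multiple of 16 and allocate a 16 byte aligned base
   pointer.  This assumes posix_memalign() is available; the bytes-per-pixel value is a made-up
   example for a packed 32 bit destination. */
#if 0
	{
		const int bpp= 4;                                 // e.g. BGR32
		const long alignedStride= (dstW*bpp + 15) & ~15L; // multiple of 16, hence also of 8
		uint8_t *base= NULL;
		if(posix_memalign((void**)&base, 16, alignedStride*dstH) == 0)
		{
			// pass base as dst[0] and alignedStride as dstStride[0] when calling the scaler
		}
	}
#endif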
3344 | 2688 |
4467 | 2689 /* Note: the user might start scaling the picture in the middle, so this will not get executed.
2690 This is not really intended, but it currently works, so people might rely on it. */
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2691 if(srcSliceY ==0){ |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2692 lumBufIndex=0; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2693 chrBufIndex=0; |
4467 | 2694 dstY=0; |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2695 lastInLumBuf= -1; |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2696 lastInChrBuf= -1; |
3272 | 2697 } |
3344 | 2698 |
9494
543ab3909b78
sws_ prefix, more seperation between internal & external swscaler API
michael
parents:
9476
diff
changeset
|
2699 lastDstY= dstY; |
543ab3909b78
sws_ prefix, more seperation between internal & external swscaler API
michael
parents:
9476
diff
changeset
|
2700 |
3344 | 2701 for(;dstY < dstH; dstY++){ |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2702 unsigned char *dest =dst[0]+dstStride[0]*dstY; |
6520 | 2703 const int chrDstY= dstY>>c->chrDstVSubSample; |
2704 unsigned char *uDest=dst[1]+dstStride[1]*chrDstY; | |
2705 unsigned char *vDest=dst[2]+dstStride[2]*chrDstY; | |
3344 | 2706 |
2707 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input | |
2708 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input | |
2709 const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input | |
2710 const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input | |
2711 | |
11122 | 2712 //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n", |
2713 // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample); | |
4290
1f8ceb12284d
general convolution filtering of the source picture
michael
parents:
4276
diff
changeset
|
2714 //handle holes (FAST_BILINEAR & weird filters) |
1f8ceb12284d
general convolution filtering of the source picture
michael
parents:
4276
diff
changeset
|
2715 if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1; |
1f8ceb12284d
general convolution filtering of the source picture
michael
parents:
4276
diff
changeset
|
2716 if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1; |
1f8ceb12284d
general convolution filtering of the source picture
michael
parents:
4276
diff
changeset
|
2717 //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize); |
3344 | 2718 ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1) |
2719 ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1) | |
2216 | 2720 |
3344 | 2721 // Do we have enough lines in this slice to output the dstY line?
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2722 if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample)) |
2469 | 2723 { |
3344 | 2724 //Do horizontal scaling |
2725 while(lastInLumBuf < lastLumSrcY) | |
2726 { | |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2727 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; |
3344 | 2728 lumBufIndex++; |
4290
1f8ceb12284d
general convolution filtering of the source picture
michael
parents:
4276
diff
changeset
|
2729 // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY); |
3344 | 2730 ASSERT(lumBufIndex < 2*vLumBufSize) |
2731 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) | |
2732 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) | |
2733 // printf("%d %d\n", lumBufIndex, vLumBufSize); | |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2734 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2735 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, |
5452 | 2736 funnyYCode, c->srcFormat, formatConvBuffer, |
2737 c->lumMmx2Filter, c->lumMmx2FilterPos); | |
3344 | 2738 lastInLumBuf++; |
2739 } | |
2740 while(lastInChrBuf < lastChrSrcY) | |
2741 { | |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2742 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; |
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2743 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; |
3344 | 2744 chrBufIndex++; |
2745 ASSERT(chrBufIndex < 2*vChrBufSize) | |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2746 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH)) |
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2747 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0) |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2748 //FIXME replace parameters through context struct (some at least) |
6503 | 2749 |
2750 if(!(isGray(srcFormat) || isGray(dstFormat))) | |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2751 RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2752 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, |
5452 | 2753 funnyUVCode, c->srcFormat, formatConvBuffer, |
2754 c->chrMmx2Filter, c->chrMmx2FilterPos); | |
3344 | 2755 lastInChrBuf++; |
2756 } | |
2757 //wrap buf index around to stay inside the ring buffer | |
2758 if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; | |
2759 if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; | |
2469 | 2760 } |
3344 | 2761 else // not enough lines left in this slice -> load the rest in the buffer |
2469 | 2762 { |
3344 | 2763 /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n", |
2764 firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY, | |
2765 lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize, | |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2766 vChrBufSize, vLumBufSize);*/ |
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2767 |
3344 | 2768 //Do horizontal scaling |
2769 while(lastInLumBuf+1 < srcSliceY + srcSliceH) | |
2469 | 2770 { |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2771 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0]; |
3344 | 2772 lumBufIndex++; |
2773 ASSERT(lumBufIndex < 2*vLumBufSize) | |
2774 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH) | |
2775 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0) | |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2776 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc, |
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2777 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize, |
5452 | 2778 funnyYCode, c->srcFormat, formatConvBuffer, |
2779 c->lumMmx2Filter, c->lumMmx2FilterPos); | |
3344 | 2780 lastInLumBuf++; |
2469 | 2781 } |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2782 while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH)) |
3344 | 2783 { |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2784 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1]; |
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2785 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2]; |
3344 | 2786 chrBufIndex++; |
2787 ASSERT(chrBufIndex < 2*vChrBufSize) | |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2788 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH) |
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2789 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0) |
6503 | 2790 |
2791 if(!(isGray(srcFormat) || isGray(dstFormat))) | |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2792 RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc, |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2793 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize, |
5452 | 2794 funnyUVCode, c->srcFormat, formatConvBuffer, |
2795 c->chrMmx2Filter, c->chrMmx2FilterPos); | |
3344 | 2796 lastInChrBuf++; |
2797 } | |
2798 //wrap buf index around to stay inside the ring buffer | |
2799 if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize; | |
2800 if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize; | |
11000 | 2801 break; //we can't output a dstY line so let's try with the next slice |
2469 | 2802 } |
2264
7851375ea156
increased precission of s_xinc s_xinc2 (needed for the mmx2 bugfix)
michael
parents:
2237
diff
changeset
|
2803 |
2748 | 2804 #ifdef HAVE_MMX |
3344 | 2805 b5Dither= dither8[dstY&1]; |
2806 g6Dither= dither4[dstY&1]; | |
2807 g5Dither= dither8[dstY&1]; | |
2808 r5Dither= dither8[(dstY+1)&1]; | |
2748 | 2809 #endif |
4276
9199d15cb4e0
removed global vars so that multiple swscalers can be used
michael
parents:
4248
diff
changeset
|
2810 if(dstY < dstH-2) |
3352 | 2811 { |
9414 | 2812 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; |
2813 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; | |
2814 #ifdef HAVE_MMX | |
2815 int i; | |
2816 for(i=0; i<vLumFilterSize; i++) | |
2817 { | |
2818 lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i]; | |
2819 lumMmxFilter[4*i+2]= | |
2820 lumMmxFilter[4*i+3]= | |
2821 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001; | |
2822 } | |
2823 for(i=0; i<vChrFilterSize; i++) | |
2824 { | |
2825 chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i]; | |
2826 chrMmxFilter[4*i+2]= | |
2827 chrMmxFilter[4*i+3]= | |
2828 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001; | |
2829 } | |
2830 #endif | |
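/* Layout of the lumMmxFilter/chrMmxFilter arrays built above: each tap uses four int32 slots,
   [4*i+0] holds the pointer to the source line, [4*i+2] and [4*i+3] hold the 16 bit vertical
   coefficient replicated into both halves of a dword (that is what *0x10001 does), which is the
   operand layout the MMX vertical scalers expect for pmaddwd; slot [4*i+1] is not written here. */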
14715 | 2831 if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){ |
2832 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; | |
2833 if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi | |
2834 RENAME(yuv2nv12X)(c, | |
2835 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, | |
2836 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
2837 dest, uDest, dstW, chrDstW, dstFormat); | |
2838 } | |
2839 else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like | |
3344 | 2840 { |
7351 | 2841 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; |
2842 if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi | |
3344 | 2843 if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12 |
2844 { | |
2845 int16_t *lumBuf = lumPixBuf[0]; | |
2846 int16_t *chrBuf= chrPixBuf[0]; | |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2847 RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW); |
3344 | 2848 } |
2849 else //General YV12 | |
2850 { | |
9413 | 2851 RENAME(yuv2yuvX)(c, |
6532
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2852 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, |
9834d9980c45
yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents:
6520
diff
changeset
|
2853 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, |
9414 | 2854 dest, uDest, vDest, dstW, chrDstW); |
3344 | 2855 } |
2856 } | |
2857 else | |
2858 { | |
2859 ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); | |
2860 ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); | |
2861 if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB | |
2862 { | |
2863 int chrAlpha= vChrFilter[2*dstY+1]; | |
7723 | 2864 RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1), |
6578 | 2865 dest, dstW, chrAlpha, dstFormat, flags, dstY); |
3344 | 2866 } |
2867 else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB | |
2868 { | |
2869 int lumAlpha= vLumFilter[2*dstY+1]; | |
2870 int chrAlpha= vChrFilter[2*dstY+1]; | |
7723 | 2871 RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1), |
6578 | 2872 dest, dstW, lumAlpha, chrAlpha, dstY); |
3344 | 2873 } |
2874 else //General RGB | |
2875 { | |
7723 | 2876 RENAME(yuv2packedX)(c, |
3344 | 2877 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, |
2878 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
9413 | 2879 dest, dstW, dstY); |
3344 | 2880 } |
2881 } | |
3352 | 2882 } |
11000 | 2883 else // hmm looks like we can't use MMX here without overwriting this array's tail |
3352 | 2884 { |
2885 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize; | |
2886 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize; | |
14715 | 2887 if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){ |
2888 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; | |
2889 if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi | |
2890 yuv2nv12XinC( | |
2891 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize, | |
2892 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
2893 dest, uDest, dstW, chrDstW, dstFormat); | |
2894 } | |
2895 else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 | |
3352 | 2896 { |
7351 | 2897 const int chrSkipMask= (1<<c->chrDstVSubSample)-1; |
2898 if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi | |
6540 | 2899 yuv2yuvXinC( |
6532 | 2900 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
2901 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, |
6540 | 2902 dest, uDest, vDest, dstW, chrDstW); |
3352 | 2903 } |
2904 else | |
2905 { | |
2906 ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2); | |
2907 ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2); | |
7723 | 2908 yuv2packedXinC(c, |
3352 | 2909 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize, |
2910 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize, | |
6578 | 2911 dest, dstW, dstY); |
3352 | 2912 } |
2913 } | |
3344 | 2914 } |
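Both the MMX and the plain C branches above gate the chroma output with the same skip mask derived from the vertical chroma subsampling: chroma is written only once every (1<<chrDstVSubSample) output lines, and uDest/vDest are passed as NULL in between. A small stand-alone sketch of that test (helper is illustrative, not part of the file):

static inline int has_chroma_line(int dstY, int chrDstVSubSample)
{
    const int chrSkipMask= (1<<chrDstVSubSample)-1; /* 1 for 4:2:0, 0 when there is no vertical subsampling */
    return (dstY & chrSkipMask) == 0;
}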
2534 | 2915 
2916 #ifdef HAVE_MMX |
2917 __asm __volatile(SFENCE:::"memory"); |
2566 | 2918 __asm __volatile(EMMS:::"memory"); |
2534 | 2919 #endif
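The two statements above flush the SIMD state before returning: SFENCE orders the non-temporal stores used by the MMX output code, and EMMS clears the MMX register state so that later x87 floating-point code is safe. Roughly the same effect could be written with intrinsics; the sketch below is only an illustration under the assumption that SSE intrinsics are available, whereas the file itself uses inline asm:

#include <mmintrin.h>  /* _mm_empty */
#include <xmmintrin.h> /* _mm_sfence */

static inline void flush_simd_state(void)
{
    _mm_sfence(); /* make the preceding non-temporal stores globally visible */
    _mm_empty();  /* leave MMX state so x87 floating point works again */
}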
4276 | 2920 /* store changed local vars back in the context */
2921 c->dstY= dstY; |
2922 c->lumBufIndex= lumBufIndex; |
2923 c->chrBufIndex= chrBufIndex; |
2924 c->lastInLumBuf= lastInLumBuf; |
2925 c->lastInChrBuf= lastInChrBuf; |
9494 | 2926 
2927 return dstY - lastDstY; |
3641 | 2928 } |
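The function returns dstY - lastDstY, i.e. the number of destination lines completed by this call. A hypothetical caller-side sketch of how that value could be accumulated over several input slices (the context type and scale_one_slice() are made-up placeholders, not an API of this file):

struct HypotheticalCtx; /* stands in for the real scaler context */
int scale_one_slice(struct HypotheticalCtx *ctx, int slice); /* made-up wrapper around one scaling call */

static int scale_all_slices(struct HypotheticalCtx *ctx, int numSlices, int dstH)
{
    int slice, linesDone= 0;
    for(slice= 0; slice < numSlices && linesDone < dstH; slice++)
        linesDone+= scale_one_slice(ctx, slice); /* each call reports how many output lines it produced */
    return linesDone;
}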