annotate postproc/swscale_template.c @ 18813:53a08a2754b5

reword some of the german translations of the mga message strings
author attila
date Sun, 25 Jun 2006 16:09:57 +0000
parents e00cea3e1732
children
Ignore whitespace changes - Everywhere: Within whitespace: At end of lines:
rev   line source
4295
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
1 /*
9476
eff727517e6b yuv2rgb brightness/contrast/saturation/different colorspaces support finished
michael
parents: 9434
diff changeset
2 Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
2216
9da2a0515184 software yv12->rgb scaler - separated from fsdga
arpi
parents:
diff changeset
3
4295
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
4 This program is free software; you can redistribute it and/or modify
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
5 it under the terms of the GNU General Public License as published by
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
6 the Free Software Foundation; either version 2 of the License, or
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
7 (at your option) any later version.
2216
9da2a0515184 software yv12->rgb scaler - separated from fsdga
arpi
parents:
diff changeset
8
4295
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
9 This program is distributed in the hope that it will be useful,
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
12 GNU General Public License for more details.
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
13
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
14 You should have received a copy of the GNU General Public License
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
15 along with this program; if not, write to the Free Software
17367
401b440a6d76 Update licensing information: The FSF changed postal address.
diego
parents: 16739
diff changeset
16 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
4295
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
17 */
2264
7851375ea156 increased precission of s_xinc s_xinc2 (needed for the mmx2 bugfix)
michael
parents: 2237
diff changeset
18
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
19 #include "asmalign.h"
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
20
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
21 #undef REAL_MOVNTQ
2540
f2e70944d02a fixed a warning
michael
parents: 2534
diff changeset
22 #undef MOVNTQ
2680
e8a534509557 green line fix for dstw%8!=0
michael
parents: 2671
diff changeset
23 #undef PAVGB
3136
michael
parents: 3126
diff changeset
24 #undef PREFETCH
michael
parents: 3126
diff changeset
25 #undef PREFETCHW
michael
parents: 3126
diff changeset
26 #undef EMMS
michael
parents: 3126
diff changeset
27 #undef SFENCE
michael
parents: 3126
diff changeset
28
michael
parents: 3126
diff changeset
29 #ifdef HAVE_3DNOW
michael
parents: 3126
diff changeset
30 /* On K6 femms is faster of emms. On K7 femms is directly mapped on emms. */
michael
parents: 3126
diff changeset
31 #define EMMS "femms"
michael
parents: 3126
diff changeset
32 #else
michael
parents: 3126
diff changeset
33 #define EMMS "emms"
michael
parents: 3126
diff changeset
34 #endif
michael
parents: 3126
diff changeset
35
michael
parents: 3126
diff changeset
36 #ifdef HAVE_3DNOW
michael
parents: 3126
diff changeset
37 #define PREFETCH "prefetch"
michael
parents: 3126
diff changeset
38 #define PREFETCHW "prefetchw"
michael
parents: 3126
diff changeset
39 #elif defined ( HAVE_MMX2 )
michael
parents: 3126
diff changeset
40 #define PREFETCH "prefetchnta"
michael
parents: 3126
diff changeset
41 #define PREFETCHW "prefetcht0"
michael
parents: 3126
diff changeset
42 #else
michael
parents: 3126
diff changeset
43 #define PREFETCH "/nop"
michael
parents: 3126
diff changeset
44 #define PREFETCHW "/nop"
michael
parents: 3126
diff changeset
45 #endif
michael
parents: 3126
diff changeset
46
michael
parents: 3126
diff changeset
47 #ifdef HAVE_MMX2
michael
parents: 3126
diff changeset
48 #define SFENCE "sfence"
michael
parents: 3126
diff changeset
49 #else
michael
parents: 3126
diff changeset
50 #define SFENCE "/nop"
michael
parents: 3126
diff changeset
51 #endif
2232
65996b3467d7 MMX & MMX2 optimizations (MMX2 is buggy and commented out)
michael
parents: 2230
diff changeset
52
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
53 #ifdef HAVE_MMX2
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
54 #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
55 #elif defined (HAVE_3DNOW)
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
56 #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
57 #endif
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
58
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
59 #ifdef HAVE_MMX2
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
60 #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
61 #else
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
62 #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
63 #endif
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
64 #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
65
12017
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
66 #ifdef HAVE_ALTIVEC
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
67 #include "swscale_altivec_template.c"
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
68 #endif
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
69
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
70 #define YSCALEYUV2YV12X(x, offset) \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
71 "xor %%"REG_a", %%"REG_a" \n\t"\
11122
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
72 "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
73 "movq %%mm3, %%mm4 \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
74 "lea " offset "(%0), %%"REG_d" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
75 "mov (%%"REG_d"), %%"REG_S" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
76 ASMALIGN16 /* FIXME Unroll? */\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
77 "1: \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
78 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
79 "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
80 "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
81 "add $16, %%"REG_d" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
82 "mov (%%"REG_d"), %%"REG_S" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
83 "test %%"REG_S", %%"REG_S" \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
84 "pmulhw %%mm0, %%mm2 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
85 "pmulhw %%mm0, %%mm5 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
86 "paddw %%mm2, %%mm3 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
87 "paddw %%mm5, %%mm4 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
88 " jnz 1b \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
89 "psraw $3, %%mm3 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
90 "psraw $3, %%mm4 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
91 "packuswb %%mm4, %%mm3 \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
92 MOVNTQ(%%mm3, (%1, %%REGa))\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
93 "add $8, %%"REG_a" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
94 "cmp %2, %%"REG_a" \n\t"\
11122
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
95 "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
96 "movq %%mm3, %%mm4 \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
97 "lea " offset "(%0), %%"REG_d" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
98 "mov (%%"REG_d"), %%"REG_S" \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
99 "jb 1b \n\t"
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
100
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
101 #define YSCALEYUV2YV121 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
102 "mov %2, %%"REG_a" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
103 ASMALIGN16 /* FIXME Unroll? */\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
104 "1: \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
105 "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
106 "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
107 "psraw $7, %%mm0 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
108 "psraw $7, %%mm1 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
109 "packuswb %%mm1, %%mm0 \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
110 MOVNTQ(%%mm0, (%1, %%REGa))\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
111 "add $8, %%"REG_a" \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
112 "jnc 1b \n\t"
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
113
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
114 /*
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
115 :: "m" (-lumFilterSize), "m" (-chrFilterSize),
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
116 "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
117 "r" (dest), "m" (dstW),
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
118 "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
119 : "%eax", "%ebx", "%ecx", "%edx", "%esi"
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
120 */
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
121 #define YSCALEYUV2PACKEDX \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
122 "xor %%"REG_a", %%"REG_a" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
123 ASMALIGN16\
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
124 "nop \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
125 "1: \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
126 "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
127 "mov (%%"REG_d"), %%"REG_S" \n\t"\
11122
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
128 "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
129 "movq %%mm3, %%mm4 \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
130 ASMALIGN16\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
131 "2: \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
132 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
133 "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
134 "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
135 "add $16, %%"REG_d" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
136 "mov (%%"REG_d"), %%"REG_S" \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
137 "pmulhw %%mm0, %%mm2 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
138 "pmulhw %%mm0, %%mm5 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
139 "paddw %%mm2, %%mm3 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
140 "paddw %%mm5, %%mm4 \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
141 "test %%"REG_S", %%"REG_S" \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
142 " jnz 2b \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
143 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
144 "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
145 "mov (%%"REG_d"), %%"REG_S" \n\t"\
11122
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
146 "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
147 "movq %%mm1, %%mm7 \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
148 ASMALIGN16\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
149 "2: \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
150 "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
151 "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
152 "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
153 "add $16, %%"REG_d" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
154 "mov (%%"REG_d"), %%"REG_S" \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
155 "pmulhw %%mm0, %%mm2 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
156 "pmulhw %%mm0, %%mm5 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
157 "paddw %%mm2, %%mm1 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
158 "paddw %%mm5, %%mm7 \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
159 "test %%"REG_S", %%"REG_S" \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
160 " jnz 2b \n\t"\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
161
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
162
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
163 #define YSCALEYUV2RGBX \
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
164 YSCALEYUV2PACKEDX\
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
165 "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
166 "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
167 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
168 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
169 "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
170 "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
171 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
172 "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
173 "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
174 "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
175 "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
176 "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
177 "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
178 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
179 "paddw %%mm3, %%mm4 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
180 "movq %%mm2, %%mm0 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
181 "movq %%mm5, %%mm6 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
182 "movq %%mm4, %%mm3 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
183 "punpcklwd %%mm2, %%mm2 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
184 "punpcklwd %%mm5, %%mm5 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
185 "punpcklwd %%mm4, %%mm4 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
186 "paddw %%mm1, %%mm2 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
187 "paddw %%mm1, %%mm5 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
188 "paddw %%mm1, %%mm4 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
189 "punpckhwd %%mm0, %%mm0 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
190 "punpckhwd %%mm6, %%mm6 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
191 "punpckhwd %%mm3, %%mm3 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
192 "paddw %%mm7, %%mm0 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
193 "paddw %%mm7, %%mm6 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
194 "paddw %%mm7, %%mm3 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
195 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
196 "packuswb %%mm0, %%mm2 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
197 "packuswb %%mm6, %%mm5 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
198 "packuswb %%mm3, %%mm4 \n\t"\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
199 "pxor %%mm7, %%mm7 \n\t"
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
200 #if 0
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
201 #define FULL_YSCALEYUV2RGB \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
202 "pxor %%mm7, %%mm7 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
203 "movd %6, %%mm6 \n\t" /*yalpha1*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
204 "punpcklwd %%mm6, %%mm6 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
205 "punpcklwd %%mm6, %%mm6 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
206 "movd %7, %%mm5 \n\t" /*uvalpha1*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
207 "punpcklwd %%mm5, %%mm5 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
208 "punpcklwd %%mm5, %%mm5 \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
209 "xor %%"REG_a", %%"REG_a" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
210 ASMALIGN16\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
211 "1: \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
212 "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
213 "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
214 "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
215 "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
216 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
217 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
218 "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
219 "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
220 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
221 "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
222 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
223 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
224 "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
225 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
226 "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
227 "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
228 "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
229 "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
230 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
231 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
232 "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
233 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
234 "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
235 "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
236 "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
237 "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
238 "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
239 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
240 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
241 "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
242 "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
243 "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
244 "paddw %%mm1, %%mm3 \n\t" /* B*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
245 "paddw %%mm1, %%mm0 \n\t" /* R*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
246 "packuswb %%mm3, %%mm3 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
247 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
248 "packuswb %%mm0, %%mm0 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
249 "paddw %%mm4, %%mm2 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
250 "paddw %%mm2, %%mm1 \n\t" /* G*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
251 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
252 "packuswb %%mm1, %%mm1 \n\t"
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
253 #endif
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
254
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
255 #define REAL_YSCALEYUV2PACKED(index, c) \
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
256 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
257 "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
258 "psraw $3, %%mm0 \n\t"\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
259 "psraw $3, %%mm1 \n\t"\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
260 "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
261 "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
262 "xor "#index", "#index" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
263 ASMALIGN16\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
264 "1: \n\t"\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
265 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
266 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
267 "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
268 "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
269 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
270 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
271 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
272 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
273 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
274 "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
275 "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
276 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
277 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
278 "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
279 "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
280 "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
281 "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
282 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
283 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
284 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
285 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
286 "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
287 "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
288 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
289 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
290
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
291 #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
292
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
293 #define REAL_YSCALEYUV2RGB(index, c) \
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
294 "xor "#index", "#index" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
295 ASMALIGN16\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
296 "1: \n\t"\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
297 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
298 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
299 "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
300 "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
301 "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
302 "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
303 "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
304 "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
305 "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
306 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
307 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
308 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
309 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
310 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
311 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
312 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
313 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
314 "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
315 "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
316 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
317 "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
318 "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
319 "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
320 "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
321 "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
322 "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
323 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
324 "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
325 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
326 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
327 "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
328 "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
329 "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
330 "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
331 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
332 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
333 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
334 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
335 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
336 "paddw %%mm3, %%mm4 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
337 "movq %%mm2, %%mm0 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
338 "movq %%mm5, %%mm6 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
339 "movq %%mm4, %%mm3 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
340 "punpcklwd %%mm2, %%mm2 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
341 "punpcklwd %%mm5, %%mm5 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
342 "punpcklwd %%mm4, %%mm4 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
343 "paddw %%mm1, %%mm2 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
344 "paddw %%mm1, %%mm5 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
345 "paddw %%mm1, %%mm4 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
346 "punpckhwd %%mm0, %%mm0 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
347 "punpckhwd %%mm6, %%mm6 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
348 "punpckhwd %%mm3, %%mm3 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
349 "paddw %%mm7, %%mm0 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
350 "paddw %%mm7, %%mm6 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
351 "paddw %%mm7, %%mm3 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
352 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
353 "packuswb %%mm0, %%mm2 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
354 "packuswb %%mm6, %%mm5 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
355 "packuswb %%mm3, %%mm4 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
356 "pxor %%mm7, %%mm7 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
357 #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
358
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
359 #define REAL_YSCALEYUV2PACKED1(index, c) \
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
360 "xor "#index", "#index" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
361 ASMALIGN16\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
362 "1: \n\t"\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
363 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
364 "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
365 "psraw $7, %%mm3 \n\t" \
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
366 "psraw $7, %%mm4 \n\t" \
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
367 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
368 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
369 "psraw $7, %%mm1 \n\t" \
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
370 "psraw $7, %%mm7 \n\t" \
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
371
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
372 #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
373
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
374 #define REAL_YSCALEYUV2RGB1(index, c) \
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
375 "xor "#index", "#index" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
376 ASMALIGN16\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
377 "1: \n\t"\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
378 "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
379 "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
380 "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
381 "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
382 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
383 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
384 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
385 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
386 "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
387 "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
388 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
389 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
390 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
391 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
392 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
393 "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
394 "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
395 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
396 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
397 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
398 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
399 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
400 "paddw %%mm3, %%mm4 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
401 "movq %%mm2, %%mm0 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
402 "movq %%mm5, %%mm6 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
403 "movq %%mm4, %%mm3 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
404 "punpcklwd %%mm2, %%mm2 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
405 "punpcklwd %%mm5, %%mm5 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
406 "punpcklwd %%mm4, %%mm4 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
407 "paddw %%mm1, %%mm2 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
408 "paddw %%mm1, %%mm5 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
409 "paddw %%mm1, %%mm4 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
410 "punpckhwd %%mm0, %%mm0 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
411 "punpckhwd %%mm6, %%mm6 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
412 "punpckhwd %%mm3, %%mm3 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
413 "paddw %%mm7, %%mm0 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
414 "paddw %%mm7, %%mm6 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
415 "paddw %%mm7, %%mm3 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
416 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
417 "packuswb %%mm0, %%mm2 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
418 "packuswb %%mm6, %%mm5 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
419 "packuswb %%mm3, %%mm4 \n\t"\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
420 "pxor %%mm7, %%mm7 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
421 #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
422
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
423 #define REAL_YSCALEYUV2PACKED1b(index, c) \
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
424 "xor "#index", "#index" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
425 ASMALIGN16\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
426 "1: \n\t"\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
427 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
428 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
429 "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
430 "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
431 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
432 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
433 "psrlw $8, %%mm3 \n\t" \
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
434 "psrlw $8, %%mm4 \n\t" \
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
435 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
436 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
437 "psraw $7, %%mm1 \n\t" \
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
438 "psraw $7, %%mm7 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
439 #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
440
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
441 // do vertical chrominance interpolation
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
442 #define REAL_YSCALEYUV2RGB1b(index, c) \
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
443 "xor "#index", "#index" \n\t"\
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
444 ASMALIGN16\
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
445 "1: \n\t"\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
446 "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
447 "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
448 "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
449 "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
2576
437ed06579d8 c optimizations
michael
parents: 2575
diff changeset
450 "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
437ed06579d8 c optimizations
michael
parents: 2575
diff changeset
451 "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
452 "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
453 "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
454 "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
455 "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
456 "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
457 "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
458 "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
459 "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
460 /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
461 "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
462 "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
463 "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
464 "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
465 "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
466 "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
467 "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
468 "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
469 "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
470 "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
471 /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
472 "paddw %%mm3, %%mm4 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
473 "movq %%mm2, %%mm0 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
474 "movq %%mm5, %%mm6 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
475 "movq %%mm4, %%mm3 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
476 "punpcklwd %%mm2, %%mm2 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
477 "punpcklwd %%mm5, %%mm5 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
478 "punpcklwd %%mm4, %%mm4 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
479 "paddw %%mm1, %%mm2 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
480 "paddw %%mm1, %%mm5 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
481 "paddw %%mm1, %%mm4 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
482 "punpckhwd %%mm0, %%mm0 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
483 "punpckhwd %%mm6, %%mm6 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
484 "punpckhwd %%mm3, %%mm3 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
485 "paddw %%mm7, %%mm0 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
486 "paddw %%mm7, %%mm6 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
487 "paddw %%mm7, %%mm3 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
488 /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
489 "packuswb %%mm0, %%mm2 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
490 "packuswb %%mm6, %%mm5 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
491 "packuswb %%mm3, %%mm4 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
492 "pxor %%mm7, %%mm7 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
493 #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
494
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
495 #define REAL_WRITEBGR32(dst, dstw, index) \
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
496 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
497 "movq %%mm2, %%mm1 \n\t" /* B */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
498 "movq %%mm5, %%mm6 \n\t" /* R */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
499 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
500 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
501 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
502 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
503 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
504 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
505 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
506 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
507 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
508 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
509 \
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
510 MOVNTQ(%%mm0, (dst, index, 4))\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
511 MOVNTQ(%%mm2, 8(dst, index, 4))\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
512 MOVNTQ(%%mm1, 16(dst, index, 4))\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
513 MOVNTQ(%%mm3, 24(dst, index, 4))\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
514 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
515 "add $8, "#index" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
516 "cmp "#dstw", "#index" \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
517 " jb 1b \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
518 #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
519
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
520 #define REAL_WRITEBGR16(dst, dstw, index) \
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
521 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
522 "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
523 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
2669
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
524 "psrlq $3, %%mm2 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
525 \
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
526 "movq %%mm2, %%mm1 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
527 "movq %%mm4, %%mm3 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
528 \
2669
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
529 "punpcklbw %%mm7, %%mm3 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
530 "punpcklbw %%mm5, %%mm2 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
531 "punpckhbw %%mm7, %%mm4 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
532 "punpckhbw %%mm5, %%mm1 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
533 \
2669
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
534 "psllq $3, %%mm3 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
535 "psllq $3, %%mm4 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
536 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
537 "por %%mm3, %%mm2 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
538 "por %%mm4, %%mm1 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
539 \
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
540 MOVNTQ(%%mm2, (dst, index, 2))\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
541 MOVNTQ(%%mm1, 8(dst, index, 2))\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
542 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
543 "add $8, "#index" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
544 "cmp "#dstw", "#index" \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
545 " jb 1b \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
546 #define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
547
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
548 #define REAL_WRITEBGR15(dst, dstw, index) \
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
549 "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
550 "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
551 "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
2669
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
552 "psrlq $3, %%mm2 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
553 "psrlq $1, %%mm5 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
554 \
2669
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
555 "movq %%mm2, %%mm1 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
556 "movq %%mm4, %%mm3 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
557 \
2669
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
558 "punpcklbw %%mm7, %%mm3 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
559 "punpcklbw %%mm5, %%mm2 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
560 "punpckhbw %%mm7, %%mm4 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
561 "punpckhbw %%mm5, %%mm1 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
562 \
2669
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
563 "psllq $2, %%mm3 \n\t"\
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
564 "psllq $2, %%mm4 \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
565 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
566 "por %%mm3, %%mm2 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
567 "por %%mm4, %%mm1 \n\t"\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
568 \
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
569 MOVNTQ(%%mm2, (dst, index, 2))\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
570 MOVNTQ(%%mm1, 8(dst, index, 2))\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
571 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
572 "add $8, "#index" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
573 "cmp "#dstw", "#index" \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
574 " jb 1b \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
575 #define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
2669
476b9b3b91be faster bgr15/16
michael
parents: 2638
diff changeset
576
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
577 #define WRITEBGR24OLD(dst, dstw, index) \
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
578 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
579 "movq %%mm2, %%mm1 \n\t" /* B */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
580 "movq %%mm5, %%mm6 \n\t" /* R */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
581 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
582 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
583 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
584 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
585 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
586 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
2326
7d3542955132 BGR24 bugfix
michael
parents: 2316
diff changeset
587 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
7d3542955132 BGR24 bugfix
michael
parents: 2316
diff changeset
588 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
7d3542955132 BGR24 bugfix
michael
parents: 2316
diff changeset
589 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
7d3542955132 BGR24 bugfix
michael
parents: 2316
diff changeset
590 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
591 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
592 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
593 "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
594 "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
595 "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
596 "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
597 "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
598 "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
599 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
600 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
601 "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
602 "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
603 "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
604 "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
605 "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
606 "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
607 "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
608 "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
609 "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
610 "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
611 "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
612 "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
613 "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
614 \
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
615 "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
616 "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
617 "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
618 "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
619 "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
620 "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
621 "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
622 "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
623 \
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
624 MOVNTQ(%%mm0, (dst))\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
625 MOVNTQ(%%mm2, 8(dst))\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
626 MOVNTQ(%%mm3, 16(dst))\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
627 "add $24, "#dst" \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
628 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
629 "add $8, "#index" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
630 "cmp "#dstw", "#index" \n\t"\
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
631 " jb 1b \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
632
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
633 #define WRITEBGR24MMX(dst, dstw, index) \
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
634 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
635 "movq %%mm2, %%mm1 \n\t" /* B */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
636 "movq %%mm5, %%mm6 \n\t" /* R */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
637 "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
638 "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
639 "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
640 "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
641 "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
642 "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
643 "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
644 "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
645 "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
646 "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
647 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
648 "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
649 "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
650 "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
651 "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
652 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
653 "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
654 "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
655 "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
656 "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
657 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
658 "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
659 "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
660 "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
661 "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
662 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
663 "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
664 "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
665 "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
666 "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
667 MOVNTQ(%%mm0, (dst))\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
668 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
669 "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
670 "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
671 "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
672 "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
673 MOVNTQ(%%mm6, 8(dst))\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
674 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
675 "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
676 "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
677 "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
678 MOVNTQ(%%mm5, 16(dst))\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
679 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
680 "add $24, "#dst" \n\t"\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
681 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
682 "add $8, "#index" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
683 "cmp "#dstw", "#index" \n\t"\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
684 " jb 1b \n\t"
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
685
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
686 #define WRITEBGR24MMX2(dst, dstw, index) \
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
687 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
688 "movq "MANGLE(M24A)", %%mm0 \n\t"\
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
689 "movq "MANGLE(M24C)", %%mm7 \n\t"\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
690 "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
691 "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
692 "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
693 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
694 "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
695 "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
696 "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
697 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
698 "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
699 "por %%mm1, %%mm6 \n\t"\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
700 "por %%mm3, %%mm6 \n\t"\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
701 MOVNTQ(%%mm6, (dst))\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
702 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
703 "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
704 "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
705 "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
706 "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
707 \
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
708 "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
709 "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
710 "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
711 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
712 "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
713 "por %%mm3, %%mm6 \n\t"\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
714 MOVNTQ(%%mm6, 8(dst))\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
715 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
716 "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
717 "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
718 "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
719 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
720 "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
721 "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
722 "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
723 \
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
724 "por %%mm1, %%mm3 \n\t"\
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
725 "por %%mm3, %%mm6 \n\t"\
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
726 MOVNTQ(%%mm6, 16(dst))\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
727 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
728 "add $24, "#dst" \n\t"\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
729 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
730 "add $8, "#index" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
731 "cmp "#dstw", "#index" \n\t"\
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
732 " jb 1b \n\t"
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
733
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
734 #ifdef HAVE_MMX2
3126
e71ae0213431 runtime cpu detection
michael
parents: 2800
diff changeset
735 #undef WRITEBGR24
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
736 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
737 #else
3126
e71ae0213431 runtime cpu detection
michael
parents: 2800
diff changeset
738 #undef WRITEBGR24
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
739 #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
2730
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
740 #endif
c483fc9bf0c4 faster bgr24 output
michael
parents: 2728
diff changeset
741
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
742 #define REAL_WRITEYUY2(dst, dstw, index) \
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
743 "packuswb %%mm3, %%mm3 \n\t"\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
744 "packuswb %%mm4, %%mm4 \n\t"\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
745 "packuswb %%mm7, %%mm1 \n\t"\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
746 "punpcklbw %%mm4, %%mm3 \n\t"\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
747 "movq %%mm1, %%mm7 \n\t"\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
748 "punpcklbw %%mm3, %%mm1 \n\t"\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
749 "punpckhbw %%mm3, %%mm7 \n\t"\
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
750 \
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
751 MOVNTQ(%%mm1, (dst, index, 2))\
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
752 MOVNTQ(%%mm7, 8(dst, index, 2))\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
753 \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
754 "add $8, "#index" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
755 "cmp "#dstw", "#index" \n\t"\
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
756 " jb 1b \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
757 #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
758
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
759
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
760 static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
761 int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
762 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
2519
6f3fa9bc3b27 yv12 to yv12 scaler
michael
parents: 2503
diff changeset
763 {
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
764 #ifdef HAVE_MMX
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
765 if(uDest != NULL)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
766 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
767 asm volatile(
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
768 YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
769 :: "r" (&c->redDither),
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
770 "r" (uDest), "p" (chrDstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
771 : "%"REG_a, "%"REG_d, "%"REG_S
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
772 );
2519
6f3fa9bc3b27 yv12 to yv12 scaler
michael
parents: 2503
diff changeset
773
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
774 asm volatile(
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
775 YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
776 :: "r" (&c->redDither),
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
777 "r" (vDest), "p" (chrDstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
778 : "%"REG_a, "%"REG_d, "%"REG_S
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
779 );
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
780 }
2521
b70a77066611 tell the c compiler that the memory changed
michael
parents: 2520
diff changeset
781
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
782 asm volatile(
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
783 YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
784 :: "r" (&c->redDither),
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
785 "r" (dest), "p" (dstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
786 : "%"REG_a, "%"REG_d, "%"REG_S
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
787 );
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
788 #else
12017
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
789 #ifdef HAVE_ALTIVEC
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
790 yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
791 chrFilter, chrSrc, chrFilterSize,
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
792 dest, uDest, vDest, dstW, chrDstW);
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
793 #else //HAVE_ALTIVEC
6540
5b3cace64e9d 100l (non mmx versions didnt compile)
michael
parents: 6532
diff changeset
794 yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
795 chrFilter, chrSrc, chrFilterSize,
6540
5b3cace64e9d 100l (non mmx versions didnt compile)
michael
parents: 6532
diff changeset
796 dest, uDest, vDest, dstW, chrDstW);
12017
21e5cb258a95 AltiVec support in postproc/ + altivec optimizations for yuv2yuvX patch by (Romain Dolbeau <dolbeau at irisa dot fr>)
michael
parents: 11122
diff changeset
797 #endif //!HAVE_ALTIVEC
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
798 #endif
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
799 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
800
14715
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
801 static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
802 int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
803 uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
804 {
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
805 yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
806 chrFilter, chrSrc, chrFilterSize,
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
807 dest, uDest, dstW, chrDstW, dstFormat);
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
808 }
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
809
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
810 static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
811 uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
812 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
813 #ifdef HAVE_MMX
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
814 if(uDest != NULL)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
815 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
816 asm volatile(
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
817 YSCALEYUV2YV121
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
818 :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
819 "g" (-chrDstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
820 : "%"REG_a
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
821 );
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
822
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
823 asm volatile(
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
824 YSCALEYUV2YV121
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
825 :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
826 "g" (-chrDstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
827 : "%"REG_a
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
828 );
2519
6f3fa9bc3b27 yv12 to yv12 scaler
michael
parents: 2503
diff changeset
829 }
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
830
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
831 asm volatile(
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
832 YSCALEYUV2YV121
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
833 :: "r" (lumSrc + dstW), "r" (dest + dstW),
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
834 "g" (-dstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
835 : "%"REG_a
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
836 );
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
837 #else
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
838 int i;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
839 for(i=0; i<dstW; i++)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
840 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
841 int val= lumSrc[i]>>7;
6503
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
842
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
843 if(val&256){
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
844 if(val<0) val=0;
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
845 else val=255;
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
846 }
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
847
6503
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
848 dest[i]= val;
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
849 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
850
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
851 if(uDest != NULL)
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
852 for(i=0; i<chrDstW; i++)
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
853 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
854 int u=chrSrc[i]>>7;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
855 int v=chrSrc[i + 2048]>>7;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
856
6503
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
857 if((u|v)&256){
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
858 if(u<0) u=0;
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
859 else if (u>255) u=255;
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
860 if(v<0) v=0;
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
861 else if (v>255) v=255;
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
862 }
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
863
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
864 uDest[i]= u;
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
865 vDest[i]= v;
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
866 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
867 #endif
2519
6f3fa9bc3b27 yv12 to yv12 scaler
michael
parents: 2503
diff changeset
868 }
6f3fa9bc3b27 yv12 to yv12 scaler
michael
parents: 2503
diff changeset
869
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
870
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
871 /**
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
872 * vertical scale YV12 to RGB
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
873 */
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
874 static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
875 int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
18575
e00cea3e1732 fix variable type used via "m" asm constraint to match size used in asm.
reimar
parents: 18392
diff changeset
876 uint8_t *dest, long dstW, long dstY)
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
877 {
18575
e00cea3e1732 fix variable type used via "m" asm constraint to match size used in asm.
reimar
parents: 18392
diff changeset
878 long dummy=0;
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
879 switch(c->dstFormat)
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
880 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
881 #ifdef HAVE_MMX
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
882 case IMGFMT_BGR32:
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
883 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
884 asm volatile(
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
885 YSCALEYUV2RGBX
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
886 WRITEBGR32(%4, %5, %%REGa)
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
887
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
888 :: "r" (&c->redDither),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
889 "m" (dummy), "m" (dummy), "m" (dummy),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
890 "r" (dest), "m" (dstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
891 : "%"REG_a, "%"REG_d, "%"REG_S
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
892 );
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
893 }
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
894 break;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
895 case IMGFMT_BGR24:
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
896 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
897 asm volatile(
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
898 YSCALEYUV2RGBX
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
899 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
900 "add %4, %%"REG_b" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
901 WRITEBGR24(%%REGb, %5, %%REGa)
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
902
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
903 :: "r" (&c->redDither),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
904 "m" (dummy), "m" (dummy), "m" (dummy),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
905 "r" (dest), "m" (dstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
906 : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
907 );
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
908 }
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
909 break;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
910 case IMGFMT_BGR15:
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
911 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
912 asm volatile(
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
913 YSCALEYUV2RGBX
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
914 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
915 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
916 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
917 "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
918 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
919 #endif
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
920
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
921 WRITEBGR15(%4, %5, %%REGa)
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
922
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
923 :: "r" (&c->redDither),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
924 "m" (dummy), "m" (dummy), "m" (dummy),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
925 "r" (dest), "m" (dstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
926 : "%"REG_a, "%"REG_d, "%"REG_S
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
927 );
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
928 }
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
929 break;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
930 case IMGFMT_BGR16:
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
931 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
932 asm volatile(
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
933 YSCALEYUV2RGBX
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
934 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
935 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
936 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
937 "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
938 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
939 #endif
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
940
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
941 WRITEBGR16(%4, %5, %%REGa)
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
942
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
943 :: "r" (&c->redDither),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
944 "m" (dummy), "m" (dummy), "m" (dummy),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
945 "r" (dest), "m" (dstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
946 : "%"REG_a, "%"REG_d, "%"REG_S
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
947 );
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
948 }
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
949 break;
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
950 case IMGFMT_YUY2:
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
951 {
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
952 asm volatile(
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
953 YSCALEYUV2PACKEDX
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
954 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
955
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
956 "psraw $3, %%mm3 \n\t"
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
957 "psraw $3, %%mm4 \n\t"
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
958 "psraw $3, %%mm1 \n\t"
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
959 "psraw $3, %%mm7 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
960 WRITEYUY2(%4, %5, %%REGa)
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
961
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
962 :: "r" (&c->redDither),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
963 "m" (dummy), "m" (dummy), "m" (dummy),
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
964 "r" (dest), "m" (dstW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
965 : "%"REG_a, "%"REG_d, "%"REG_S
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
966 );
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
967 }
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
968 break;
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
969 #endif
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
970 default:
12698
d2aef091743c altivec yuv->rgb converter
michael
parents: 12130
diff changeset
971 #ifdef HAVE_ALTIVEC
17641
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
972 /* The following list of supported dstFormat values should
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
973 match what's found in the body of altivec_yuv2packedX() */
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
974 if(c->dstFormat==IMGFMT_ABGR || c->dstFormat==IMGFMT_BGRA ||
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
975 c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 ||
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
976 c->dstFormat==IMGFMT_RGBA || c->dstFormat==IMGFMT_ARGB)
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
977 altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
978 chrFilter, chrSrc, chrFilterSize,
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
979 dest, dstW, dstY);
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
980 else
12698
d2aef091743c altivec yuv->rgb converter
michael
parents: 12130
diff changeset
981 #endif
17641
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
982 yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
983 chrFilter, chrSrc, chrFilterSize,
fbf94ea858f1 don't call altivec_yuv2packedX() with a dstFormat that it doesn't support;
pacman
parents: 17367
diff changeset
984 dest, dstW, dstY);
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
985 break;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
986 }
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
987 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
988
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
989 /**
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
990 * vertical bilinear scale YV12 to RGB
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
991 */
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
992 static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
993 uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
994 {
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
995 int yalpha1=yalpha^4095;
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
996 int uvalpha1=uvalpha^4095;
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
997 int i;
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
998
11000
6e35326c742f many small typo and grammar fixes
gabucino
parents: 9970
diff changeset
999 #if 0 //isn't used
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1000 if(flags&SWS_FULL_CHR_H_INT)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1001 {
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1002 switch(dstFormat)
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1003 {
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1004 #ifdef HAVE_MMX
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1005 case IMGFMT_BGR32:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1006 asm volatile(
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1007
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1008
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1009 FULL_YSCALEYUV2RGB
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1010 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1011 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1012
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1013 "movq %%mm3, %%mm1 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1014 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1015 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1016
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1017 MOVNTQ(%%mm3, (%4, %%REGa, 4))
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1018 MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1019
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1020 "add $4, %%"REG_a" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1021 "cmp %5, %%"REG_a" \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1022 " jb 1b \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1023
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1024
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1025 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1026 "m" (yalpha1), "m" (uvalpha1)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1027 : "%"REG_a
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1028 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1029 break;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1030 case IMGFMT_BGR24:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1031 asm volatile(
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1032
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1033 FULL_YSCALEYUV2RGB
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1034
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1035 // lsb ... msb
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1036 "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1037 "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1038
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1039 "movq %%mm3, %%mm1 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1040 "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1041 "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1042
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1043 "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1044 "psrlq $8, %%mm3 \n\t" // GR0BGR00
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1045 "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1046 "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1047 "por %%mm2, %%mm3 \n\t" // BGRBGR00
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1048 "movq %%mm1, %%mm2 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1049 "psllq $48, %%mm1 \n\t" // 000000BG
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1050 "por %%mm1, %%mm3 \n\t" // BGRBGRBG
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1051
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1052 "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1053 "psrld $16, %%mm2 \n\t" // R000R000
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1054 "psrlq $24, %%mm1 \n\t" // 0BGR0000
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1055 "por %%mm2, %%mm1 \n\t" // RBGRR000
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1056
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1057 "mov %4, %%"REG_b" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1058 "add %%"REG_a", %%"REG_b" \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1059
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1060 #ifdef HAVE_MMX2
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1061 //FIXME Alignment
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1062 "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1063 "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1064 #else
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1065 "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1066 "psrlq $32, %%mm3 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1067 "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1068 "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1069 #endif
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1070 "add $4, %%"REG_a" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1071 "cmp %5, %%"REG_a" \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1072 " jb 1b \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1073
3209
0b172eb639f1 swscaler cleanup
michael
parents: 3136
diff changeset
1074 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1075 "m" (yalpha1), "m" (uvalpha1)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1076 : "%"REG_a, "%"REG_b
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1077 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1078 break;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1079 case IMGFMT_BGR15:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1080 asm volatile(
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1081
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1082 FULL_YSCALEYUV2RGB
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1083 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1084 "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1085 "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1086 "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1087 #endif
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1088 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1089 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1090 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1091
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1092 "psrlw $3, %%mm3 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1093 "psllw $2, %%mm1 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1094 "psllw $7, %%mm0 \n\t"
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1095 "pand "MANGLE(g15Mask)", %%mm1 \n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1096 "pand "MANGLE(r15Mask)", %%mm0 \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1097
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1098 "por %%mm3, %%mm1 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1099 "por %%mm1, %%mm0 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1100
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1101 MOVNTQ(%%mm0, (%4, %%REGa, 2))
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1102
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1103 "add $4, %%"REG_a" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1104 "cmp %5, %%"REG_a" \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1105 " jb 1b \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1106
3209
0b172eb639f1 swscaler cleanup
michael
parents: 3136
diff changeset
1107 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1108 "m" (yalpha1), "m" (uvalpha1)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1109 : "%"REG_a
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1110 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1111 break;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1112 case IMGFMT_BGR16:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1113 asm volatile(
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1114
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1115 FULL_YSCALEYUV2RGB
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1116 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1117 "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1118 "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1119 "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1120 #endif
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1121 "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1122 "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1123 "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1124
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1125 "psrlw $3, %%mm3 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1126 "psllw $3, %%mm1 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1127 "psllw $8, %%mm0 \n\t"
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1128 "pand "MANGLE(g16Mask)", %%mm1 \n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1129 "pand "MANGLE(r16Mask)", %%mm0 \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1130
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1131 "por %%mm3, %%mm1 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1132 "por %%mm1, %%mm0 \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1133
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1134 MOVNTQ(%%mm0, (%4, %%REGa, 2))
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1135
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1136 "add $4, %%"REG_a" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1137 "cmp %5, %%"REG_a" \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1138 " jb 1b \n\t"
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1139
3209
0b172eb639f1 swscaler cleanup
michael
parents: 3136
diff changeset
1140 :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1141 "m" (yalpha1), "m" (uvalpha1)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1142 : "%"REG_a
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1143 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1144 break;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1145 #endif
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1146 case IMGFMT_RGB32:
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1147 #ifndef HAVE_MMX
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1148 case IMGFMT_BGR32:
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1149 #endif
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1150 if(dstFormat==IMGFMT_BGR32)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1151 {
4794
michael
parents: 4793
diff changeset
1152 int i;
4793
68e7ed0c22be fixing bgr32 output on big-endian systems
michael
parents: 4698
diff changeset
1153 #ifdef WORDS_BIGENDIAN
68e7ed0c22be fixing bgr32 output on big-endian systems
michael
parents: 4698
diff changeset
1154 dest++;
68e7ed0c22be fixing bgr32 output on big-endian systems
michael
parents: 4698
diff changeset
1155 #endif
3209
0b172eb639f1 swscaler cleanup
michael
parents: 3136
diff changeset
1156 for(i=0;i<dstW;i++){
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1157 // vertical linear interpolation && yuv2rgb in a single step:
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1158 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1159 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1160 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
2503
d21d8d5f2e23 yuv2rgb bugfix
michael
parents: 2476
diff changeset
1161 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
d21d8d5f2e23 yuv2rgb bugfix
michael
parents: 2476
diff changeset
1162 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
d21d8d5f2e23 yuv2rgb bugfix
michael
parents: 2476
diff changeset
1163 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1164 dest+= 4;
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1165 }
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1166 }
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1167 else if(dstFormat==IMGFMT_BGR24)
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1168 {
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1169 int i;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1170 for(i=0;i<dstW;i++){
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1171 // vertical linear interpolation && yuv2rgb in a single step:
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1172 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1173 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1174 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1175 dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1176 dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1177 dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1178 dest+= 3;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1179 }
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1180 }
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1181 else if(dstFormat==IMGFMT_BGR16)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1182 {
2671
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
1183 int i;
3209
0b172eb639f1 swscaler cleanup
michael
parents: 3136
diff changeset
1184 for(i=0;i<dstW;i++){
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1185 // vertical linear interpolation && yuv2rgb in a single step:
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1186 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1187 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1188 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1189
2572
f2353173d52c c optimizations (array is faster than pointer) (16bpp variants tested and 2% faster)
michael
parents: 2569
diff changeset
1190 ((uint16_t*)dest)[i] =
2584
6d20d5d5829f 15/16bit in C speedup
michael
parents: 2576
diff changeset
1191 clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
6d20d5d5829f 15/16bit in C speedup
michael
parents: 2576
diff changeset
1192 clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
6d20d5d5829f 15/16bit in C speedup
michael
parents: 2576
diff changeset
1193 clip_table16r[(Y + yuvtab_3343[V]) >>13];
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1194 }
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1195 }
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
1196 else if(dstFormat==IMGFMT_BGR15)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1197 {
2671
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
1198 int i;
3209
0b172eb639f1 swscaler cleanup
michael
parents: 3136
diff changeset
1199 for(i=0;i<dstW;i++){
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1200 // vertical linear interpolation && yuv2rgb in a single step:
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1201 int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1202 int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1203 int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1204
2572
f2353173d52c c optimizations (array is faster than pointer) (16bpp variants tested and 2% faster)
michael
parents: 2569
diff changeset
1205 ((uint16_t*)dest)[i] =
2584
6d20d5d5829f 15/16bit in C speedup
michael
parents: 2576
diff changeset
1206 clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
6d20d5d5829f 15/16bit in C speedup
michael
parents: 2576
diff changeset
1207 clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
6d20d5d5829f 15/16bit in C speedup
michael
parents: 2576
diff changeset
1208 clip_table15r[(Y + yuvtab_3343[V]) >>13];
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1209 }
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1210 }
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1211 }//FULL_UV_IPOL
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1212 else
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1213 {
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1214 #endif // if 0
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1215 #ifdef HAVE_MMX
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1216 switch(c->dstFormat)
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1217 {
11000
6e35326c742f many small typo and grammar fixes
gabucino
parents: 9970
diff changeset
1218 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1219 case IMGFMT_BGR32:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1220 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1221 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1222 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1223 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1224 YSCALEYUV2RGB(%%REGBP, %5)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1225 WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1226 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1227 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1228
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1229 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1230 "a" (&c->redDither)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1231 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1232 return;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1233 case IMGFMT_BGR24:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1234 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1235 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1236 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1237 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1238 YSCALEYUV2RGB(%%REGBP, %5)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1239 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1240 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1241 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1242 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1243 "a" (&c->redDither)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1244 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1245 return;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1246 case IMGFMT_BGR15:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1247 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1248 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1249 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1250 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1251 YSCALEYUV2RGB(%%REGBP, %5)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1252 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1253 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1254 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1255 "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1256 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1257 #endif
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1258
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1259 WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1260 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1261 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1262
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1263 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1264 "a" (&c->redDither)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1265 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1266 return;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1267 case IMGFMT_BGR16:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1268 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1269 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1270 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1271 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1272 YSCALEYUV2RGB(%%REGBP, %5)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1273 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1274 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1275 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1276 "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1277 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1278 #endif
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1279
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1280 WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1281 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1282 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1283 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1284 "a" (&c->redDither)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1285 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1286 return;
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1287 case IMGFMT_YUY2:
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1288 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1289 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1290 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1291 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1292 YSCALEYUV2PACKED(%%REGBP, %5)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1293 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1294 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1295 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1296 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1297 "a" (&c->redDither)
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1298 );
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1299 return;
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1300 default: break;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1301 }
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1302 #endif //HAVE_MMX
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1303 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1304 }
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1305
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1306 /**
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1307 * YV12 to RGB without scaling or interpolating
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1308 */
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1309 static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1310 uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1311 {
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
1312 const int yalpha1=0;
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1313 int i;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1314
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1315 uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1316 const int yalpha= 4096; //FIXME ...
2671
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
1317
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1318 if(flags&SWS_FULL_CHR_H_INT)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1319 {
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1320 RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1321 return;
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1322 }
2576
437ed06579d8 c optimizations
michael
parents: 2575
diff changeset
1323
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1324 #ifdef HAVE_MMX
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1325 if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but its a bit faster
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1326 {
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1327 switch(dstFormat)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1328 {
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1329 case IMGFMT_BGR32:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1330 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1331 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1332 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1333 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1334 YSCALEYUV2RGB1(%%REGBP, %5)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1335 WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1336 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1337 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1338
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1339 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1340 "a" (&c->redDither)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1341 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1342 return;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1343 case IMGFMT_BGR24:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1344 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1345 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1346 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1347 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1348 YSCALEYUV2RGB1(%%REGBP, %5)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1349 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1350 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1351 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1352
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1353 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1354 "a" (&c->redDither)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1355 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1356 return;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1357 case IMGFMT_BGR15:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1358 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1359 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1360 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1361 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1362 YSCALEYUV2RGB1(%%REGBP, %5)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1363 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1364 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1365 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1366 "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1367 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1368 #endif
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1369 WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1370 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1371 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1372
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1373 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1374 "a" (&c->redDither)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1375 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1376 return;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1377 case IMGFMT_BGR16:
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1378 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1379 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1380 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1381 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1382 YSCALEYUV2RGB1(%%REGBP, %5)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1383 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1384 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1385 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1386 "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1387 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1388 #endif
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1389
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1390 WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1391 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1392 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1393
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1394 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1395 "a" (&c->redDither)
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1396 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1397 return;
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1398 case IMGFMT_YUY2:
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1399 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1400 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1401 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1402 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1403 YSCALEYUV2PACKED1(%%REGBP, %5)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1404 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1405 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1406 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1407
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1408 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1409 "a" (&c->redDither)
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1410 );
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1411 return;
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1412 }
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1413 }
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1414 else
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1415 {
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1416 switch(dstFormat)
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1417 {
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1418 case IMGFMT_BGR32:
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1419 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1420 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1421 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1422 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1423 YSCALEYUV2RGB1b(%%REGBP, %5)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1424 WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1425 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1426 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1427
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1428 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1429 "a" (&c->redDither)
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1430 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1431 return;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1432 case IMGFMT_BGR24:
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1433 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1434 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1435 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1436 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1437 YSCALEYUV2RGB1b(%%REGBP, %5)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1438 WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1439 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1440 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1441
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1442 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1443 "a" (&c->redDither)
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1444 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1445 return;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1446 case IMGFMT_BGR15:
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1447 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1448 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1449 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1450 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1451 YSCALEYUV2RGB1b(%%REGBP, %5)
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1452 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1453 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1454 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1455 "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1456 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1457 #endif
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1458 WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1459 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1460 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1461
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1462 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1463 "a" (&c->redDither)
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1464 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1465 return;
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1466 case IMGFMT_BGR16:
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1467 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1468 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1469 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1470 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1471 YSCALEYUV2RGB1b(%%REGBP, %5)
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1472 /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1473 #ifdef DITHER1XBPP
4248
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1474 "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1475 "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
3cdb86beebce mangle for win32 in postproc
atmos4
parents: 3883
diff changeset
1476 "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1477 #endif
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1478
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1479 WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1480 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1481 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1482
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1483 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1484 "a" (&c->redDither)
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1485 );
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1486 return;
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1487 case IMGFMT_YUY2:
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1488 asm volatile(
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1489 "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1490 "mov %4, %%"REG_b" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1491 "push %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1492 YSCALEYUV2PACKED1b(%%REGBP, %5)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1493 WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1494 "pop %%"REG_BP" \n\t"
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1495 "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
9417
5eea6d903b4c cleanup
michael
parents: 9414
diff changeset
1496
18392
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1497 :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
b10d4b3cb9ec removing esp usage
michael
parents: 18104
diff changeset
1498 "a" (&c->redDither)
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1499 );
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1500 return;
2569
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1501 }
30b736e7feef interpolate chrominance for every second line in the 1:1 vertical scale function
michael
parents: 2566
diff changeset
1502 }
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1503 #endif
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1504 if( uvalpha < 2048 )
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1505 {
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1506 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1507 }else{
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
1508 YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
1509 }
2316
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1510 }
bcb229557e9b fixed alignment (static variables where sometimes not 8-byte aligned)
michael
parents: 2297
diff changeset
1511
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1512 //FIXME yuy2* can read upto 7 samples to much
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1513
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1514 static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1515 {
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1516 #ifdef HAVE_MMX
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1517 asm volatile(
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1518 "movq "MANGLE(bm01010101)", %%mm2\n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1519 "mov %0, %%"REG_a" \n\t"
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1520 "1: \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1521 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1522 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1523 "pand %%mm2, %%mm0 \n\t"
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1524 "pand %%mm2, %%mm1 \n\t"
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1525 "packuswb %%mm1, %%mm0 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1526 "movq %%mm0, (%2, %%"REG_a") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1527 "add $8, %%"REG_a" \n\t"
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1528 " js 1b \n\t"
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1529 : : "g" (-width), "r" (src+width*2), "r" (dst+width)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1530 : "%"REG_a
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1531 );
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1532 #else
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1533 int i;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1534 for(i=0; i<width; i++)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1535 dst[i]= src[2*i];
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1536 #endif
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1537 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1538
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1539 static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1540 {
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1541 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1542 asm volatile(
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1543 "movq "MANGLE(bm01010101)", %%mm4\n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1544 "mov %0, %%"REG_a" \n\t"
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1545 "1: \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1546 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1547 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1548 "movq (%2, %%"REG_a",4), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1549 "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1550 PAVGB(%%mm2, %%mm0)
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1551 PAVGB(%%mm3, %%mm1)
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1552 "psrlw $8, %%mm0 \n\t"
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1553 "psrlw $8, %%mm1 \n\t"
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1554 "packuswb %%mm1, %%mm0 \n\t"
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1555 "movq %%mm0, %%mm1 \n\t"
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1556 "psrlw $8, %%mm0 \n\t"
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1557 "pand %%mm4, %%mm1 \n\t"
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1558 "packuswb %%mm0, %%mm0 \n\t"
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1559 "packuswb %%mm1, %%mm1 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1560 "movd %%mm0, (%4, %%"REG_a") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1561 "movd %%mm1, (%3, %%"REG_a") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1562 "add $4, %%"REG_a" \n\t"
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1563 " js 1b \n\t"
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1564 : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1565 : "%"REG_a
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
1566 );
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1567 #else
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1568 int i;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1569 for(i=0; i<width; i++)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1570 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1571 dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1572 dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1573 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1574 #endif
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1575 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1576
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1577 //this is allmost identical to the previous, end exists only cuz yuy2ToY/UV)(dst, src+1, ...) would have 100% unaligned accesses
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1578 static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1579 {
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1580 #ifdef HAVE_MMX
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1581 asm volatile(
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1582 "mov %0, %%"REG_a" \n\t"
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1583 "1: \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1584 "movq (%1, %%"REG_a",2), %%mm0 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1585 "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1586 "psrlw $8, %%mm0 \n\t"
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1587 "psrlw $8, %%mm1 \n\t"
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1588 "packuswb %%mm1, %%mm0 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1589 "movq %%mm0, (%2, %%"REG_a") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1590 "add $8, %%"REG_a" \n\t"
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1591 " js 1b \n\t"
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1592 : : "g" (-width), "r" (src+width*2), "r" (dst+width)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1593 : "%"REG_a
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1594 );
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1595 #else
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1596 int i;
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1597 for(i=0; i<width; i++)
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1598 dst[i]= src[2*i+1];
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1599 #endif
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1600 }
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1601
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1602 static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1603 {
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1604 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1605 asm volatile(
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1606 "movq "MANGLE(bm01010101)", %%mm4\n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1607 "mov %0, %%"REG_a" \n\t"
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1608 "1: \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1609 "movq (%1, %%"REG_a",4), %%mm0 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1610 "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1611 "movq (%2, %%"REG_a",4), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1612 "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1613 PAVGB(%%mm2, %%mm0)
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1614 PAVGB(%%mm3, %%mm1)
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1615 "pand %%mm4, %%mm0 \n\t"
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1616 "pand %%mm4, %%mm1 \n\t"
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1617 "packuswb %%mm1, %%mm0 \n\t"
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1618 "movq %%mm0, %%mm1 \n\t"
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1619 "psrlw $8, %%mm0 \n\t"
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1620 "pand %%mm4, %%mm1 \n\t"
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1621 "packuswb %%mm0, %%mm0 \n\t"
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1622 "packuswb %%mm1, %%mm1 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1623 "movd %%mm0, (%4, %%"REG_a") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1624 "movd %%mm1, (%3, %%"REG_a") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1625 "add $4, %%"REG_a" \n\t"
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1626 " js 1b \n\t"
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1627 : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1628 : "%"REG_a
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1629 );
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1630 #else
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1631 int i;
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1632 for(i=0; i<width; i++)
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1633 {
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1634 dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1635 dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1636 }
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1637 #endif
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1638 }
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
1639
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1640 static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1641 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1642 int i;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1643 for(i=0; i<width; i++)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1644 {
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1645 int b= ((uint32_t*)src)[i]&0xFF;
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1646 int g= (((uint32_t*)src)[i]>>8)&0xFF;
9499
bc5b87370cd1 cleanup
michael
parents: 9494
diff changeset
1647 int r= (((uint32_t*)src)[i]>>16)&0xFF;
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1648
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1649 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1650 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1651 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1652
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1653 static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1654 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1655 int i;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1656 for(i=0; i<width; i++)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1657 {
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1658 const int a= ((uint32_t*)src1)[2*i+0];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1659 const int e= ((uint32_t*)src1)[2*i+1];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1660 const int c= ((uint32_t*)src2)[2*i+0];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1661 const int d= ((uint32_t*)src2)[2*i+1];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1662 const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1663 const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1664 const int b= l&0x3FF;
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1665 const int g= h>>8;
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1666 const int r= l>>16;
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1667
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1668 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1669 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1670 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1671 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1672
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1673 static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1674 {
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1675 #ifdef HAVE_MMX
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1676 asm volatile(
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1677 "mov %2, %%"REG_a" \n\t"
4923
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1678 "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1679 "movq "MANGLE(w1111)", %%mm5 \n\t"
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1680 "pxor %%mm7, %%mm7 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1681 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
1682 ASMALIGN16
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1683 "1: \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1684 PREFETCH" 64(%0, %%"REG_b") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1685 "movd (%0, %%"REG_b"), %%mm0 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1686 "movd 3(%0, %%"REG_b"), %%mm1 \n\t"
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1687 "punpcklbw %%mm7, %%mm0 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1688 "punpcklbw %%mm7, %%mm1 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1689 "movd 6(%0, %%"REG_b"), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1690 "movd 9(%0, %%"REG_b"), %%mm3 \n\t"
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1691 "punpcklbw %%mm7, %%mm2 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1692 "punpcklbw %%mm7, %%mm3 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1693 "pmaddwd %%mm6, %%mm0 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1694 "pmaddwd %%mm6, %%mm1 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1695 "pmaddwd %%mm6, %%mm2 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1696 "pmaddwd %%mm6, %%mm3 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1697 #ifndef FAST_BGR2YV12
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1698 "psrad $8, %%mm0 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1699 "psrad $8, %%mm1 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1700 "psrad $8, %%mm2 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1701 "psrad $8, %%mm3 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1702 #endif
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1703 "packssdw %%mm1, %%mm0 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1704 "packssdw %%mm3, %%mm2 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1705 "pmaddwd %%mm5, %%mm0 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1706 "pmaddwd %%mm5, %%mm2 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1707 "packssdw %%mm2, %%mm0 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1708 "psraw $7, %%mm0 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1709
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1710 "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1711 "movd 15(%0, %%"REG_b"), %%mm1 \n\t"
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1712 "punpcklbw %%mm7, %%mm4 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1713 "punpcklbw %%mm7, %%mm1 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1714 "movd 18(%0, %%"REG_b"), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1715 "movd 21(%0, %%"REG_b"), %%mm3 \n\t"
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1716 "punpcklbw %%mm7, %%mm2 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1717 "punpcklbw %%mm7, %%mm3 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1718 "pmaddwd %%mm6, %%mm4 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1719 "pmaddwd %%mm6, %%mm1 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1720 "pmaddwd %%mm6, %%mm2 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1721 "pmaddwd %%mm6, %%mm3 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1722 #ifndef FAST_BGR2YV12
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1723 "psrad $8, %%mm4 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1724 "psrad $8, %%mm1 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1725 "psrad $8, %%mm2 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1726 "psrad $8, %%mm3 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1727 #endif
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1728 "packssdw %%mm1, %%mm4 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1729 "packssdw %%mm3, %%mm2 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1730 "pmaddwd %%mm5, %%mm4 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1731 "pmaddwd %%mm5, %%mm2 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1732 "add $24, %%"REG_b" \n\t"
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1733 "packssdw %%mm2, %%mm4 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1734 "psraw $7, %%mm4 \n\t"
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1735
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1736 "packuswb %%mm4, %%mm0 \n\t"
4923
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1737 "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1738
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1739 "movq %%mm0, (%1, %%"REG_a") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1740 "add $8, %%"REG_a" \n\t"
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1741 " js 1b \n\t"
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1742 : : "r" (src+width*3), "r" (dst+width), "g" (-width)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1743 : "%"REG_a, "%"REG_b
4612
4edfdec1dc12 bgr24toY in MMX
michael
parents: 4580
diff changeset
1744 );
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1745 #else
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1746 int i;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1747 for(i=0; i<width; i++)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1748 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1749 int b= src[i*3+0];
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1750 int g= src[i*3+1];
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1751 int r= src[i*3+2];
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1752
9434
michael
parents: 9433
diff changeset
1753 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1754 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1755 #endif
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1756 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1757
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1758 static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1759 {
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1760 #ifdef HAVE_MMX
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1761 asm volatile(
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1762 "mov %4, %%"REG_a" \n\t"
4923
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1763 "movq "MANGLE(w1111)", %%mm5 \n\t"
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1764 "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1765 "pxor %%mm7, %%mm7 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1766 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1767 "add %%"REG_b", %%"REG_b" \n\t"
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
1768 ASMALIGN16
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1769 "1: \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1770 PREFETCH" 64(%0, %%"REG_b") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1771 PREFETCH" 64(%1, %%"REG_b") \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1772 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1773 "movq (%0, %%"REG_b"), %%mm0 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1774 "movq (%1, %%"REG_b"), %%mm1 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1775 "movq 6(%0, %%"REG_b"), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1776 "movq 6(%1, %%"REG_b"), %%mm3 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1777 PAVGB(%%mm1, %%mm0)
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1778 PAVGB(%%mm3, %%mm2)
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1779 "movq %%mm0, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1780 "movq %%mm2, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1781 "psrlq $24, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1782 "psrlq $24, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1783 PAVGB(%%mm1, %%mm0)
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1784 PAVGB(%%mm3, %%mm2)
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1785 "punpcklbw %%mm7, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1786 "punpcklbw %%mm7, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1787 #else
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1788 "movd (%0, %%"REG_b"), %%mm0 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1789 "movd (%1, %%"REG_b"), %%mm1 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1790 "movd 3(%0, %%"REG_b"), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1791 "movd 3(%1, %%"REG_b"), %%mm3 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1792 "punpcklbw %%mm7, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1793 "punpcklbw %%mm7, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1794 "punpcklbw %%mm7, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1795 "punpcklbw %%mm7, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1796 "paddw %%mm1, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1797 "paddw %%mm3, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1798 "paddw %%mm2, %%mm0 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1799 "movd 6(%0, %%"REG_b"), %%mm4 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1800 "movd 6(%1, %%"REG_b"), %%mm1 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1801 "movd 9(%0, %%"REG_b"), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1802 "movd 9(%1, %%"REG_b"), %%mm3 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1803 "punpcklbw %%mm7, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1804 "punpcklbw %%mm7, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1805 "punpcklbw %%mm7, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1806 "punpcklbw %%mm7, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1807 "paddw %%mm1, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1808 "paddw %%mm3, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1809 "paddw %%mm4, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1810 "psrlw $2, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1811 "psrlw $2, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1812 #endif
4923
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1813 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1814 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1815
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1816 "pmaddwd %%mm0, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1817 "pmaddwd %%mm2, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1818 "pmaddwd %%mm6, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1819 "pmaddwd %%mm6, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1820 #ifndef FAST_BGR2YV12
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1821 "psrad $8, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1822 "psrad $8, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1823 "psrad $8, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1824 "psrad $8, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1825 #endif
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1826 "packssdw %%mm2, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1827 "packssdw %%mm3, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1828 "pmaddwd %%mm5, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1829 "pmaddwd %%mm5, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1830 "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1831 "psraw $7, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1832
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1833 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1834 "movq 12(%0, %%"REG_b"), %%mm4 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1835 "movq 12(%1, %%"REG_b"), %%mm1 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1836 "movq 18(%0, %%"REG_b"), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1837 "movq 18(%1, %%"REG_b"), %%mm3 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1838 PAVGB(%%mm1, %%mm4)
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1839 PAVGB(%%mm3, %%mm2)
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1840 "movq %%mm4, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1841 "movq %%mm2, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1842 "psrlq $24, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1843 "psrlq $24, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1844 PAVGB(%%mm1, %%mm4)
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1845 PAVGB(%%mm3, %%mm2)
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1846 "punpcklbw %%mm7, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1847 "punpcklbw %%mm7, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1848 #else
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1849 "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1850 "movd 12(%1, %%"REG_b"), %%mm1 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1851 "movd 15(%0, %%"REG_b"), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1852 "movd 15(%1, %%"REG_b"), %%mm3 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1853 "punpcklbw %%mm7, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1854 "punpcklbw %%mm7, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1855 "punpcklbw %%mm7, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1856 "punpcklbw %%mm7, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1857 "paddw %%mm1, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1858 "paddw %%mm3, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1859 "paddw %%mm2, %%mm4 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1860 "movd 18(%0, %%"REG_b"), %%mm5 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1861 "movd 18(%1, %%"REG_b"), %%mm1 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1862 "movd 21(%0, %%"REG_b"), %%mm2 \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1863 "movd 21(%1, %%"REG_b"), %%mm3 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1864 "punpcklbw %%mm7, %%mm5 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1865 "punpcklbw %%mm7, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1866 "punpcklbw %%mm7, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1867 "punpcklbw %%mm7, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1868 "paddw %%mm1, %%mm5 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1869 "paddw %%mm3, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1870 "paddw %%mm5, %%mm2 \n\t"
4923
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1871 "movq "MANGLE(w1111)", %%mm5 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1872 "psrlw $2, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1873 "psrlw $2, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1874 #endif
4923
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1875 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1876 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1877
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1878 "pmaddwd %%mm4, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1879 "pmaddwd %%mm2, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1880 "pmaddwd %%mm6, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1881 "pmaddwd %%mm6, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1882 #ifndef FAST_BGR2YV12
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1883 "psrad $8, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1884 "psrad $8, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1885 "psrad $8, %%mm2 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1886 "psrad $8, %%mm3 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1887 #endif
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1888 "packssdw %%mm2, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1889 "packssdw %%mm3, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1890 "pmaddwd %%mm5, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1891 "pmaddwd %%mm5, %%mm1 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1892 "add $24, %%"REG_b" \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1893 "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1894 "psraw $7, %%mm4 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1895
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1896 "movq %%mm0, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1897 "punpckldq %%mm4, %%mm0 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1898 "punpckhdq %%mm4, %%mm1 \n\t"
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1899 "packsswb %%mm1, %%mm0 \n\t"
4923
3cc0f4938be1 add mangling
atmos4
parents: 4794
diff changeset
1900 "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1901
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1902 "movd %%mm0, (%2, %%"REG_a") \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1903 "punpckhdq %%mm0, %%mm0 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1904 "movd %%mm0, (%3, %%"REG_a") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1905 "add $4, %%"REG_a" \n\t"
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1906 " js 1b \n\t"
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
1907 : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
1908 : "%"REG_a, "%"REG_b
4619
ef213d64e20a bgr24toUV in MMX
michael
parents: 4612
diff changeset
1909 );
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1910 #else
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1911 int i;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1912 for(i=0; i<width; i++)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1913 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1914 int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1915 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1916 int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1917
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1918 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1919 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1920 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1921 #endif
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1922 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
1923
4578
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1924 static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1925 {
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1926 int i;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1927 for(i=0; i<width; i++)
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1928 {
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1929 int d= ((uint16_t*)src)[i];
4578
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1930 int b= d&0x1F;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1931 int g= (d>>5)&0x3F;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1932 int r= (d>>11)&0x1F;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1933
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1934 dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1935 }
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1936 }
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1937
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1938 static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1939 {
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1940 int i;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1941 for(i=0; i<width; i++)
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1942 {
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1943 int d0= ((uint32_t*)src1)[i];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1944 int d1= ((uint32_t*)src2)[i];
4579
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1945
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1946 int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1947 int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1948
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1949 int dh2= (dh>>11) + (dh<<21);
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1950 int d= dh2 + dl;
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1951
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1952 int b= d&0x7F;
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1953 int r= (d>>11)&0x7F;
c35454989164 faster bgr16 input
michael
parents: 4578
diff changeset
1954 int g= d>>21;
4578
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1955 dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1956 dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1957 }
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1958 }
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
1959
4580
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1960 static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1961 {
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1962 int i;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1963 for(i=0; i<width; i++)
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1964 {
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1965 int d= ((uint16_t*)src)[i];
4580
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1966 int b= d&0x1F;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1967 int g= (d>>5)&0x1F;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1968 int r= (d>>10)&0x1F;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1969
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1970 dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1971 }
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1972 }
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1973
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1974 static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1975 {
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1976 int i;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1977 for(i=0; i<width; i++)
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1978 {
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1979 int d0= ((uint32_t*)src1)[i];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
1980 int d1= ((uint32_t*)src2)[i];
4580
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1981
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1982 int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1983 int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1984
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1985 int dh2= (dh>>11) + (dh<<21);
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1986 int d= dh2 + dl;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1987
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1988 int b= d&0x7F;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1989 int r= (d>>10)&0x7F;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1990 int g= d>>21;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1991 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1992 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1993 }
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1994 }
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1995
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
1996
4558
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
1997 static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
1998 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
1999 int i;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2000 for(i=0; i<width; i++)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2001 {
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2002 int r= ((uint32_t*)src)[i]&0xFF;
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2003 int g= (((uint32_t*)src)[i]>>8)&0xFF;
9499
bc5b87370cd1 cleanup
michael
parents: 9494
diff changeset
2004 int b= (((uint32_t*)src)[i]>>16)&0xFF;
4558
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2005
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2006 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
4558
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2007 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2008 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2009
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2010 static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2011 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2012 int i;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2013 for(i=0; i<width; i++)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2014 {
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2015 const int a= ((uint32_t*)src1)[2*i+0];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2016 const int e= ((uint32_t*)src1)[2*i+1];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2017 const int c= ((uint32_t*)src2)[2*i+0];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2018 const int d= ((uint32_t*)src2)[2*i+1];
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2019 const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2020 const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2021 const int r= l&0x3FF;
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2022 const int g= h>>8;
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2023 const int b= l>>16;
4558
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2024
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2025 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2026 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2027 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2028 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2029
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2030 static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2031 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2032 int i;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2033 for(i=0; i<width; i++)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2034 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2035 int r= src[i*3+0];
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2036 int g= src[i*3+1];
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2037 int b= src[i*3+2];
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2038
9433
53f03173e48f bigendian bug (fixes?)
michael
parents: 9417
diff changeset
2039 dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
4558
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2040 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2041 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2042
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2043 static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2044 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2045 int i;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2046 for(i=0; i<width; i++)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2047 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2048 int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2049 int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2050 int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2051
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2052 dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2053 dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2054 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2055 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2056
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2057
3272
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
// *** Bilinear / Bicubic horizontal scaling of one 8-bit line into 16-bit dst.
//
// For each output pixel i, computes the FIR sum
//     dst[i] = clip( sum_j src[filterPos[i]+j] * filter[i*filterSize+j] >> 7 )
// (see the C fallback at the bottom, which is the reference implementation).
//
// dst        : output, dstW int16 samples (15-bit unsigned range after clip)
// src        : input line, srcW bytes
// xInc       : fixed-point step; unused here except by the AltiVec path's
//              signature — the filter tables already encode the scaling
// filter     : per-output-pixel coefficient groups, filterSize int16 each
// filterPos  : per-output-pixel start offset into src
// filterSize : coefficients per output pixel; MMX paths require a multiple of 4
static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
				  int16_t *filter, int16_t *filterPos, long filterSize)
{
#ifdef HAVE_MMX
	assert(filterSize % 4 == 0 && filterSize>0);
	if(filterSize==4) // allways true for upscaling, sometimes for down too
	{
		// counter runs from -2*dstW up to 0; biasing the pointers by it lets
		// the loop end on "add $4 / jnc" (loop exits when counter wraps past 0).
		long counter= -2*dstW;
		filter-= counter*2;
		filterPos-= counter/2;
		dst-= counter/2;
		asm volatile(
			"pxor %%mm7, %%mm7		\n\t" // mm7 = 0, used to zero-extend bytes
			"movq "MANGLE(w02)", %%mm6	\n\t" // pairwise-add constant for the final pmaddwd
			"push %%"REG_BP"		\n\t" // we use 7 regs here ...
			"mov %%"REG_a", %%"REG_BP"	\n\t"
			ASMALIGN16
			"1:				\n\t"
			"movzwl (%2, %%"REG_BP"), %%eax	\n\t" // srcPos of pixel i
			"movzwl 2(%2, %%"REG_BP"), %%ebx\n\t" // srcPos of pixel i+1
			"movq (%1, %%"REG_BP", 4), %%mm1\n\t" // 4 coeffs for pixel i
			"movq 8(%1, %%"REG_BP", 4), %%mm3\n\t" // 4 coeffs for pixel i+1
			"movd (%3, %%"REG_a"), %%mm0	\n\t" // 4 src bytes for pixel i
			"movd (%3, %%"REG_b"), %%mm2	\n\t" // 4 src bytes for pixel i+1
			"punpcklbw %%mm7, %%mm0		\n\t"
			"punpcklbw %%mm7, %%mm2		\n\t"
			"pmaddwd %%mm1, %%mm0		\n\t" // multiply-accumulate pairs
			"pmaddwd %%mm2, %%mm3		\n\t"
			"psrad $8, %%mm0		\n\t"
			"psrad $8, %%mm3		\n\t"
			"packssdw %%mm3, %%mm0		\n\t"
			"pmaddwd %%mm6, %%mm0		\n\t" // horizontal add of the two partial sums
			"packssdw %%mm0, %%mm0		\n\t" // saturating pack == the MIN/MAX clip of the C code
			"movd %%mm0, (%4, %%"REG_BP")	\n\t" // store two int16 results
			"add $4, %%"REG_BP"		\n\t"
			" jnc 1b			\n\t"

			"pop %%"REG_BP"			\n\t"
			: "+a" (counter)
			: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
			: "%"REG_b
		);
	}
	else if(filterSize==8)
	{
		// same scheme as the filterSize==4 case, but each pixel consumes
		// two quads of coefficients/source bytes per iteration
		long counter= -2*dstW;
		filter-= counter*4;
		filterPos-= counter/2;
		dst-= counter/2;
		asm volatile(
			"pxor %%mm7, %%mm7		\n\t"
			"movq "MANGLE(w02)", %%mm6	\n\t"
			"push %%"REG_BP"		\n\t" // we use 7 regs here ...
			"mov %%"REG_a", %%"REG_BP"	\n\t"
			ASMALIGN16
			"1:				\n\t"
			"movzwl (%2, %%"REG_BP"), %%eax	\n\t"
			"movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
			"movq (%1, %%"REG_BP", 8), %%mm1\n\t"
			"movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
			"movd (%3, %%"REG_a"), %%mm0	\n\t"
			"movd (%3, %%"REG_b"), %%mm2	\n\t"
			"punpcklbw %%mm7, %%mm0		\n\t"
			"punpcklbw %%mm7, %%mm2		\n\t"
			"pmaddwd %%mm1, %%mm0		\n\t"
			"pmaddwd %%mm2, %%mm3		\n\t"

			"movq 8(%1, %%"REG_BP", 8), %%mm1\n\t" // coeffs 4..7 of pixel i
			"movq 24(%1, %%"REG_BP", 8), %%mm5\n\t" // coeffs 4..7 of pixel i+1
			"movd 4(%3, %%"REG_a"), %%mm4	\n\t"
			"movd 4(%3, %%"REG_b"), %%mm2	\n\t"
			"punpcklbw %%mm7, %%mm4		\n\t"
			"punpcklbw %%mm7, %%mm2		\n\t"
			"pmaddwd %%mm1, %%mm4		\n\t"
			"pmaddwd %%mm2, %%mm5		\n\t"
			"paddd %%mm4, %%mm0		\n\t" // merge the two half-sums
			"paddd %%mm5, %%mm3		\n\t"

			"psrad $8, %%mm0		\n\t"
			"psrad $8, %%mm3		\n\t"
			"packssdw %%mm3, %%mm0		\n\t"
			"pmaddwd %%mm6, %%mm0		\n\t"
			"packssdw %%mm0, %%mm0		\n\t"
			"movd %%mm0, (%4, %%"REG_BP")	\n\t"
			"add $4, %%"REG_BP"		\n\t"
			" jnc 1b			\n\t"

			"pop %%"REG_BP"			\n\t"
			: "+a" (counter)
			: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
			: "%"REG_b
		);
	}
	else
	{
		// generic filterSize (any multiple of 4): inner loop (label 2)
		// walks the coefficient groups 4 at a time; 'offset' marks the
		// end address used as the inner-loop bound
		uint8_t *offset = src+filterSize;
		long counter= -2*dstW;
//		filter-= counter*filterSize/2;
		filterPos-= counter/2;
		dst-= counter/2;
		asm volatile(
			"pxor %%mm7, %%mm7		\n\t"
			"movq "MANGLE(w02)", %%mm6	\n\t"
			ASMALIGN16
			"1:				\n\t"
			"mov %2, %%"REG_c"		\n\t"
			"movzwl (%%"REG_c", %0), %%eax	\n\t"
			"movzwl 2(%%"REG_c", %0), %%ebx	\n\t"
			"mov %5, %%"REG_c"		\n\t"
			"pxor %%mm4, %%mm4		\n\t" // accumulator, pixel i
			"pxor %%mm5, %%mm5		\n\t" // accumulator, pixel i+1
			"2:				\n\t"
			"movq (%1), %%mm1		\n\t" // next 4 coeffs, pixel i
			"movq (%1, %6), %%mm3		\n\t" // next 4 coeffs, pixel i+1 (%6 = filterSize*2 bytes)
			"movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
			"movd (%%"REG_c", %%"REG_b"), %%mm2\n\t"
			"punpcklbw %%mm7, %%mm0		\n\t"
			"punpcklbw %%mm7, %%mm2		\n\t"
			"pmaddwd %%mm1, %%mm0		\n\t"
			"pmaddwd %%mm2, %%mm3		\n\t"
			"paddd %%mm3, %%mm5		\n\t"
			"paddd %%mm0, %%mm4		\n\t"
			"add $8, %1			\n\t"
			"add $4, %%"REG_c"		\n\t"
			"cmp %4, %%"REG_c"		\n\t" // reached src+filterSize?
			" jb 2b				\n\t"
			"add %6, %1			\n\t" // skip pixel i+1's coeffs (already consumed via %6 offset)
			"psrad $8, %%mm4		\n\t"
			"psrad $8, %%mm5		\n\t"
			"packssdw %%mm5, %%mm4		\n\t"
			"pmaddwd %%mm6, %%mm4		\n\t"
			"packssdw %%mm4, %%mm4		\n\t"
			"mov %3, %%"REG_a"		\n\t"
			"movd %%mm4, (%%"REG_a", %0)	\n\t"
			"add $4, %0			\n\t"
			" jnc 1b			\n\t"

			: "+r" (counter), "+r" (filter)
			: "m" (filterPos), "m" (dst), "m"(offset),
			  "m" (src), "r" (filterSize*2)
			: "%"REG_b, "%"REG_a, "%"REG_c
		);
	}
#else
#ifdef HAVE_ALTIVEC
	hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
#else
	// plain C reference implementation
	int i;
	for(i=0; i<dstW; i++)
	{
		int j;
		int srcPos= filterPos[i];
		int val=0;
//		printf("filterPos: %d\n", filterPos[i]);
		for(j=0; j<filterSize; j++)
		{
//			printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
			val += ((int)src[srcPos + j])*filter[filterSize*i + j];
		}
//		filter += hFilterSize;
		dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
//		dst[i] = val>>7;
	}
#endif
#endif
}
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2225 // *** horizontal scale Y line to temp buffer
18575
e00cea3e1732 fix variable type used via "m" asm constraint to match size used in asm.
reimar
parents: 18392
diff changeset
2226 static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2227 int flags, int canMMX2BeUsed, int16_t *hLumFilter,
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2228 int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2229 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2230 int32_t *mmx2FilterPos)
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2231 {
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2232 if(srcFormat==IMGFMT_YUY2)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2233 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2234 RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2235 src= formatConvBuffer;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2236 }
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2237 else if(srcFormat==IMGFMT_UYVY)
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2238 {
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2239 RENAME(uyvyToY)(formatConvBuffer, src, srcW);
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2240 src= formatConvBuffer;
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2241 }
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2242 else if(srcFormat==IMGFMT_BGR32)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2243 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2244 RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2245 src= formatConvBuffer;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2246 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2247 else if(srcFormat==IMGFMT_BGR24)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2248 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2249 RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2250 src= formatConvBuffer;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2251 }
4578
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2252 else if(srcFormat==IMGFMT_BGR16)
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2253 {
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2254 RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2255 src= formatConvBuffer;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2256 }
4580
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2257 else if(srcFormat==IMGFMT_BGR15)
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2258 {
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2259 RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2260 src= formatConvBuffer;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2261 }
4558
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2262 else if(srcFormat==IMGFMT_RGB32)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2263 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2264 RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2265 src= formatConvBuffer;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2266 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2267 else if(srcFormat==IMGFMT_RGB24)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2268 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2269 RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2270 src= formatConvBuffer;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2271 }
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2272
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2273 #ifdef HAVE_MMX
11000
6e35326c742f many small typo and grammar fixes
gabucino
parents: 9970
diff changeset
2274 // use the new MMX scaler if the mmx2 can't be used (its faster than the x86asm one)
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2275 if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2276 #else
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2277 if(!(flags&SWS_FAST_BILINEAR))
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2278 #endif
3272
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2279 {
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2280 RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2281 }
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2282 else // Fast Bilinear upscale / crap downscale
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2283 {
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2284 #if defined(ARCH_X86) || defined(ARCH_X86_64)
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2285 #ifdef HAVE_MMX2
2671
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2286 int i;
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2287 if(canMMX2BeUsed)
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2288 {
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2289 asm volatile(
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2290 "pxor %%mm7, %%mm7 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2291 "mov %0, %%"REG_c" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2292 "mov %1, %%"REG_D" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2293 "mov %2, %%"REG_d" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2294 "mov %3, %%"REG_b" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2295 "xor %%"REG_a", %%"REG_a" \n\t" // i
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2296 PREFETCH" (%%"REG_c") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2297 PREFETCH" 32(%%"REG_c") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2298 PREFETCH" 64(%%"REG_c") \n\t"
2520
b58c43aab619 3dnow prefetch & sfence
michael
parents: 2519
diff changeset
2299
14556
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2300 #ifdef ARCH_X86_64
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2301
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2302 #define FUNNY_Y_CODE \
14556
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2303 "movl (%%"REG_b"), %%esi \n\t"\
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2304 "call *%4 \n\t"\
14556
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2305 "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2306 "add %%"REG_S", %%"REG_c" \n\t"\
14536
6f13379b1464 100l, fix broken AMD64 patch. To whoever applied it: Did you actually _try_
reimar
parents: 13733
diff changeset
2307 "add %%"REG_a", %%"REG_D" \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2308 "xor %%"REG_a", %%"REG_a" \n\t"\
2520
b58c43aab619 3dnow prefetch & sfence
michael
parents: 2519
diff changeset
2309
14556
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2310 #else
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2311
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2312 #define FUNNY_Y_CODE \
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2313 "movl (%%"REG_b"), %%esi \n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2314 "call *%4 \n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2315 "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2316 "add %%"REG_a", %%"REG_D" \n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2317 "xor %%"REG_a", %%"REG_a" \n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2318
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2319 #endif
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2320
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2321 FUNNY_Y_CODE
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2322 FUNNY_Y_CODE
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2323 FUNNY_Y_CODE
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2324 FUNNY_Y_CODE
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2325 FUNNY_Y_CODE
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2326 FUNNY_Y_CODE
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2327 FUNNY_Y_CODE
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2328 FUNNY_Y_CODE
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2329
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2330 :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2331 "m" (funnyYCode)
14536
6f13379b1464 100l, fix broken AMD64 patch. To whoever applied it: Did you actually _try_
reimar
parents: 13733
diff changeset
2332 : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2333 );
3215
3083616ba4d6 sliced scaleing bugfix
michael
parents: 3209
diff changeset
2334 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2335 }
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2336 else
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2337 {
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2338 #endif
18575
e00cea3e1732 fix variable type used via "m" asm constraint to match size used in asm.
reimar
parents: 18392
diff changeset
2339 long xInc_shr16 = xInc >> 16;
e00cea3e1732 fix variable type used via "m" asm constraint to match size used in asm.
reimar
parents: 18392
diff changeset
2340 uint16_t xInc_mask = xInc & 0xffff;
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2341 //NO MMX just normal asm ...
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2342 asm volatile(
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2343 "xor %%"REG_a", %%"REG_a" \n\t" // i
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2344 "xor %%"REG_b", %%"REG_b" \n\t" // xx
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2345 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
2346 ASMALIGN16
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2347 "1: \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2348 "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2349 "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2350 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2351 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2352 "shll $16, %%edi \n\t"
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2353 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2354 "mov %1, %%"REG_D" \n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2355 "shrl $9, %%esi \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2356 "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2357 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2358 "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2359
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2360 "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2361 "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2362 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2363 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2364 "shll $16, %%edi \n\t"
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2365 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2366 "mov %1, %%"REG_D" \n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2367 "shrl $9, %%esi \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2368 "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2369 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2370 "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2371
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2372
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2373 "add $2, %%"REG_a" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2374 "cmp %2, %%"REG_a" \n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2375 " jb 1b \n\t"
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2376
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2377
15617
130dd060f723 one bugfix and a few gcc4 bug workaorunds by (Gianluigi Tiesi: mplayer, netfarm it)
michael
parents: 15295
diff changeset
2378 :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2379 : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2380 );
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2381 #ifdef HAVE_MMX2
11000
6e35326c742f many small typo and grammar fixes
gabucino
parents: 9970
diff changeset
2382 } //if MMX2 can't be used
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2383 #endif
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2384 #else
2671
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2385 int i;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2386 unsigned int xpos=0;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2387 for(i=0;i<dstWidth;i++)
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2388 {
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2389 register unsigned int xx=xpos>>16;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2390 register unsigned int xalpha=(xpos&0xFFFF)>>9;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2391 dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2392 xpos+=xInc;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2393 }
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2394 #endif
3272
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2395 }
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2396 }
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2397
16739
e91f944f6ed9 Change unsigned->signed and int->long, this fits the asm code better on 64
reimar
parents: 15972
diff changeset
2398 inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2399 int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2400 int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2401 int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2402 int32_t *mmx2FilterPos)
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2403 {
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2404 if(srcFormat==IMGFMT_YUY2)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2405 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2406 RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2407 src1= formatConvBuffer;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2408 src2= formatConvBuffer+2048;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2409 }
9071
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2410 else if(srcFormat==IMGFMT_UYVY)
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2411 {
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2412 RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2413 src1= formatConvBuffer;
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2414 src2= formatConvBuffer+2048;
25baacd1c650 UYVY input
michael
parents: 8254
diff changeset
2415 }
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2416 else if(srcFormat==IMGFMT_BGR32)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2417 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2418 RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2419 src1= formatConvBuffer;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2420 src2= formatConvBuffer+2048;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2421 }
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2422 else if(srcFormat==IMGFMT_BGR24)
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2423 {
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2424 RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2425 src1= formatConvBuffer;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2426 src2= formatConvBuffer+2048;
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2427 }
4578
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2428 else if(srcFormat==IMGFMT_BGR16)
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2429 {
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2430 RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2431 src1= formatConvBuffer;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2432 src2= formatConvBuffer+2048;
395b1233b856 bgr16 input support
michael
parents: 4558
diff changeset
2433 }
4580
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2434 else if(srcFormat==IMGFMT_BGR15)
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2435 {
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2436 RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2437 src1= formatConvBuffer;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2438 src2= formatConvBuffer+2048;
f01ed4dfa868 bgr15 input support
michael
parents: 4579
diff changeset
2439 }
4558
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2440 else if(srcFormat==IMGFMT_RGB32)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2441 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2442 RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2443 src1= formatConvBuffer;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2444 src2= formatConvBuffer+2048;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2445 }
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2446 else if(srcFormat==IMGFMT_RGB24)
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2447 {
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2448 RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2449 src1= formatConvBuffer;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2450 src2= formatConvBuffer+2048;
aac57ef92bab rgb32 & rgb24 input support
michael
parents: 4554
diff changeset
2451 }
4481
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
2452 else if(isGray(srcFormat))
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
2453 {
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
2454 return;
b8ec9cc1b2c5 MMX(2) optimized YUY2 input
michael
parents: 4467
diff changeset
2455 }
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2456
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2457 #ifdef HAVE_MMX
11000
6e35326c742f many small typo and grammar fixes
gabucino
parents: 9970
diff changeset
2458 // use the new MMX scaler if the mmx2 can't be used (its faster than the x86asm one)
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2459 if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2460 #else
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2461 if(!(flags&SWS_FAST_BILINEAR))
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2462 #endif
3272
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2463 {
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2464 RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2465 RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2466 }
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2467 else // Fast Bilinear upscale / crap downscale
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2468 {
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2469 #if defined(ARCH_X86) || defined(ARCH_X86_64)
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2470 #ifdef HAVE_MMX2
2671
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2471 int i;
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2472 if(canMMX2BeUsed)
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2473 {
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2474 asm volatile(
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2475 "pxor %%mm7, %%mm7 \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2476 "mov %0, %%"REG_c" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2477 "mov %1, %%"REG_D" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2478 "mov %2, %%"REG_d" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2479 "mov %3, %%"REG_b" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2480 "xor %%"REG_a", %%"REG_a" \n\t" // i
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2481 PREFETCH" (%%"REG_c") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2482 PREFETCH" 32(%%"REG_c") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2483 PREFETCH" 64(%%"REG_c") \n\t"
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2484
14556
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2485 #ifdef ARCH_X86_64
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2486
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2487 #define FUNNY_UV_CODE \
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2488 "movl (%%"REG_b"), %%esi \n\t"\
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2489 "call *%4 \n\t"\
14556
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2490 "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2491 "add %%"REG_S", %%"REG_c" \n\t"\
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2492 "add %%"REG_a", %%"REG_D" \n\t"\
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2493 "xor %%"REG_a", %%"REG_a" \n\t"\
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2494
14556
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2495 #else
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2496
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2497 #define FUNNY_UV_CODE \
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2498 "movl (%%"REG_b"), %%esi \n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2499 "call *%4 \n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2500 "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2501 "add %%"REG_a", %%"REG_D" \n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2502 "xor %%"REG_a", %%"REG_a" \n\t"\
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2503
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2504 #endif
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2505
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2506 FUNNY_UV_CODE
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2507 FUNNY_UV_CODE
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2508 FUNNY_UV_CODE
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2509 FUNNY_UV_CODE
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2510 "xor %%"REG_a", %%"REG_a" \n\t" // i
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2511 "mov %5, %%"REG_c" \n\t" // src
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2512 "mov %1, %%"REG_D" \n\t" // buf1
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2513 "add $4096, %%"REG_D" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2514 PREFETCH" (%%"REG_c") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2515 PREFETCH" 32(%%"REG_c") \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2516 PREFETCH" 64(%%"REG_c") \n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2517
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2518 FUNNY_UV_CODE
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2519 FUNNY_UV_CODE
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2520 FUNNY_UV_CODE
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2521 FUNNY_UV_CODE
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2522
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2523 :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2524 "m" (funnyUVCode), "m" (src2)
14556
31cb219364a4 fix few x86_64 registers handling
aurel
parents: 14536
diff changeset
2525 : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2526 );
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2527 for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2528 {
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2529 // printf("%d %d %d\n", dstWidth, i, srcW);
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2530 dst[i] = src1[srcW-1]*128;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2531 dst[i+2048] = src2[srcW-1]*128;
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2532 }
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2533 }
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2534 else
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2535 {
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2536 #endif
15617
130dd060f723 one bugfix and a few gcc4 bug workaorunds by (Gianluigi Tiesi: mplayer, netfarm it)
michael
parents: 15295
diff changeset
2537 long xInc_shr16 = (long) (xInc >> 16);
18575
e00cea3e1732 fix variable type used via "m" asm constraint to match size used in asm.
reimar
parents: 18392
diff changeset
2538 uint16_t xInc_mask = xInc & 0xffff;
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2539 asm volatile(
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2540 "xor %%"REG_a", %%"REG_a" \n\t" // i
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2541 "xor %%"REG_b", %%"REG_b" \n\t" // xx
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2542 "xorl %%ecx, %%ecx \n\t" // 2*xalpha
18104
7b408d60de9e add support for intel mac. mp3lib is not fixed yet.
nplourde
parents: 17641
diff changeset
2543 ASMALIGN16
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2544 "1: \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2545 "mov %0, %%"REG_S" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2546 "movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx]
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2547 "movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1]
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2548 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2549 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2550 "shll $16, %%edi \n\t"
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2551 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2552 "mov %1, %%"REG_D" \n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2553 "shrl $9, %%esi \n\t"
15845
7ccbf83d108d Another REG_d -> REG_D fix.
reimar
parents: 15813
diff changeset
2554 "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2555
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2556 "movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx]
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2557 "movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1]
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2558 "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2559 "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2560 "shll $16, %%edi \n\t"
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2561 "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2562 "mov %1, %%"REG_D" \n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2563 "shrl $9, %%esi \n\t"
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2564 "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2565
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2566 "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2567 "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2568 "add $1, %%"REG_a" \n\t"
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2569 "cmp %2, %%"REG_a" \n\t"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2570 " jb 1b \n\t"
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2571
15972
e4360060b79a Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents: 15858
diff changeset
2572 /* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
e4360060b79a Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents: 15858
diff changeset
2573 which is needed to support GCC-4.0 */
e4360060b79a Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents: 15858
diff changeset
2574 #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
e4360060b79a Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents: 15858
diff changeset
2575 :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
e4360060b79a Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents: 15858
diff changeset
2576 #else
15858
045f91e5e67d Reverts GCC-4.0 "fixe" which broke GCC-3.3 and maybe others
gpoirier
parents: 15845
diff changeset
2577 :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
15972
e4360060b79a Re-enables the GCC-4 fix for AMD-64 only. Patch by cartman and poirierg
gpoirier
parents: 15858
diff changeset
2578 #endif
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2579 "r" (src2)
13720
821f464b4d90 adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
aurel
parents: 12698
diff changeset
2580 : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2581 );
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2582 #ifdef HAVE_MMX2
11000
6e35326c742f many small typo and grammar fixes
gabucino
parents: 9970
diff changeset
2583 } //if MMX2 can't be used
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2584 #endif
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2585 #else
2671
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2586 int i;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2587 unsigned int xpos=0;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2588 for(i=0;i<dstWidth;i++)
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2589 {
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2590 register unsigned int xx=xpos>>16;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2591 register unsigned int xalpha=(xpos&0xFFFF)>>9;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2592 dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2593 dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
2566
a350d8bed636 bugfixes
michael
parents: 2540
diff changeset
2594 /* slower
a350d8bed636 bugfixes
michael
parents: 2540
diff changeset
2595 dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
a350d8bed636 bugfixes
michael
parents: 2540
diff changeset
2596 dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
a350d8bed636 bugfixes
michael
parents: 2540
diff changeset
2597 */
2671
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2598 xpos+=xInc;
555cb027c7a7 fixed warnings
michael
parents: 2669
diff changeset
2599 }
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2600 #endif
3272
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2601 }
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2602 }
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2603
9499
bc5b87370cd1 cleanup
michael
parents: 9494
diff changeset
2604 static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
bc5b87370cd1 cleanup
michael
parents: 9494
diff changeset
2605 int srcSliceH, uint8_t* dst[], int dstStride[]){
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2606
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2607 /* load a few things into local vars to make the code more readable? and faster */
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2608 const int srcW= c->srcW;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2609 const int dstW= c->dstW;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2610 const int dstH= c->dstH;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2611 const int chrDstW= c->chrDstW;
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2612 const int chrSrcW= c->chrSrcW;
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2613 const int lumXInc= c->lumXInc;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2614 const int chrXInc= c->chrXInc;
4295
67c56df76a44 copyright(swscaler) = GPL
michael
parents: 4290
diff changeset
2615 const int dstFormat= c->dstFormat;
6503
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
2616 const int srcFormat= c->srcFormat;
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2617 const int flags= c->flags;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2618 const int canMMX2BeUsed= c->canMMX2BeUsed;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2619 int16_t *vLumFilterPos= c->vLumFilterPos;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2620 int16_t *vChrFilterPos= c->vChrFilterPos;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2621 int16_t *hLumFilterPos= c->hLumFilterPos;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2622 int16_t *hChrFilterPos= c->hChrFilterPos;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2623 int16_t *vLumFilter= c->vLumFilter;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2624 int16_t *vChrFilter= c->vChrFilter;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2625 int16_t *hLumFilter= c->hLumFilter;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2626 int16_t *hChrFilter= c->hChrFilter;
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
2627 int32_t *lumMmxFilter= c->lumMmxFilter;
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
2628 int32_t *chrMmxFilter= c->chrMmxFilter;
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2629 const int vLumFilterSize= c->vLumFilterSize;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2630 const int vChrFilterSize= c->vChrFilterSize;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2631 const int hLumFilterSize= c->hLumFilterSize;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2632 const int hChrFilterSize= c->hChrFilterSize;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2633 int16_t **lumPixBuf= c->lumPixBuf;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2634 int16_t **chrPixBuf= c->chrPixBuf;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2635 const int vLumBufSize= c->vLumBufSize;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2636 const int vChrBufSize= c->vChrBufSize;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2637 uint8_t *funnyYCode= c->funnyYCode;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2638 uint8_t *funnyUVCode= c->funnyUVCode;
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2639 uint8_t *formatConvBuffer= c->formatConvBuffer;
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2640 const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2641 const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
9494
543ab3909b78 sws_ prefix, more seperation between internal & external swscaler API
michael
parents: 9476
diff changeset
2642 int lastDstY;
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2643
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2644 /* vars which will change and which we need to store back in the context */
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2645 int dstY= c->dstY;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2646 int lumBufIndex= c->lumBufIndex;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2647 int chrBufIndex= c->chrBufIndex;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2648 int lastInLumBuf= c->lastInLumBuf;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2649 int lastInChrBuf= c->lastInChrBuf;
6540
5b3cace64e9d 100l (non mmx versions didnt compile)
michael
parents: 6532
diff changeset
2650
5b3cace64e9d 100l (non mmx versions didnt compile)
michael
parents: 6532
diff changeset
2651 if(isPacked(c->srcFormat)){
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2652 src[0]=
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2653 src[1]=
9499
bc5b87370cd1 cleanup
michael
parents: 9494
diff changeset
2654 src[2]= src[0];
6540
5b3cace64e9d 100l (non mmx versions didnt compile)
michael
parents: 6532
diff changeset
2655 srcStride[0]=
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2656 srcStride[1]=
9499
bc5b87370cd1 cleanup
michael
parents: 9494
diff changeset
2657 srcStride[2]= srcStride[0];
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2658 }
6540
5b3cace64e9d 100l (non mmx versions didnt compile)
michael
parents: 6532
diff changeset
2659 srcStride[1]<<= c->vChrDrop;
5b3cace64e9d 100l (non mmx versions didnt compile)
michael
parents: 6532
diff changeset
2660 srcStride[2]<<= c->vChrDrop;
4419
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2661
6517
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2662 // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2663 // (int)dst[0], (int)dst[1], (int)dst[2]);
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2664
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2665 #if 0 //self test FIXME move to a vfilter or something
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2666 {
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2667 static volatile int i=0;
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2668 i++;
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2669 if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2670 selfTest(src, srcStride, c->srcW, c->srcH);
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2671 i--;
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2672 }
5ac294a77a87 selftest support
michael
parents: 6503
diff changeset
2673 #endif
4554
16fdb694cf13 swScale internally uses yuv2rgb now if possible
michael
parents: 4481
diff changeset
2674
16fdb694cf13 swScale internally uses yuv2rgb now if possible
michael
parents: 4481
diff changeset
2675 //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
16fdb694cf13 swScale internally uses yuv2rgb now if possible
michael
parents: 4481
diff changeset
2676 //dstStride[0],dstStride[1],dstStride[2]);
4419
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2677
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2678 if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2679 {
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2680 static int firstTime=1; //FIXME move this into the context perhaps
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2681 if(flags & SWS_PRINT_INFO && firstTime)
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2682 {
9970
9deb7e948fa6 killed an mp_msg occurance
alex
parents: 9921
diff changeset
2683 MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
4419
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2684 "SwScaler: ->cannot do aligned memory acesses anymore\n");
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2685 firstTime=0;
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2686 }
f002407e298d YV12 like formats support (I420/IYUV)
michael
parents: 4297
diff changeset
2687 }
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2688
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2689 /* Note the user might start scaling the picture in the middle so this will not get executed
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2690 this is not really intended but works currently, so people might do it */
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2691 if(srcSliceY ==0){
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2692 lumBufIndex=0;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2693 chrBufIndex=0;
4467
9512d6832b38 YUY2, BGR24, BGR32 input support (no mmx yet)
michael
parents: 4419
diff changeset
2694 dstY=0;
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2695 lastInLumBuf= -1;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2696 lastInChrBuf= -1;
3272
7e4399d1eb65 horizontal up/downscale linear & cubic
michael
parents: 3215
diff changeset
2697 }
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2698
9494
543ab3909b78 sws_ prefix, more seperation between internal & external swscaler API
michael
parents: 9476
diff changeset
2699 lastDstY= dstY;
543ab3909b78 sws_ prefix, more seperation between internal & external swscaler API
michael
parents: 9476
diff changeset
2700
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2701 for(;dstY < dstH; dstY++){
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2702 unsigned char *dest =dst[0]+dstStride[0]*dstY;
6520
f9a1870bc4a5 cleanup
michael
parents: 6517
diff changeset
2703 const int chrDstY= dstY>>c->chrDstVSubSample;
f9a1870bc4a5 cleanup
michael
parents: 6517
diff changeset
2704 unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
f9a1870bc4a5 cleanup
michael
parents: 6517
diff changeset
2705 unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2706
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2707 const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2708 const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2709 const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2710 const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2711
11122
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
2712 //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
c552fe6acbaa rounding fixes
michael
parents: 11000
diff changeset
2713 // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
4290
1f8ceb12284d general convolution filtering of the source picture
michael
parents: 4276
diff changeset
2714 //handle holes (FAST_BILINEAR & weird filters)
1f8ceb12284d general convolution filtering of the source picture
michael
parents: 4276
diff changeset
2715 if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
1f8ceb12284d general convolution filtering of the source picture
michael
parents: 4276
diff changeset
2716 if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
1f8ceb12284d general convolution filtering of the source picture
michael
parents: 4276
diff changeset
2717 //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2718 ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2719 ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
2216
9da2a0515184 software yv12->rgb scaler - separated from fsdga
arpi
parents:
diff changeset
2720
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2721 // Do we have enough lines in this slice to output the dstY line
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2722 if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2723 {
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2724 //Do horizontal scaling
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2725 while(lastInLumBuf < lastLumSrcY)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2726 {
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2727 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2728 lumBufIndex++;
4290
1f8ceb12284d general convolution filtering of the source picture
michael
parents: 4276
diff changeset
2729 // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2730 ASSERT(lumBufIndex < 2*vLumBufSize)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2731 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2732 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2733 // printf("%d %d\n", lumBufIndex, vLumBufSize);
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2734 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2735 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2736 funnyYCode, c->srcFormat, formatConvBuffer,
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2737 c->lumMmx2Filter, c->lumMmx2FilterPos);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2738 lastInLumBuf++;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2739 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2740 while(lastInChrBuf < lastChrSrcY)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2741 {
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2742 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2743 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2744 chrBufIndex++;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2745 ASSERT(chrBufIndex < 2*vChrBufSize)
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2746 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2747 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2748 //FIXME replace parameters through context struct (some at least)
6503
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
2749
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
2750 if(!(isGray(srcFormat) || isGray(dstFormat)))
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2751 RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2752 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2753 funnyUVCode, c->srcFormat, formatConvBuffer,
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2754 c->chrMmx2Filter, c->chrMmx2FilterPos);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2755 lastInChrBuf++;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2756 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2757 //wrap buf index around to stay inside the ring buffer
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2758 if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2759 if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2760 }
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2761 else // not enough lines left in this slice -> load the rest in the buffer
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2762 {
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2763 /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2764 firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2765 lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2766 vChrBufSize, vLumBufSize);*/
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2767
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2768 //Do horizontal scaling
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2769 while(lastInLumBuf+1 < srcSliceY + srcSliceH)
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2770 {
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2771 uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2772 lumBufIndex++;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2773 ASSERT(lumBufIndex < 2*vLumBufSize)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2774 ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2775 ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2776 RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2777 flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2778 funnyYCode, c->srcFormat, formatConvBuffer,
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2779 c->lumMmx2Filter, c->lumMmx2FilterPos);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2780 lastInLumBuf++;
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2781 }
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2782 while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2783 {
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2784 uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2785 uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2786 chrBufIndex++;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2787 ASSERT(chrBufIndex < 2*vChrBufSize)
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2788 ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2789 ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
6503
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
2790
5a2bd4021c8e cleanup & grayscale support
michael
parents: 6492
diff changeset
2791 if(!(isGray(srcFormat) || isGray(dstFormat)))
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2792 RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2793 flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
5452
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2794 funnyUVCode, c->srcFormat, formatConvBuffer,
eb87391a5292 overread in the mmx2 horizontal scaler fixed
michael
parents: 4923
diff changeset
2795 c->chrMmx2Filter, c->chrMmx2FilterPos);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2796 lastInChrBuf++;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2797 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2798 //wrap buf index around to stay inside the ring buffer
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2799 if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2800 if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
11000
6e35326c742f many small typo and grammar fixes
gabucino
parents: 9970
diff changeset
2801 break; //we can't output a dstY line so let's try with the next slice
2469
03abc2743ed6 downscale
michael
parents: 2326
diff changeset
2802 }
2264
7851375ea156 increased precission of s_xinc s_xinc2 (needed for the mmx2 bugfix)
michael
parents: 2237
diff changeset
2803
2748
01dbf100b4f8 better dithering
michael
parents: 2730
diff changeset
2804 #ifdef HAVE_MMX
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2805 b5Dither= dither8[dstY&1];
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2806 g6Dither= dither4[dstY&1];
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2807 g5Dither= dither8[dstY&1];
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2808 r5Dither= dither8[(dstY+1)&1];
2748
01dbf100b4f8 better dithering
michael
parents: 2730
diff changeset
2809 #endif
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2810 if(dstY < dstH-2)
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2811 {
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2812 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2813 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2814 #ifdef HAVE_MMX
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2815 int i;
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2816 for(i=0; i<vLumFilterSize; i++)
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2817 {
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2818 lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2819 lumMmxFilter[4*i+2]=
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2820 lumMmxFilter[4*i+3]=
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2821 ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2822 }
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2823 for(i=0; i<vChrFilterSize; i++)
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2824 {
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2825 chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2826 chrMmxFilter[4*i+2]=
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2827 chrMmxFilter[4*i+3]=
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2828 ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2829 }
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2830 #endif
14715
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2831 if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2832 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2833 if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2834 RENAME(yuv2nv12X)(c,
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2835 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2836 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2837 dest, uDest, dstW, chrDstW, dstFormat);
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2838 }
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2839 else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2840 {
7351
064ada190b6c fixing y422p output
michael
parents: 6679
diff changeset
2841 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
064ada190b6c fixing y422p output
michael
parents: 6679
diff changeset
2842 if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2843 if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2844 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2845 int16_t *lumBuf = lumPixBuf[0];
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2846 int16_t *chrBuf= chrPixBuf[0];
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2847 RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2848 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2849 else //General YV12
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2850 {
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
2851 RENAME(yuv2yuvX)(c,
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2852 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2853 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
9414
04c6fd75ed96 cleanup
michael
parents: 9413
diff changeset
2854 dest, uDest, vDest, dstW, chrDstW);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2855 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2856 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2857 else
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2858 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2859 ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2860 ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2861 if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2862 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2863 int chrAlpha= vChrFilter[2*dstY+1];
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
2864 RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
2865 dest, dstW, chrAlpha, dstFormat, flags, dstY);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2866 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2867 else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2868 {
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2869 int lumAlpha= vLumFilter[2*dstY+1];
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2870 int chrAlpha= vChrFilter[2*dstY+1];
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
2871 RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
2872 dest, dstW, lumAlpha, chrAlpha, dstY);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2873 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2874 else //General RGB
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2875 {
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
2876 RENAME(yuv2packedX)(c,
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2877 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2878 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
9413
0d86fe21b281 cleanup
michael
parents: 9071
diff changeset
2879 dest, dstW, dstY);
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2880 }
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2881 }
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2882 }
11000
6e35326c742f many small typo and grammar fixes
gabucino
parents: 9970
diff changeset
2883 else // hmm looks like we can't use MMX here without overwriting this array's tail
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2884 {
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2885 int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2886 int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
14715
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2887 if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2888 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2889 if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2890 yuv2nv12XinC(
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2891 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2892 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2893 dest, uDest, dstW, chrDstW, dstFormat);
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2894 }
1fab95e4513c Improved NV12/NV21 support.
syrjala
parents: 14556
diff changeset
2895 else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2896 {
7351
064ada190b6c fixing y422p output
michael
parents: 6679
diff changeset
2897 const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
064ada190b6c fixing y422p output
michael
parents: 6679
diff changeset
2898 if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
6540
5b3cace64e9d 100l (non mmx versions didnt compile)
michael
parents: 6532
diff changeset
2899 yuv2yuvXinC(
6532
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2900 vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
9834d9980c45 yvu9 support (other planar yuv formats with other chroma subsamplings should be trivial to add, if they had a IMGFMT)
michael
parents: 6520
diff changeset
2901 vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
6540
5b3cace64e9d 100l (non mmx versions didnt compile)
michael
parents: 6532
diff changeset
2902 dest, uDest, vDest, dstW, chrDstW);
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2903 }
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2904 else
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2905 {
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2906 ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2907 ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
7723
11492d5b0896 mmx yuy2 output
michael
parents: 7720
diff changeset
2908 yuv2packedXinC(c,
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2909 vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2910 vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
6578
3727eb94a783 use unified yuv2rgb init
michael
parents: 6554
diff changeset
2911 dest, dstW, dstY);
3352
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2912 }
64121e8a43f5 print more info if -v
michael
parents: 3345
diff changeset
2913 }
3344
e87c59969d17 vertical cubic/linear scaling
michael
parents: 3299
diff changeset
2914 }
2534
cc9d3fd626f0 patch from Martin Decky <deckm1am@ss1000.ms.mff.cuni.cz> applied and unnecassery "memory" removed
michael
parents: 2521
diff changeset
2915
cc9d3fd626f0 patch from Martin Decky <deckm1am@ss1000.ms.mff.cuni.cz> applied and unnecassery "memory" removed
michael
parents: 2521
diff changeset
2916 #ifdef HAVE_MMX
cc9d3fd626f0 patch from Martin Decky <deckm1am@ss1000.ms.mff.cuni.cz> applied and unnecassery "memory" removed
michael
parents: 2521
diff changeset
2917 __asm __volatile(SFENCE:::"memory");
2566
a350d8bed636 bugfixes
michael
parents: 2540
diff changeset
2918 __asm __volatile(EMMS:::"memory");
2534
cc9d3fd626f0 patch from Martin Decky <deckm1am@ss1000.ms.mff.cuni.cz> applied and unnecassery "memory" removed
michael
parents: 2521
diff changeset
2919 #endif
4276
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2920 /* store changed local vars back in the context */
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2921 c->dstY= dstY;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2922 c->lumBufIndex= lumBufIndex;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2923 c->chrBufIndex= chrBufIndex;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2924 c->lastInLumBuf= lastInLumBuf;
9199d15cb4e0 removed global vars so that multiple swscalers can be used
michael
parents: 4248
diff changeset
2925 c->lastInChrBuf= lastInChrBuf;
9494
543ab3909b78 sws_ prefix, more seperation between internal & external swscaler API
michael
parents: 9476
diff changeset
2926
543ab3909b78 sws_ prefix, more seperation between internal & external swscaler API
michael
parents: 9476
diff changeset
2927 return dstY - lastDstY;
3641
33c560ffd3dc minor bugfixes (noone noticed them)
michael
parents: 3352
diff changeset
2928 }