annotate postproc/rgb2rgb_template.c @ 14110:9d242e613fd8
Some other fixes. Better wordings and better translations of tech stuff
author | gpoirier |
---|---|
date | Sun, 05 Dec 2004 21:51:39 +0000 |
parents | 821f464b4d90 |
children | 49dd10a86b23 |
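This page annotates the template body that rgb2rgb.c includes once per CPU flavour; the RENAME() macro and the HAVE_MMX / HAVE_MMX2 / HAVE_3DNOW feature defines used throughout are set up by the including file. A minimal sketch of that wrapper, assuming the usual token-pasting scheme (illustrative only, not copied from this changeset):

    /* in rgb2rgb.c, roughly: */
    #undef  RENAME
    #undef  HAVE_MMX
    #define RENAME(a) a ## _C
    #include "rgb2rgb_template.c"

    #undef  RENAME
    #define HAVE_MMX
    #define RENAME(a) a ## _MMX
    #include "rgb2rgb_template.c"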
rev | line source |
---|---|
2694 | 1 /* |
2538 | 2 * |
3 * rgb2rgb.c, Software RGB to RGB convertor | |
2732 | 4 * pluralize by Software PAL8 to RGB convertor |
5 * Software YUV to YUV convertor | |
6 * Software YUV to RGB convertor | |
2538 | 7 * Written by Nick Kurshev. |
3132 | 8 * palette & yuv & runtime cpu stuff by Michael (michaelni@gmx.at) (under GPL) |
13423 | 9 * lot of big-endian byteorder fixes by Alex Beregszaszi |
2538 | 10 */ |
11 | |
6492 | 12 #include <stddef.h> |
13 #include <inttypes.h> /* for __WORDSIZE */ | |
14 | |
15 #ifndef __WORDSIZE | |
7421 | 16 // #warning You have misconfigured system and probably will lose performance! |
17 #define __WORDSIZE MP_WORDSIZE | |
6492 | 18 #endif |
19 | |
3132 | 20 #undef PREFETCH |
21 #undef MOVNTQ | |
22 #undef EMMS | |
23 #undef SFENCE | |
24 #undef MMREG_SIZE | |
25 #undef PREFETCHW | |
26 #undef PAVGB | |
2755 | 27 |
3132 | 28 #ifdef HAVE_SSE2 |
29 #define MMREG_SIZE 16 | |
30 #else | |
31 #define MMREG_SIZE 8 | |
2535 | 32 #endif |
2513 | 33 |
3132 | 34 #ifdef HAVE_3DNOW |
35 #define PREFETCH "prefetch" | |
36 #define PREFETCHW "prefetchw" | |
37 #define PAVGB "pavgusb" | |
38 #elif defined ( HAVE_MMX2 ) | |
39 #define PREFETCH "prefetchnta" | |
40 #define PREFETCHW "prefetcht0" | |
41 #define PAVGB "pavgb" | |
42 #else | |
43 #define PREFETCH "/nop" | |
44 #define PREFETCHW "/nop" | |
45 #endif | |
46 | |
47 #ifdef HAVE_3DNOW | |
48 /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */ | |
49 #define EMMS "femms" | |
50 #else | |
51 #define EMMS "emms" | |
52 #endif | |
53 | |
54 #ifdef HAVE_MMX2 | |
55 #define MOVNTQ "movntq" | |
56 #define SFENCE "sfence" | |
57 #else | |
58 #define MOVNTQ "movq" | |
59 #define SFENCE "/nop" | |
60 #endif | |
61 | |
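The macros above pick per-CPU spellings for the instruction templates that follow, so one asm string serves every build: with HAVE_MMX2, for example, PREFETCH" 32%1\n\t" assembles as a prefetchnta, with HAVE_3DNOW as a prefetch, and in the plain-MMX build it falls back to the "/nop" placeholder; likewise MOVNTQ is a non-temporal movntq only where MMX2 provides it and a plain movq otherwise.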
62 static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,unsigned src_size) | |
2504 | 63 { |
2508 | 64 uint8_t *dest = dst; |
2677 | 65 const uint8_t *s = src; |
66 const uint8_t *end; | |
2510 | 67 #ifdef HAVE_MMX |
6605 | 68 const uint8_t *mm_end; |
2510 | 69 #endif |
2504 | 70 end = s + src_size; |
2510 | 71 #ifdef HAVE_MMX |
2538 | 72 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); |
6605 | 73 mm_end = end - 23; |
2538 | 74 __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory"); |
2510 | 75 while(s < mm_end) |
76 { | |
2511 | 77 __asm __volatile( |
2538 | 78 PREFETCH" 32%1\n\t" |
2510 | 79 "movd %1, %%mm0\n\t" |
2738 | 80 "punpckldq 3%1, %%mm0\n\t" |
81 "movd 6%1, %%mm1\n\t" | |
82 "punpckldq 9%1, %%mm1\n\t" | |
83 "movd 12%1, %%mm2\n\t" | |
84 "punpckldq 15%1, %%mm2\n\t" | |
85 "movd 18%1, %%mm3\n\t" | |
86 "punpckldq 21%1, %%mm3\n\t" | |
2510 | 87 "pand %%mm7, %%mm0\n\t" |
2738 | 88 "pand %%mm7, %%mm1\n\t" |
2510 | 89 "pand %%mm7, %%mm2\n\t" |
2738 | 90 "pand %%mm7, %%mm3\n\t" |
2511 | 91 MOVNTQ" %%mm0, %0\n\t" |
2738 | 92 MOVNTQ" %%mm1, 8%0\n\t" |
93 MOVNTQ" %%mm2, 16%0\n\t" | |
94 MOVNTQ" %%mm3, 24%0" | |
2510 | 95 :"=m"(*dest) |
96 :"m"(*s) | |
97 :"memory"); | |
2738 | 98 dest += 32; |
99 s += 24; | |
2510 | 100 } |
2513 | 101 __asm __volatile(SFENCE:::"memory"); |
2511 | 102 __asm __volatile(EMMS:::"memory"); |
2510 | 103 #endif |
2504 | 104 while(s < end) |
105 { | |
13423 | 106 #ifdef WORDS_BIGENDIAN |
107 *dest++ = 0; | |
108 *dest++ = *s++; | |
109 *dest++ = *s++; | |
110 *dest++ = *s++; | |
111 #else | |
2508 | 112 *dest++ = *s++; |
113 *dest++ = *s++; | |
114 *dest++ = *s++; | |
115 *dest++ = 0; | |
13423 | 116 #endif |
2504 | 117 } |
118 } | |
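For reference, each MMX iteration of rgb24to32 above expands eight 3-byte pixels (24 source bytes, hence mm_end = end - 23) into eight 4-byte pixels, with mask32 presumably clearing the inserted filler byte. A stand-alone plain-C sketch of the little-endian fallback, with a hypothetical helper name that is not part of this file:

    static inline void rgb24to32_ref(const uint8_t *src, uint8_t *dst, unsigned src_size)
    {
    	unsigned i;
    	for(i = 0; 3*i + 2 < src_size; i++)
    	{
    		dst[4*i + 0] = src[3*i + 0];
    		dst[4*i + 1] = src[3*i + 1];
    		dst[4*i + 2] = src[3*i + 2];
    		dst[4*i + 3] = 0; /* filler/alpha byte, as in the loop above */
    	}
    }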
2505 | 119 |
3132 | 120 static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,unsigned src_size) |
2505 | 121 { |
122 uint8_t *dest = dst; | |
2677 | 123 const uint8_t *s = src; |
124 const uint8_t *end; | |
2517 | 125 #ifdef HAVE_MMX |
6605 | 126 const uint8_t *mm_end; |
2517 | 127 #endif |
2505 | 128 end = s + src_size; |
2517 | 129 #ifdef HAVE_MMX |
2538 | 130 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); |
6605 | 131 mm_end = end - 31; |
2517 | 132 while(s < mm_end) |
133 { | |
134 __asm __volatile( | |
2538 | 135 PREFETCH" 32%1\n\t" |
2517 | 136 "movq %1, %%mm0\n\t" |
137 "movq 8%1, %%mm1\n\t" | |
2746 | 138 "movq 16%1, %%mm4\n\t" |
139 "movq 24%1, %%mm5\n\t" | |
2517 | 140 "movq %%mm0, %%mm2\n\t" |
141 "movq %%mm1, %%mm3\n\t" | |
2746 | 142 "movq %%mm4, %%mm6\n\t" |
143 "movq %%mm5, %%mm7\n\t" | |
2517 | 144 "psrlq $8, %%mm2\n\t" |
145 "psrlq $8, %%mm3\n\t" | |
2746 | 146 "psrlq $8, %%mm6\n\t" |
147 "psrlq $8, %%mm7\n\t" | |
148 "pand %2, %%mm0\n\t" | |
149 "pand %2, %%mm1\n\t" | |
150 "pand %2, %%mm4\n\t" | |
151 "pand %2, %%mm5\n\t" | |
152 "pand %3, %%mm2\n\t" | |
153 "pand %3, %%mm3\n\t" | |
154 "pand %3, %%mm6\n\t" | |
155 "pand %3, %%mm7\n\t" | |
2517 | 156 "por %%mm2, %%mm0\n\t" |
157 "por %%mm3, %%mm1\n\t" | |
2746 | 158 "por %%mm6, %%mm4\n\t" |
159 "por %%mm7, %%mm5\n\t" | |
160 | |
161 "movq %%mm1, %%mm2\n\t" | |
162 "movq %%mm4, %%mm3\n\t" | |
163 "psllq $48, %%mm2\n\t" | |
164 "psllq $32, %%mm3\n\t" | |
165 "pand %4, %%mm2\n\t" | |
166 "pand %5, %%mm3\n\t" | |
167 "por %%mm2, %%mm0\n\t" | |
168 "psrlq $16, %%mm1\n\t" | |
169 "psrlq $32, %%mm4\n\t" | |
170 "psllq $16, %%mm5\n\t" | |
171 "por %%mm3, %%mm1\n\t" | |
172 "pand %6, %%mm5\n\t" | |
173 "por %%mm5, %%mm4\n\t" | |
3132 | 174 |
2517 | 175 MOVNTQ" %%mm0, %0\n\t" |
2746 | 176 MOVNTQ" %%mm1, 8%0\n\t" |
177 MOVNTQ" %%mm4, 16%0" | |
2517 | 178 :"=m"(*dest) |
2746 | 179 :"m"(*s),"m"(mask24l), |
180 "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) | |
2517 | 181 :"memory"); |
2746 | 182 dest += 24; |
183 s += 32; | |
2517 | 184 } |
185 __asm __volatile(SFENCE:::"memory"); | |
186 __asm __volatile(EMMS:::"memory"); | |
187 #endif | |
2505 | 188 while(s < end) |
189 { | |
13423 | 190 #ifdef WORDS_BIGENDIAN |
191 s++; | |
192 *dest++ = *s++; | |
193 *dest++ = *s++; | |
194 *dest++ = *s++; | |
195 #else | |
2505 | 196 *dest++ = *s++; |
197 *dest++ = *s++; | |
198 *dest++ = *s++; | |
199 s++; | |
13423 | 200 #endif |
2505 | 201 } |
202 } | |
2506 | 203 |
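rgb32to24 above is the inverse repacking: every MMX iteration consumes 32 source bytes (eight 4-byte pixels) and emits 24 bytes, which is why the loop bound is mm_end = end - 31 and each pass ends with dest += 24, s += 32. The mask24l/mask24h/mask24hh/mask24hhh/mask24hhhh operands are defined in rgb2rgb.c, not on this page, and presumably select the three live bytes of each pixel so the shifts and ors splice them into a contiguous 24bpp stream.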
2538 | 204 /* |
205 Original by Strepto/Astral | |
206 ported to gcc & bugfixed : A'rpi | |
2564 | 207 MMX2, 3DNOW optimization by Nick Kurshev |
2698 | 208 32bit c version, and and&add trick by Michael Niedermayer |
2538 | 209 */ |
3132 | 210 static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,unsigned src_size) |
2506 | 211 { |
6492 | 212 register const uint8_t* s=src; |
213 register uint8_t* d=dst; | |
214 register const uint8_t *end; | |
6605 | 215 const uint8_t *mm_end; |
6492 | 216 end = s + src_size; |
2506 | 217 #ifdef HAVE_MMX |
6492 | 218 __asm __volatile(PREFETCH" %0"::"m"(*s)); |
219 __asm __volatile("movq %0, %%mm4"::"m"(mask15s)); | |
6605 | 220 mm_end = end - 15; |
6492 | 221 while(s<mm_end) |
2538 | 222 { |
223 __asm __volatile( | |
224 PREFETCH" 32%1\n\t" | |
225 "movq %1, %%mm0\n\t" | |
226 "movq 8%1, %%mm2\n\t" | |
227 "movq %%mm0, %%mm1\n\t" | |
228 "movq %%mm2, %%mm3\n\t" | |
229 "pand %%mm4, %%mm0\n\t" | |
230 "pand %%mm4, %%mm2\n\t" | |
2698 | 231 "paddw %%mm1, %%mm0\n\t" |
232 "paddw %%mm3, %%mm2\n\t" | |
2538 | 233 MOVNTQ" %%mm0, %0\n\t" |
234 MOVNTQ" %%mm2, 8%0" | |
6492 | 235 :"=m"(*d) |
236 :"m"(*s) | |
2698 | 237 ); |
6492 | 238 d+=16; |
239 s+=16; | |
2506 | 240 } |
2538 | 241 __asm __volatile(SFENCE:::"memory"); |
242 __asm __volatile(EMMS:::"memory"); | |
2698 | 243 #endif |
6605 | 244 mm_end = end - 3; |
6492 | 245 while(s < mm_end) |
246 { | |
247 register unsigned x= *((uint32_t *)s); | |
248 *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0); | |
249 d+=4; | |
250 s+=4; | |
251 } | |
252 if(s < end) | |
253 { | |
254 register unsigned short x= *((uint16_t *)s); | |
255 *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0); | |
256 } | |
2506 | 257 } |
2694 | 258 |
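The "and&add trick" credited in the comment above, spelled out (a clarifying note, not part of the original source): a 15-bit pixel is 0RRRRRGGGGGBBBBB, and since y + y == y << 1, adding the masked red/green field to the pixel moves both fields up one bit while blue stays put, so the scalar tail's

    (x&0x7FFF) + (x&0x7FE0)  ==  (x & 0x001F) | ((x & 0x7FE0) << 1)

turns 5-5-5 into 5-6-5 with the new low green bit cleared. The 32-bit loop applies the same identity to two pixels at once via 0x7FFF7FFF/0x7FE07FE0, and the MMX loop does it four pixels per movq with pand/paddw, using the mask15s constant defined in rgb2rgb.c.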
6606 | 259 static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,unsigned src_size) |
260 { | |
261 register const uint8_t* s=src; | |
262 register uint8_t* d=dst; | |
263 register const uint8_t *end; | |
6608 | 264 const uint8_t *mm_end; |
6606 | 265 end = s + src_size; |
266 #ifdef HAVE_MMX | |
267 __asm __volatile(PREFETCH" %0"::"m"(*s)); | |
268 __asm __volatile("movq %0, %%mm7"::"m"(mask15rg)); | |
269 __asm __volatile("movq %0, %%mm6"::"m"(mask15b)); | |
6608 | 270 mm_end = end - 15; |
6606 | 271 while(s<mm_end) |
272 { | |
273 __asm __volatile( | |
274 PREFETCH" 32%1\n\t" | |
275 "movq %1, %%mm0\n\t" | |
276 "movq 8%1, %%mm2\n\t" | |
277 "movq %%mm0, %%mm1\n\t" | |
278 "movq %%mm2, %%mm3\n\t" | |
279 "psrlq $1, %%mm0\n\t" | |
280 "psrlq $1, %%mm2\n\t" | |
281 "pand %%mm7, %%mm0\n\t" | |
282 "pand %%mm7, %%mm2\n\t" | |
283 "pand %%mm6, %%mm1\n\t" | |
284 "pand %%mm6, %%mm3\n\t" | |
285 "por %%mm1, %%mm0\n\t" | |
286 "por %%mm3, %%mm2\n\t" | |
287 MOVNTQ" %%mm0, %0\n\t" | |
288 MOVNTQ" %%mm2, 8%0" | |
289 :"=m"(*d) | |
290 :"m"(*s) | |
291 ); | |
292 d+=16; | |
293 s+=16; | |
294 } | |
295 __asm __volatile(SFENCE:::"memory"); | |
296 __asm __volatile(EMMS:::"memory"); | |
297 #endif | |
6608 | 298 mm_end = end - 3; |
299 while(s < mm_end) | |
6606 | 300 { |
301 register uint32_t x= *((uint32_t *)s); | |
302 *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F); | |
303 s+=4; | |
304 d+=4; | |
305 } | |
306 if(s < end) | |
307 { | |
308 register uint16_t x= *((uint16_t *)s); | |
309 *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F); | |
310 s+=2; | |
311 d+=2; | |
312 } | |
313 } | |
314 | |
3132 | 315 static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, unsigned src_size) |
2694 | 316 { |
6492 | 317 const uint8_t *s = src; |
318 const uint8_t *end; | |
2741 | 319 #ifdef HAVE_MMX |
6492 | 320 const uint8_t *mm_end; |
321 #endif | |
2741 | 322 uint16_t *d = (uint16_t *)dst; |
323 end = s + src_size; | |
6492 | 324 #ifdef HAVE_MMX |
9454 | 325 mm_end = end - 15; |
326 #if 1 // is faster only if multiplies are reasonably fast (FIXME: figure out on which CPUs this is faster; on Athlon it is slightly faster) | |
327 asm volatile( | |
328 "movq %3, %%mm5 \n\t" | |
329 "movq %4, %%mm6 \n\t" | |
330 "movq %5, %%mm7 \n\t" | |
331 ".balign 16 \n\t" | |
332 "1: \n\t" | |
333 PREFETCH" 32(%1) \n\t" | |
334 "movd (%1), %%mm0 \n\t" | |
335 "movd 4(%1), %%mm3 \n\t" | |
336 "punpckldq 8(%1), %%mm0 \n\t" | |
337 "punpckldq 12(%1), %%mm3 \n\t" | |
338 "movq %%mm0, %%mm1 \n\t" | |
339 "movq %%mm3, %%mm4 \n\t" | |
340 "pand %%mm6, %%mm0 \n\t" | |
341 "pand %%mm6, %%mm3 \n\t" | |
342 "pmaddwd %%mm7, %%mm0 \n\t" | |
343 "pmaddwd %%mm7, %%mm3 \n\t" | |
344 "pand %%mm5, %%mm1 \n\t" | |
345 "pand %%mm5, %%mm4 \n\t" | |
346 "por %%mm1, %%mm0 \n\t" | |
347 "por %%mm4, %%mm3 \n\t" | |
348 "psrld $5, %%mm0 \n\t" | |
349 "pslld $11, %%mm3 \n\t" | |
350 "por %%mm3, %%mm0 \n\t" | |
351 MOVNTQ" %%mm0, (%0) \n\t" | |
13720 | 352 "add $16, %1 \n\t" |
353 "add $8, %0 \n\t" | |
354 "cmp %2, %1 \n\t" | |
9454 | 355 " jb 1b \n\t" |
356 : "+r" (d), "+r"(s) | |
357 : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216) | |
358 ); | |
359 #else | |
2741 | 360 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); |
361 __asm __volatile( | |
362 "movq %0, %%mm7\n\t" | |
363 "movq %1, %%mm6\n\t" | |
364 ::"m"(red_16mask),"m"(green_16mask)); | |
365 while(s < mm_end) | |
366 { | |
367 __asm __volatile( | |
368 PREFETCH" 32%1\n\t" | |
369 "movd %1, %%mm0\n\t" | |
370 "movd 4%1, %%mm3\n\t" | |
371 "punpckldq 8%1, %%mm0\n\t" | |
372 "punpckldq 12%1, %%mm3\n\t" | |
373 "movq %%mm0, %%mm1\n\t" | |
374 "movq %%mm0, %%mm2\n\t" | |
375 "movq %%mm3, %%mm4\n\t" | |
376 "movq %%mm3, %%mm5\n\t" | |
377 "psrlq $3, %%mm0\n\t" | |
378 "psrlq $3, %%mm3\n\t" | |
379 "pand %2, %%mm0\n\t" | |
380 "pand %2, %%mm3\n\t" | |
381 "psrlq $5, %%mm1\n\t" | |
382 "psrlq $5, %%mm4\n\t" | |
383 "pand %%mm6, %%mm1\n\t" | |
384 "pand %%mm6, %%mm4\n\t" | |
385 "psrlq $8, %%mm2\n\t" | |
386 "psrlq $8, %%mm5\n\t" | |
387 "pand %%mm7, %%mm2\n\t" | |
388 "pand %%mm7, %%mm5\n\t" | |
389 "por %%mm1, %%mm0\n\t" | |
390 "por %%mm4, %%mm3\n\t" | |
391 "por %%mm2, %%mm0\n\t" | |
392 "por %%mm5, %%mm3\n\t" | |
393 "psllq $16, %%mm3\n\t" | |
394 "por %%mm3, %%mm0\n\t" | |
395 MOVNTQ" %%mm0, %0\n\t" | |
396 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); | |
397 d += 4; | |
398 s += 16; | |
399 } | |
9454 | 400 #endif |
6492 | 401 __asm __volatile(SFENCE:::"memory"); |
402 __asm __volatile(EMMS:::"memory"); | |
403 #endif | |
2741 | 404 while(s < end) |
405 { | |
13423 | 406 // FIXME on bigendian |
12385 | 407 const int src= *s; s += 4; |
9430 | 408 *d++ = ((src&0xFF)>>3) + ((src&0xFC00)>>5) + ((src&0xF80000)>>8); |
409 // *d++ = ((src>>3)&0x1F) + ((src>>5)&0x7E0) + ((src>>8)&0xF800); | |
2741 | 410 } |
2694 | 411 } |
412 | |
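In rgb32to16 above, the #if 1 path trades the shift-and-mask sequence of the #else branch for one pmaddwd per register: judging by their names, mask3216g keeps the green bits and mask3216br the blue and red bits, and the multiply-accumulate by mul3216 (all three constants are defined in rgb2rgb.c, not shown on this page) rescales blue and red so that, after psrld $5, pslld $11 and the final ors, each quadword holds packed 16-bit 565 pixels. The intended result is exactly what the C fallback computes, *d++ = ((src&0xFF)>>3) + ((src&0xFC00)>>5) + ((src&0xF80000)>>8); rgb32to15 below plays the same trick with mask3215g, mul3215 and a psrld $6.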
6606 | 413 static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, unsigned int src_size) |
414 { | |
415 const uint8_t *s = src; | |
416 const uint8_t *end; | |
417 #ifdef HAVE_MMX | |
418 const uint8_t *mm_end; | |
419 #endif | |
420 uint16_t *d = (uint16_t *)dst; | |
421 end = s + src_size; | |
422 #ifdef HAVE_MMX | |
423 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); | |
424 __asm __volatile( | |
425 "movq %0, %%mm7\n\t" | |
426 "movq %1, %%mm6\n\t" | |
427 ::"m"(red_16mask),"m"(green_16mask)); | |
6608 | 428 mm_end = end - 15; |
6606 | 429 while(s < mm_end) |
430 { | |
431 __asm __volatile( | |
432 PREFETCH" 32%1\n\t" | |
433 "movd %1, %%mm0\n\t" | |
434 "movd 4%1, %%mm3\n\t" | |
435 "punpckldq 8%1, %%mm0\n\t" | |
436 "punpckldq 12%1, %%mm3\n\t" | |
437 "movq %%mm0, %%mm1\n\t" | |
438 "movq %%mm0, %%mm2\n\t" | |
439 "movq %%mm3, %%mm4\n\t" | |
440 "movq %%mm3, %%mm5\n\t" | |
441 "psllq $8, %%mm0\n\t" | |
442 "psllq $8, %%mm3\n\t" | |
443 "pand %%mm7, %%mm0\n\t" | |
444 "pand %%mm7, %%mm3\n\t" | |
445 "psrlq $5, %%mm1\n\t" | |
446 "psrlq $5, %%mm4\n\t" | |
447 "pand %%mm6, %%mm1\n\t" | |
448 "pand %%mm6, %%mm4\n\t" | |
449 "psrlq $19, %%mm2\n\t" | |
450 "psrlq $19, %%mm5\n\t" | |
451 "pand %2, %%mm2\n\t" | |
452 "pand %2, %%mm5\n\t" | |
453 "por %%mm1, %%mm0\n\t" | |
454 "por %%mm4, %%mm3\n\t" | |
455 "por %%mm2, %%mm0\n\t" | |
456 "por %%mm5, %%mm3\n\t" | |
457 "psllq $16, %%mm3\n\t" | |
458 "por %%mm3, %%mm0\n\t" | |
459 MOVNTQ" %%mm0, %0\n\t" | |
460 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); | |
461 d += 4; | |
462 s += 16; | |
463 } | |
464 __asm __volatile(SFENCE:::"memory"); | |
465 __asm __volatile(EMMS:::"memory"); | |
466 #endif | |
467 while(s < end) | |
468 { | |
13423 | 469 // FIXME on bigendian |
12385 | 470 const int src= *s; s += 4; |
9430 | 471 *d++ = ((src&0xF8)<<8) + ((src&0xFC00)>>5) + ((src&0xF80000)>>19); |
6606 | 472 } |
473 } | |
474 | |
3132 | 475 static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, unsigned src_size) |
2694 | 476 { |
6492 | 477 const uint8_t *s = src; |
478 const uint8_t *end; | |
2741 | 479 #ifdef HAVE_MMX |
6492 | 480 const uint8_t *mm_end; |
481 #endif | |
2741 | 482 uint16_t *d = (uint16_t *)dst; |
483 end = s + src_size; | |
6492 | 484 #ifdef HAVE_MMX |
9454 | 485 mm_end = end - 15; |
486 #if 1 // is faster only if multiplies are reasonably fast (FIXME: figure out on which CPUs this is faster; on Athlon it is slightly faster) | |
487 asm volatile( | |
488 "movq %3, %%mm5 \n\t" | |
489 "movq %4, %%mm6 \n\t" | |
490 "movq %5, %%mm7 \n\t" | |
491 ".balign 16 \n\t" | |
492 "1: \n\t" | |
493 PREFETCH" 32(%1) \n\t" | |
494 "movd (%1), %%mm0 \n\t" | |
495 "movd 4(%1), %%mm3 \n\t" | |
496 "punpckldq 8(%1), %%mm0 \n\t" | |
497 "punpckldq 12(%1), %%mm3 \n\t" | |
498 "movq %%mm0, %%mm1 \n\t" | |
499 "movq %%mm3, %%mm4 \n\t" | |
500 "pand %%mm6, %%mm0 \n\t" | |
501 "pand %%mm6, %%mm3 \n\t" | |
502 "pmaddwd %%mm7, %%mm0 \n\t" | |
503 "pmaddwd %%mm7, %%mm3 \n\t" | |
504 "pand %%mm5, %%mm1 \n\t" | |
505 "pand %%mm5, %%mm4 \n\t" | |
506 "por %%mm1, %%mm0 \n\t" | |
507 "por %%mm4, %%mm3 \n\t" | |
508 "psrld $6, %%mm0 \n\t" | |
509 "pslld $10, %%mm3 \n\t" | |
510 "por %%mm3, %%mm0 \n\t" | |
511 MOVNTQ" %%mm0, (%0) \n\t" | |
13720 | 512 "add $16, %1 \n\t" |
513 "add $8, %0 \n\t" | |
514 "cmp %2, %1 \n\t" | |
9454 | 515 " jb 1b \n\t" |
516 : "+r" (d), "+r"(s) | |
517 : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215) | |
518 ); | |
519 #else | |
2741 | 520 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); |
521 __asm __volatile( | |
522 "movq %0, %%mm7\n\t" | |
523 "movq %1, %%mm6\n\t" | |
524 ::"m"(red_15mask),"m"(green_15mask)); | |
525 while(s < mm_end) | |
526 { | |
527 __asm __volatile( | |
528 PREFETCH" 32%1\n\t" | |
529 "movd %1, %%mm0\n\t" | |
530 "movd 4%1, %%mm3\n\t" | |
531 "punpckldq 8%1, %%mm0\n\t" | |
532 "punpckldq 12%1, %%mm3\n\t" | |
533 "movq %%mm0, %%mm1\n\t" | |
534 "movq %%mm0, %%mm2\n\t" | |
535 "movq %%mm3, %%mm4\n\t" | |
536 "movq %%mm3, %%mm5\n\t" | |
537 "psrlq $3, %%mm0\n\t" | |
538 "psrlq $3, %%mm3\n\t" | |
539 "pand %2, %%mm0\n\t" | |
540 "pand %2, %%mm3\n\t" | |
541 "psrlq $6, %%mm1\n\t" | |
542 "psrlq $6, %%mm4\n\t" | |
543 "pand %%mm6, %%mm1\n\t" | |
544 "pand %%mm6, %%mm4\n\t" | |
545 "psrlq $9, %%mm2\n\t" | |
546 "psrlq $9, %%mm5\n\t" | |
547 "pand %%mm7, %%mm2\n\t" | |
548 "pand %%mm7, %%mm5\n\t" | |
549 "por %%mm1, %%mm0\n\t" | |
550 "por %%mm4, %%mm3\n\t" | |
551 "por %%mm2, %%mm0\n\t" | |
552 "por %%mm5, %%mm3\n\t" | |
553 "psllq $16, %%mm3\n\t" | |
554 "por %%mm3, %%mm0\n\t" | |
555 MOVNTQ" %%mm0, %0\n\t" | |
556 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); | |
557 d += 4; | |
558 s += 16; | |
559 } | |
9454 | 560 #endif |
6492 | 561 __asm __volatile(SFENCE:::"memory"); |
562 __asm __volatile(EMMS:::"memory"); | |
563 #endif | |
2741 | 564 while(s < end) |
565 { | |
13423 | 566 // FIXME on bigendian |
12385 | 567 const int src= *s; s += 4; |
9430 | 568 *d++ = ((src&0xFF)>>3) + ((src&0xF800)>>6) + ((src&0xF80000)>>9); |
2741 | 569 } |
2694 | 570 } |
571 | |
6606 | 572 static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, unsigned src_size) |
573 { | |
574 const uint8_t *s = src; | |
575 const uint8_t *end; | |
576 #ifdef HAVE_MMX | |
577 const uint8_t *mm_end; | |
578 #endif | |
579 uint16_t *d = (uint16_t *)dst; | |
580 end = s + src_size; | |
581 #ifdef HAVE_MMX | |
582 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); | |
583 __asm __volatile( | |
584 "movq %0, %%mm7\n\t" | |
585 "movq %1, %%mm6\n\t" | |
586 ::"m"(red_15mask),"m"(green_15mask)); | |
6608 | 587 mm_end = end - 15; |
6606 | 588 while(s < mm_end) |
589 { | |
590 __asm __volatile( | |
591 PREFETCH" 32%1\n\t" | |
592 "movd %1, %%mm0\n\t" | |
593 "movd 4%1, %%mm3\n\t" | |
594 "punpckldq 8%1, %%mm0\n\t" | |
595 "punpckldq 12%1, %%mm3\n\t" | |
596 "movq %%mm0, %%mm1\n\t" | |
597 "movq %%mm0, %%mm2\n\t" | |
598 "movq %%mm3, %%mm4\n\t" | |
599 "movq %%mm3, %%mm5\n\t" | |
600 "psllq $7, %%mm0\n\t" | |
601 "psllq $7, %%mm3\n\t" | |
602 "pand %%mm7, %%mm0\n\t" | |
603 "pand %%mm7, %%mm3\n\t" | |
604 "psrlq $6, %%mm1\n\t" | |
605 "psrlq $6, %%mm4\n\t" | |
606 "pand %%mm6, %%mm1\n\t" | |
607 "pand %%mm6, %%mm4\n\t" | |
608 "psrlq $19, %%mm2\n\t" | |
609 "psrlq $19, %%mm5\n\t" | |
610 "pand %2, %%mm2\n\t" | |
611 "pand %2, %%mm5\n\t" | |
612 "por %%mm1, %%mm0\n\t" | |
613 "por %%mm4, %%mm3\n\t" | |
614 "por %%mm2, %%mm0\n\t" | |
615 "por %%mm5, %%mm3\n\t" | |
616 "psllq $16, %%mm3\n\t" | |
617 "por %%mm3, %%mm0\n\t" | |
618 MOVNTQ" %%mm0, %0\n\t" | |
619 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); | |
620 d += 4; | |
621 s += 16; | |
622 } | |
623 __asm __volatile(SFENCE:::"memory"); | |
624 __asm __volatile(EMMS:::"memory"); | |
625 #endif | |
626 while(s < end) | |
627 { | |
13423 | 628 // FIXME on bigendian |
12385 | 629 const int src= *s; s += 4; |
9430 | 630 *d++ = ((src&0xF8)<<7) + ((src&0xF800)>>6) + ((src&0xF80000)>>19); |
6606 | 631 } |
632 } | |
633 | |
3132 | 634 static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, unsigned src_size) |
2718 | 635 { |
6492 | 636 const uint8_t *s = src; |
637 const uint8_t *end; | |
2740 | 638 #ifdef HAVE_MMX |
6492 | 639 const uint8_t *mm_end; |
640 #endif | |
2719 | 641 uint16_t *d = (uint16_t *)dst; |
2740 | 642 end = s + src_size; |
6492 | 643 #ifdef HAVE_MMX |
2738 | 644 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); |
645 __asm __volatile( | |
646 "movq %0, %%mm7\n\t" | |
647 "movq %1, %%mm6\n\t" | |
2741 | 648 ::"m"(red_16mask),"m"(green_16mask)); |
6605 | 649 mm_end = end - 11; |
2740 | 650 while(s < mm_end) |
2738 | 651 { |
652 __asm __volatile( | |
653 PREFETCH" 32%1\n\t" | |
654 "movd %1, %%mm0\n\t" | |
2740 | 655 "movd 3%1, %%mm3\n\t" |
656 "punpckldq 6%1, %%mm0\n\t" | |
2738 | 657 "punpckldq 9%1, %%mm3\n\t" |
658 "movq %%mm0, %%mm1\n\t" | |
659 "movq %%mm0, %%mm2\n\t" | |
660 "movq %%mm3, %%mm4\n\t" | |
661 "movq %%mm3, %%mm5\n\t" | |
662 "psrlq $3, %%mm0\n\t" | |
663 "psrlq $3, %%mm3\n\t" | |
2740 | 664 "pand %2, %%mm0\n\t" |
665 "pand %2, %%mm3\n\t" | |
666 "psrlq $5, %%mm1\n\t" | |
667 "psrlq $5, %%mm4\n\t" | |
668 "pand %%mm6, %%mm1\n\t" | |
669 "pand %%mm6, %%mm4\n\t" | |
670 "psrlq $8, %%mm2\n\t" | |
671 "psrlq $8, %%mm5\n\t" | |
672 "pand %%mm7, %%mm2\n\t" | |
673 "pand %%mm7, %%mm5\n\t" | |
2738 | 674 "por %%mm1, %%mm0\n\t" |
2740 | 675 "por %%mm4, %%mm3\n\t" |
2738 | 676 "por %%mm2, %%mm0\n\t" |
677 "por %%mm5, %%mm3\n\t" | |
2740 | 678 "psllq $16, %%mm3\n\t" |
679 "por %%mm3, %%mm0\n\t" | |
2738 | 680 MOVNTQ" %%mm0, %0\n\t" |
2741 | 681 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); |
2740 | 682 d += 4; |
683 s += 12; | |
2738 | 684 } |
6492 | 685 __asm __volatile(SFENCE:::"memory"); |
686 __asm __volatile(EMMS:::"memory"); | |
687 #endif | |
2740 | 688 while(s < end) |
689 { | |
690 const int b= *s++; | |
691 const int g= *s++; | |
692 const int r= *s++; | |
693 *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); | |
694 } | |
2718 | 695 } |
696 | |
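The scalar tail of rgb24to16 above reads the three source bytes in memory order as b, g, r and packs them with (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8), i.e. the third byte lands in bits 15..11, the second in bits 10..5 and the first in bits 4..0 of the 565 word; rgb24to15 further below does the same into a 555 word with (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7). The rgb24tobgr16 variant that follows differs only in reading the bytes in the opposite order (r, g, b) before applying the identical packing.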
6606 | 697 static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, unsigned int src_size) |
698 { | |
699 const uint8_t *s = src; | |
700 const uint8_t *end; | |
701 #ifdef HAVE_MMX | |
702 const uint8_t *mm_end; | |
703 #endif | |
704 uint16_t *d = (uint16_t *)dst; | |
705 end = s + src_size; | |
706 #ifdef HAVE_MMX | |
707 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); | |
708 __asm __volatile( | |
709 "movq %0, %%mm7\n\t" | |
710 "movq %1, %%mm6\n\t" | |
711 ::"m"(red_16mask),"m"(green_16mask)); | |
6608 | 712 mm_end = end - 15; |
6606 | 713 while(s < mm_end) |
714 { | |
715 __asm __volatile( | |
716 PREFETCH" 32%1\n\t" | |
717 "movd %1, %%mm0\n\t" | |
718 "movd 3%1, %%mm3\n\t" | |
719 "punpckldq 6%1, %%mm0\n\t" | |
720 "punpckldq 9%1, %%mm3\n\t" | |
721 "movq %%mm0, %%mm1\n\t" | |
722 "movq %%mm0, %%mm2\n\t" | |
723 "movq %%mm3, %%mm4\n\t" | |
724 "movq %%mm3, %%mm5\n\t" | |
725 "psllq $8, %%mm0\n\t" | |
726 "psllq $8, %%mm3\n\t" | |
727 "pand %%mm7, %%mm0\n\t" | |
728 "pand %%mm7, %%mm3\n\t" | |
729 "psrlq $5, %%mm1\n\t" | |
730 "psrlq $5, %%mm4\n\t" | |
731 "pand %%mm6, %%mm1\n\t" | |
732 "pand %%mm6, %%mm4\n\t" | |
733 "psrlq $19, %%mm2\n\t" | |
734 "psrlq $19, %%mm5\n\t" | |
735 "pand %2, %%mm2\n\t" | |
736 "pand %2, %%mm5\n\t" | |
737 "por %%mm1, %%mm0\n\t" | |
738 "por %%mm4, %%mm3\n\t" | |
739 "por %%mm2, %%mm0\n\t" | |
740 "por %%mm5, %%mm3\n\t" | |
741 "psllq $16, %%mm3\n\t" | |
742 "por %%mm3, %%mm0\n\t" | |
743 MOVNTQ" %%mm0, %0\n\t" | |
744 :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); | |
745 d += 4; | |
746 s += 12; | |
747 } | |
748 __asm __volatile(SFENCE:::"memory"); | |
749 __asm __volatile(EMMS:::"memory"); | |
750 #endif | |
751 while(s < end) | |
752 { | |
753 const int r= *s++; | |
754 const int g= *s++; | |
755 const int b= *s++; | |
756 *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); | |
757 } | |
758 } | |
759 | |
3132 | 760 static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, unsigned src_size) |
2718 | 761 { |
6492 | 762 const uint8_t *s = src; |
763 const uint8_t *end; | |
2741 | 764 #ifdef HAVE_MMX |
6492 | 765 const uint8_t *mm_end; |
766 #endif | |
2741 | 767 uint16_t *d = (uint16_t *)dst; |
768 end = s + src_size; | |
6492 | 769 #ifdef HAVE_MMX |
2741 | 770 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); |
771 __asm __volatile( | |
772 "movq %0, %%mm7\n\t" | |
773 "movq %1, %%mm6\n\t" | |
774 ::"m"(red_15mask),"m"(green_15mask)); | |
6605 | 775 mm_end = end - 11; |
2741 | 776 while(s < mm_end) |
777 { | |
778 __asm __volatile( | |
779 PREFETCH" 32%1\n\t" | |
780 "movd %1, %%mm0\n\t" | |
781 "movd 3%1, %%mm3\n\t" | |
782 "punpckldq 6%1, %%mm0\n\t" | |
783 "punpckldq 9%1, %%mm3\n\t" | |
784 "movq %%mm0, %%mm1\n\t" | |
785 "movq %%mm0, %%mm2\n\t" | |
786 "movq %%mm3, %%mm4\n\t" | |
787 "movq %%mm3, %%mm5\n\t" | |
788 "psrlq $3, %%mm0\n\t" | |
789 "psrlq $3, %%mm3\n\t" | |
790 "pand %2, %%mm0\n\t" | |
791 "pand %2, %%mm3\n\t" | |
792 "psrlq $6, %%mm1\n\t" | |
793 "psrlq $6, %%mm4\n\t" | |
794 "pand %%mm6, %%mm1\n\t" | |
795 "pand %%mm6, %%mm4\n\t" | |
796 "psrlq $9, %%mm2\n\t" | |
797 "psrlq $9, %%mm5\n\t" | |
798 "pand %%mm7, %%mm2\n\t" | |
799 "pand %%mm7, %%mm5\n\t" | |
800 "por %%mm1, %%mm0\n\t" | |
801 "por %%mm4, %%mm3\n\t" | |
802 "por %%mm2, %%mm0\n\t" | |
803 "por %%mm5, %%mm3\n\t" | |
804 "psllq $16, %%mm3\n\t" | |
805 "por %%mm3, %%mm0\n\t" | |
806 MOVNTQ" %%mm0, %0\n\t" | |
807 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); | |
808 d += 4; | |
809 s += 12; | |
810 } | |
6492 | 811 __asm __volatile(SFENCE:::"memory"); |
812 __asm __volatile(EMMS:::"memory"); | |
813 #endif | |
2741 | 814 while(s < end) |
815 { | |
816 const int b= *s++; | |
817 const int g= *s++; | |
818 const int r= *s++; | |
819 *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); | |
820 } | |
6492 | 821 } |
822 | |
823 static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, unsigned src_size) |
824 { |
825 const uint8_t *s = src; |
826 const uint8_t *end; |
827 #ifdef HAVE_MMX |
828 const uint8_t *mm_end; |
829 #endif |
830 uint16_t *d = (uint16_t *)dst; |
831 end = s + src_size; |
832 #ifdef HAVE_MMX |
833 __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); |
834 __asm __volatile( |
835 "movq %0, %%mm7\n\t" |
836 "movq %1, %%mm6\n\t" |
837 ::"m"(red_15mask),"m"(green_15mask)); |
838 mm_end = end - 15; |
839 while(s < mm_end) |
840 { |
841 __asm __volatile( |
842 PREFETCH" 32%1\n\t" |
843 "movd %1, %%mm0\n\t" |
844 "movd 3%1, %%mm3\n\t" |
845 "punpckldq 6%1, %%mm0\n\t" |
846 "punpckldq 9%1, %%mm3\n\t" |
847 "movq %%mm0, %%mm1\n\t" |
848 "movq %%mm0, %%mm2\n\t" |
849 "movq %%mm3, %%mm4\n\t" |
850 "movq %%mm3, %%mm5\n\t" |
851 "psllq $7, %%mm0\n\t" |
852 "psllq $7, %%mm3\n\t" |
853 "pand %%mm7, %%mm0\n\t" |
854 "pand %%mm7, %%mm3\n\t" |
855 "psrlq $6, %%mm1\n\t" |
856 "psrlq $6, %%mm4\n\t" |
857 "pand %%mm6, %%mm1\n\t" |
858 "pand %%mm6, %%mm4\n\t" |
859 "psrlq $19, %%mm2\n\t" |
860 "psrlq $19, %%mm5\n\t" |
861 "pand %2, %%mm2\n\t" |
862 "pand %2, %%mm5\n\t" |
863 "por %%mm1, %%mm0\n\t" |
864 "por %%mm4, %%mm3\n\t" |
865 "por %%mm2, %%mm0\n\t" |
866 "por %%mm5, %%mm3\n\t" |
867 "psllq $16, %%mm3\n\t" |
868 "por %%mm3, %%mm0\n\t" |
869 MOVNTQ" %%mm0, %0\n\t" |
870 :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); |
871 d += 4; |
872 s += 12; |
873 } |
874 __asm __volatile(SFENCE:::"memory"); |
875 __asm __volatile(EMMS:::"memory"); |
876 #endif |
877 while(s < end) |
878 { |
879 const int r= *s++; |
880 const int g= *s++; |
881 const int b= *s++; |
882 *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); |
883 } |
884 } |
885 |
6492 | 886 /* |
887 I use here a less accurate approximation by simply |
888 left-shifting the input |
889 value and filling the low-order bits with |
890 zeroes. This method improves PNG |
891 compression, but this scheme cannot reproduce white exactly, since it does not |
892 generate an all-ones maximum value; the net effect is to darken the |
893 image slightly. |
894 |
895 A better method would be "left bit replication": |
896 | |
897 4 3 2 1 0 | |
898 --------- | |
899 1 1 0 1 1 | |
900 | |
901 7 6 5 4 3 2 1 0 | |
902 ---------------- | |
903 1 1 0 1 1 1 1 0 | |
904 |=======| |===| | |
905 | Leftmost Bits Repeated to Fill Open Bits | |
906 | | |
907 Original Bits | |
908 */ | |
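/* Illustrative sketch (editorial example, not part of the original converters):
   "left bit replication" expands a 5-bit channel value v (0..31) to 8 bits by
   shifting left and copying the top bits into the vacated low bits:
       uint8_t v8 = (v << 3) | (v >> 2);   // 0b11011 -> 0b11011110, 0x1F -> 0xFF
   The zero-fill used below is just (v << 3), which maps 0x1F to 0xF8 and
   therefore never reaches full white. */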
909 static inline void RENAME(rgb15to24)(const uint8_t *src, uint8_t *dst, unsigned src_size) | |
910 { | |
911 const uint16_t *end; | |
912 #ifdef HAVE_MMX | |
913 const uint16_t *mm_end; | |
914 #endif | |
915 uint8_t *d = (uint8_t *)dst; | |
916 const uint16_t *s = (uint16_t *)src; | |
917 end = s + src_size/2; | |
918 #ifdef HAVE_MMX | |
919 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); | |
6605 | 920 mm_end = end - 7; |
6492 | 921 while(s < mm_end) |
922 { | |
923 __asm __volatile( | |
924 PREFETCH" 32%1\n\t" | |
925 "movq %1, %%mm0\n\t" | |
926 "movq %1, %%mm1\n\t" | |
927 "movq %1, %%mm2\n\t" | |
928 "pand %2, %%mm0\n\t" | |
929 "pand %3, %%mm1\n\t" | |
930 "pand %4, %%mm2\n\t" | |
931 "psllq $3, %%mm0\n\t" | |
932 "psrlq $2, %%mm1\n\t" | |
933 "psrlq $7, %%mm2\n\t" | |
934 "movq %%mm0, %%mm3\n\t" | |
935 "movq %%mm1, %%mm4\n\t" | |
936 "movq %%mm2, %%mm5\n\t" | |
937 "punpcklwd %5, %%mm0\n\t" | |
938 "punpcklwd %5, %%mm1\n\t" | |
939 "punpcklwd %5, %%mm2\n\t" | |
940 "punpckhwd %5, %%mm3\n\t" | |
941 "punpckhwd %5, %%mm4\n\t" | |
942 "punpckhwd %5, %%mm5\n\t" | |
943 "psllq $8, %%mm1\n\t" | |
944 "psllq $16, %%mm2\n\t" | |
945 "por %%mm1, %%mm0\n\t" | |
946 "por %%mm2, %%mm0\n\t" | |
947 "psllq $8, %%mm4\n\t" | |
948 "psllq $16, %%mm5\n\t" | |
949 "por %%mm4, %%mm3\n\t" | |
950 "por %%mm5, %%mm3\n\t" | |
951 | |
952 "movq %%mm0, %%mm6\n\t" | |
953 "movq %%mm3, %%mm7\n\t" | |
954 | |
955 "movq 8%1, %%mm0\n\t" | |
956 "movq 8%1, %%mm1\n\t" | |
957 "movq 8%1, %%mm2\n\t" | |
958 "pand %2, %%mm0\n\t" | |
959 "pand %3, %%mm1\n\t" | |
960 "pand %4, %%mm2\n\t" | |
961 "psllq $3, %%mm0\n\t" | |
962 "psrlq $2, %%mm1\n\t" | |
963 "psrlq $7, %%mm2\n\t" | |
964 "movq %%mm0, %%mm3\n\t" | |
965 "movq %%mm1, %%mm4\n\t" | |
966 "movq %%mm2, %%mm5\n\t" | |
967 "punpcklwd %5, %%mm0\n\t" | |
968 "punpcklwd %5, %%mm1\n\t" | |
969 "punpcklwd %5, %%mm2\n\t" | |
970 "punpckhwd %5, %%mm3\n\t" | |
971 "punpckhwd %5, %%mm4\n\t" | |
972 "punpckhwd %5, %%mm5\n\t" | |
973 "psllq $8, %%mm1\n\t" | |
974 "psllq $16, %%mm2\n\t" | |
975 "por %%mm1, %%mm0\n\t" | |
976 "por %%mm2, %%mm0\n\t" | |
977 "psllq $8, %%mm4\n\t" | |
978 "psllq $16, %%mm5\n\t" | |
979 "por %%mm4, %%mm3\n\t" | |
980 "por %%mm5, %%mm3\n\t" | |
981 | |
982 :"=m"(*d) | |
983 :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null) | |
984 :"memory"); | |
985 /* Borrowed 32 to 24 */ | |
986 __asm __volatile( | |
987 "movq %%mm0, %%mm4\n\t" | |
988 "movq %%mm3, %%mm5\n\t" | |
989 "movq %%mm6, %%mm0\n\t" | |
990 "movq %%mm7, %%mm1\n\t" | |
991 | |
992 "movq %%mm4, %%mm6\n\t" | |
993 "movq %%mm5, %%mm7\n\t" | |
994 "movq %%mm0, %%mm2\n\t" | |
995 "movq %%mm1, %%mm3\n\t" | |
996 | |
997 "psrlq $8, %%mm2\n\t" | |
998 "psrlq $8, %%mm3\n\t" | |
999 "psrlq $8, %%mm6\n\t" | |
1000 "psrlq $8, %%mm7\n\t" | |
1001 "pand %2, %%mm0\n\t" | |
1002 "pand %2, %%mm1\n\t" | |
1003 "pand %2, %%mm4\n\t" | |
1004 "pand %2, %%mm5\n\t" | |
1005 "pand %3, %%mm2\n\t" | |
1006 "pand %3, %%mm3\n\t" | |
1007 "pand %3, %%mm6\n\t" | |
1008 "pand %3, %%mm7\n\t" | |
1009 "por %%mm2, %%mm0\n\t" | |
1010 "por %%mm3, %%mm1\n\t" | |
1011 "por %%mm6, %%mm4\n\t" | |
1012 "por %%mm7, %%mm5\n\t" | |
1013 | |
1014 "movq %%mm1, %%mm2\n\t" | |
1015 "movq %%mm4, %%mm3\n\t" | |
1016 "psllq $48, %%mm2\n\t" | |
1017 "psllq $32, %%mm3\n\t" | |
1018 "pand %4, %%mm2\n\t" | |
1019 "pand %5, %%mm3\n\t" | |
1020 "por %%mm2, %%mm0\n\t" | |
1021 "psrlq $16, %%mm1\n\t" | |
1022 "psrlq $32, %%mm4\n\t" | |
1023 "psllq $16, %%mm5\n\t" | |
1024 "por %%mm3, %%mm1\n\t" | |
1025 "pand %6, %%mm5\n\t" | |
1026 "por %%mm5, %%mm4\n\t" | |
1027 | |
1028 MOVNTQ" %%mm0, %0\n\t" | |
1029 MOVNTQ" %%mm1, 8%0\n\t" | |
1030 MOVNTQ" %%mm4, 16%0" | |
1031 | |
1032 :"=m"(*d) | |
1033 :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) | |
1034 :"memory"); | |
1035 d += 24; | |
1036 s += 8; | |
1037 } | |
2741 | 1038 __asm __volatile(SFENCE:::"memory"); |
1039 __asm __volatile(EMMS:::"memory"); | |
6492 | 1040 #endif |
1041 while(s < end) | |
1042 { | |
1043 register uint16_t bgr; | |
1044 bgr = *s++; | |
1045 *d++ = (bgr&0x1F)<<3; | |
1046 *d++ = (bgr&0x3E0)>>2; | |
1047 *d++ = (bgr&0x7C00)>>7; | |
1048 } | |
1049 } | |
1050 | |
1051 static inline void RENAME(rgb16to24)(const uint8_t *src, uint8_t *dst, unsigned src_size) | |
1052 { | |
1053 const uint16_t *end; | |
1054 #ifdef HAVE_MMX | |
1055 const uint16_t *mm_end; | |
1056 #endif | |
1057 uint8_t *d = (uint8_t *)dst; | |
1058 const uint16_t *s = (const uint16_t *)src; | |
1059 end = s + src_size/2; | |
1060 #ifdef HAVE_MMX | |
1061 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); | |
6605 | 1062 mm_end = end - 7; |
6492 | 1063 while(s < mm_end) |
2718 | 1064 { |
6492 | 1065 __asm __volatile( |
1066 PREFETCH" 32%1\n\t" | |
1067 "movq %1, %%mm0\n\t" | |
1068 "movq %1, %%mm1\n\t" | |
1069 "movq %1, %%mm2\n\t" | |
1070 "pand %2, %%mm0\n\t" | |
1071 "pand %3, %%mm1\n\t" | |
1072 "pand %4, %%mm2\n\t" | |
1073 "psllq $3, %%mm0\n\t" | |
1074 "psrlq $3, %%mm1\n\t" | |
1075 "psrlq $8, %%mm2\n\t" | |
1076 "movq %%mm0, %%mm3\n\t" | |
1077 "movq %%mm1, %%mm4\n\t" | |
1078 "movq %%mm2, %%mm5\n\t" | |
1079 "punpcklwd %5, %%mm0\n\t" | |
1080 "punpcklwd %5, %%mm1\n\t" | |
1081 "punpcklwd %5, %%mm2\n\t" | |
1082 "punpckhwd %5, %%mm3\n\t" | |
1083 "punpckhwd %5, %%mm4\n\t" | |
1084 "punpckhwd %5, %%mm5\n\t" | |
1085 "psllq $8, %%mm1\n\t" | |
1086 "psllq $16, %%mm2\n\t" | |
1087 "por %%mm1, %%mm0\n\t" | |
1088 "por %%mm2, %%mm0\n\t" | |
1089 "psllq $8, %%mm4\n\t" | |
1090 "psllq $16, %%mm5\n\t" | |
1091 "por %%mm4, %%mm3\n\t" | |
1092 "por %%mm5, %%mm3\n\t" | |
1093 | |
1094 "movq %%mm0, %%mm6\n\t" | |
1095 "movq %%mm3, %%mm7\n\t" | |
1096 | |
1097 "movq 8%1, %%mm0\n\t" | |
1098 "movq 8%1, %%mm1\n\t" | |
1099 "movq 8%1, %%mm2\n\t" | |
1100 "pand %2, %%mm0\n\t" | |
1101 "pand %3, %%mm1\n\t" | |
1102 "pand %4, %%mm2\n\t" | |
1103 "psllq $3, %%mm0\n\t" | |
1104 "psrlq $3, %%mm1\n\t" | |
1105 "psrlq $8, %%mm2\n\t" | |
1106 "movq %%mm0, %%mm3\n\t" | |
1107 "movq %%mm1, %%mm4\n\t" | |
1108 "movq %%mm2, %%mm5\n\t" | |
1109 "punpcklwd %5, %%mm0\n\t" | |
1110 "punpcklwd %5, %%mm1\n\t" | |
1111 "punpcklwd %5, %%mm2\n\t" | |
1112 "punpckhwd %5, %%mm3\n\t" | |
1113 "punpckhwd %5, %%mm4\n\t" | |
1114 "punpckhwd %5, %%mm5\n\t" | |
1115 "psllq $8, %%mm1\n\t" | |
1116 "psllq $16, %%mm2\n\t" | |
1117 "por %%mm1, %%mm0\n\t" | |
1118 "por %%mm2, %%mm0\n\t" | |
1119 "psllq $8, %%mm4\n\t" | |
1120 "psllq $16, %%mm5\n\t" | |
1121 "por %%mm4, %%mm3\n\t" | |
1122 "por %%mm5, %%mm3\n\t" | |
1123 :"=m"(*d) | |
1124 :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null) | |
1125 :"memory"); | |
1126 /* Borrowed 32 to 24 */ | |
1127 __asm __volatile( | |
1128 "movq %%mm0, %%mm4\n\t" | |
1129 "movq %%mm3, %%mm5\n\t" | |
1130 "movq %%mm6, %%mm0\n\t" | |
1131 "movq %%mm7, %%mm1\n\t" | |
1132 | |
1133 "movq %%mm4, %%mm6\n\t" | |
1134 "movq %%mm5, %%mm7\n\t" | |
1135 "movq %%mm0, %%mm2\n\t" | |
1136 "movq %%mm1, %%mm3\n\t" | |
1137 | |
1138 "psrlq $8, %%mm2\n\t" | |
1139 "psrlq $8, %%mm3\n\t" | |
1140 "psrlq $8, %%mm6\n\t" | |
1141 "psrlq $8, %%mm7\n\t" | |
1142 "pand %2, %%mm0\n\t" | |
1143 "pand %2, %%mm1\n\t" | |
1144 "pand %2, %%mm4\n\t" | |
1145 "pand %2, %%mm5\n\t" | |
1146 "pand %3, %%mm2\n\t" | |
1147 "pand %3, %%mm3\n\t" | |
1148 "pand %3, %%mm6\n\t" | |
1149 "pand %3, %%mm7\n\t" | |
1150 "por %%mm2, %%mm0\n\t" | |
1151 "por %%mm3, %%mm1\n\t" | |
1152 "por %%mm6, %%mm4\n\t" | |
1153 "por %%mm7, %%mm5\n\t" | |
1154 | |
1155 "movq %%mm1, %%mm2\n\t" | |
1156 "movq %%mm4, %%mm3\n\t" | |
1157 "psllq $48, %%mm2\n\t" | |
1158 "psllq $32, %%mm3\n\t" | |
1159 "pand %4, %%mm2\n\t" | |
1160 "pand %5, %%mm3\n\t" | |
1161 "por %%mm2, %%mm0\n\t" | |
1162 "psrlq $16, %%mm1\n\t" | |
1163 "psrlq $32, %%mm4\n\t" | |
1164 "psllq $16, %%mm5\n\t" | |
1165 "por %%mm3, %%mm1\n\t" | |
1166 "pand %6, %%mm5\n\t" | |
1167 "por %%mm5, %%mm4\n\t" | |
1168 | |
1169 MOVNTQ" %%mm0, %0\n\t" | |
1170 MOVNTQ" %%mm1, 8%0\n\t" | |
1171 MOVNTQ" %%mm4, 16%0" | |
1172 | |
1173 :"=m"(*d) | |
1174 :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) | |
1175 :"memory"); | |
1176 d += 24; | |
1177 s += 8; | |
1178 } | |
1179 __asm __volatile(SFENCE:::"memory"); | |
1180 __asm __volatile(EMMS:::"memory"); | |
1181 #endif | |
1182 while(s < end) | |
1183 { | |
1184 register uint16_t bgr; | |
1185 bgr = *s++; | |
1186 *d++ = (bgr&0x1F)<<3; | |
1187 *d++ = (bgr&0x7E0)>>3; | |
1188 *d++ = (bgr&0xF800)>>8; | |
1189 } | |
1190 } | |
2718 | 1191 |
6492 | 1192 static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, unsigned src_size) |
1193 { | |
1194 const uint16_t *end; | |
1195 #ifdef HAVE_MMX | |
1196 const uint16_t *mm_end; | |
1197 #endif | |
1198 uint8_t *d = (uint8_t *)dst; | |
1199 const uint16_t *s = (const uint16_t *)src; | |
1200 end = s + src_size/2; | |
1201 #ifdef HAVE_MMX | |
1202 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); | |
1203 __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory"); | |
6605 | 1204 mm_end = end - 3; |
6492 | 1205 while(s < mm_end) |
1206 { | |
1207 __asm __volatile( | |
1208 PREFETCH" 32%1\n\t" | |
1209 "movq %1, %%mm0\n\t" | |
1210 "movq %1, %%mm1\n\t" | |
1211 "movq %1, %%mm2\n\t" | |
1212 "pand %2, %%mm0\n\t" | |
1213 "pand %3, %%mm1\n\t" | |
1214 "pand %4, %%mm2\n\t" | |
1215 "psllq $3, %%mm0\n\t" | |
1216 "psrlq $2, %%mm1\n\t" | |
1217 "psrlq $7, %%mm2\n\t" | |
1218 "movq %%mm0, %%mm3\n\t" | |
1219 "movq %%mm1, %%mm4\n\t" | |
1220 "movq %%mm2, %%mm5\n\t" | |
1221 "punpcklwd %%mm7, %%mm0\n\t" | |
1222 "punpcklwd %%mm7, %%mm1\n\t" | |
1223 "punpcklwd %%mm7, %%mm2\n\t" | |
1224 "punpckhwd %%mm7, %%mm3\n\t" | |
1225 "punpckhwd %%mm7, %%mm4\n\t" | |
1226 "punpckhwd %%mm7, %%mm5\n\t" | |
1227 "psllq $8, %%mm1\n\t" | |
1228 "psllq $16, %%mm2\n\t" | |
1229 "por %%mm1, %%mm0\n\t" | |
1230 "por %%mm2, %%mm0\n\t" | |
1231 "psllq $8, %%mm4\n\t" | |
1232 "psllq $16, %%mm5\n\t" | |
1233 "por %%mm4, %%mm3\n\t" | |
1234 "por %%mm5, %%mm3\n\t" | |
1235 MOVNTQ" %%mm0, %0\n\t" | |
1236 MOVNTQ" %%mm3, 8%0\n\t" | |
1237 :"=m"(*d) | |
1238 :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r) | |
1239 :"memory"); | |
1240 d += 16; | |
1241 s += 4; | |
1242 } | |
1243 __asm __volatile(SFENCE:::"memory"); | |
1244 __asm __volatile(EMMS:::"memory"); | |
1245 #endif | |
1246 while(s < end) | |
1247 { | |
9430 | 1248 #if 0 //slightly slower on athlon |
1249 int bgr= *s++; | |
1250 *((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9); | |
1251 #else | |
1252 //FIXME this is very likely wrong for bigendian (and the following converters too) | |
6492 | 1253 register uint16_t bgr; |
1254 bgr = *s++; | |
13423 | 1255 #ifdef WORDS_BIGENDIAN |
1256 *d++ = 0; | |
1257 *d++ = (bgr&0x1F)<<3; | |
1258 *d++ = (bgr&0x3E0)>>2; | |
1259 *d++ = (bgr&0x7C00)>>7; | |
1260 #else | |
6492 | 1261 *d++ = (bgr&0x1F)<<3; |
1262 *d++ = (bgr&0x3E0)>>2; | |
1263 *d++ = (bgr&0x7C00)>>7; | |
1264 *d++ = 0; | |
9430 | 1265 #endif |
13423 | 1266 |
1267 #endif | |
2718 | 1268 } |
6492 | 1269 } |
1270 | |
1271 static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, unsigned src_size) | |
1272 { | |
1273 const uint16_t *end; | |
1274 #ifdef HAVE_MMX | |
1275 const uint16_t *mm_end; | |
2741 | 1276 #endif |
6492 | 1277 uint8_t *d = (uint8_t *)dst; |
1278 const uint16_t *s = (uint16_t *)src; | |
1279 end = s + src_size/2; | |
1280 #ifdef HAVE_MMX | |
1281 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); | |
1282 __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory"); | |
6605 | 1283 mm_end = end - 3; |
6492 | 1284 while(s < mm_end) |
1285 { | |
1286 __asm __volatile( | |
1287 PREFETCH" 32%1\n\t" | |
1288 "movq %1, %%mm0\n\t" | |
1289 "movq %1, %%mm1\n\t" | |
1290 "movq %1, %%mm2\n\t" | |
1291 "pand %2, %%mm0\n\t" | |
1292 "pand %3, %%mm1\n\t" | |
1293 "pand %4, %%mm2\n\t" | |
1294 "psllq $3, %%mm0\n\t" | |
1295 "psrlq $3, %%mm1\n\t" | |
1296 "psrlq $8, %%mm2\n\t" | |
1297 "movq %%mm0, %%mm3\n\t" | |
1298 "movq %%mm1, %%mm4\n\t" | |
1299 "movq %%mm2, %%mm5\n\t" | |
1300 "punpcklwd %%mm7, %%mm0\n\t" | |
1301 "punpcklwd %%mm7, %%mm1\n\t" | |
1302 "punpcklwd %%mm7, %%mm2\n\t" | |
1303 "punpckhwd %%mm7, %%mm3\n\t" | |
1304 "punpckhwd %%mm7, %%mm4\n\t" | |
1305 "punpckhwd %%mm7, %%mm5\n\t" | |
1306 "psllq $8, %%mm1\n\t" | |
1307 "psllq $16, %%mm2\n\t" | |
1308 "por %%mm1, %%mm0\n\t" | |
1309 "por %%mm2, %%mm0\n\t" | |
1310 "psllq $8, %%mm4\n\t" | |
1311 "psllq $16, %%mm5\n\t" | |
1312 "por %%mm4, %%mm3\n\t" | |
1313 "por %%mm5, %%mm3\n\t" | |
1314 MOVNTQ" %%mm0, %0\n\t" | |
1315 MOVNTQ" %%mm3, 8%0\n\t" | |
1316 :"=m"(*d) | |
1317 :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r) | |
1318 :"memory"); | |
1319 d += 16; | |
1320 s += 4; | |
1321 } | |
1322 __asm __volatile(SFENCE:::"memory"); | |
1323 __asm __volatile(EMMS:::"memory"); | |
1324 #endif | |
1325 while(s < end) | |
1326 { | |
1327 register uint16_t bgr; | |
1328 bgr = *s++; | |
13423 | 1329 #ifdef WORDS_BIGENDIAN |
1330 *d++ = 0; | |
1331 *d++ = (bgr&0x1F)<<3; | |
1332 *d++ = (bgr&0x7E0)>>3; | |
1333 *d++ = (bgr&0xF800)>>8; | |
1334 #else | |
6492 | 1335 *d++ = (bgr&0x1F)<<3; |
1336 *d++ = (bgr&0x7E0)>>3; | |
1337 *d++ = (bgr&0xF800)>>8; | |
1338 *d++ = 0; | |
13423 | 1339 #endif |
6492 | 1340 } |
2718 | 1341 } |
2694 | 1342 |
3132 | 1343 static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, unsigned int src_size) |
2755 | 1344 { |
1345 #ifdef HAVE_MMX | |
6492 | 1346 /* TODO: unroll this loop */ |
2755 | 1347 asm volatile ( |
1348 "xor %%"REG_a", %%"REG_a" \n\t" |
1349 ".balign 16 \n\t" |
2755 | 1350 "1: \n\t" |
1351 PREFETCH" 32(%0, %%"REG_a") \n\t" |
1352 "movq (%0, %%"REG_a"), %%mm0 \n\t" |
2755 | 1353 "movq %%mm0, %%mm1 \n\t" |
1354 "movq %%mm0, %%mm2 \n\t" | |
1355 "pslld $16, %%mm0 \n\t" | |
1356 "psrld $16, %%mm1 \n\t" | |
6492 | 1357 "pand "MANGLE(mask32r)", %%mm0 \n\t" |
1358 "pand "MANGLE(mask32g)", %%mm2 \n\t" | |
1359 "pand "MANGLE(mask32b)", %%mm1 \n\t" | |
2755 | 1360 "por %%mm0, %%mm2 \n\t" |
1361 "por %%mm1, %%mm2 \n\t" | |
1362 MOVNTQ" %%mm2, (%1, %%"REG_a") \n\t" |
1363 "add $8, %%"REG_a" \n\t" |
1364 "cmp %2, %%"REG_a" \n\t" |
2755 | 1365 " jb 1b \n\t" |
1366 :: "r" (src), "r"(dst), "r" ((long)src_size-7) |
1367 : "%"REG_a |
2755 | 1368 ); |
2766 | 1369 |
1370 __asm __volatile(SFENCE:::"memory"); | |
1371 __asm __volatile(EMMS:::"memory"); | |
2755 | 1372 #else |
6492 | 1373 unsigned i; |
1374 unsigned num_pixels = src_size >> 2; | |
2755 | 1375 for(i=0; i<num_pixels; i++) |
1376 { | |
1377 #ifdef WORDS_BIGENDIAN |
1378 dst[4*i + 1] = src[4*i + 3]; |
1379 dst[4*i + 2] = src[4*i + 2]; |
1380 dst[4*i + 3] = src[4*i + 1]; |
1381 #else |
1382 dst[4*i + 0] = src[4*i + 2]; |
1383 dst[4*i + 1] = src[4*i + 1]; |
1384 dst[4*i + 2] = src[4*i + 0]; |
1385 #endif |
2755 | 1386 } |
1387 #endif | |
1388 } | |
1389 | |
5582 | 1390 static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, unsigned int src_size) |
1391 { | |
6492 | 1392 unsigned i; |
5582 | 1393 #ifdef HAVE_MMX |
1394 long mmx_size= 23 - src_size; |
5582 | 1395 asm volatile ( |
1396 "movq "MANGLE(mask24r)", %%mm5 \n\t" | |
1397 "movq "MANGLE(mask24g)", %%mm6 \n\t" | |
1398 "movq "MANGLE(mask24b)", %%mm7 \n\t" | |
1399 ".balign 16 \n\t" | |
1400 "1: \n\t" | |
1401 PREFETCH" 32(%1, %%"REG_a") \n\t" |
1402 "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG |
1403 "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG |
1404 "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B |
5582 | 1405 "psllq $16, %%mm0 \n\t" // 00 BGR BGR |
1406 "pand %%mm5, %%mm0 \n\t" | |
1407 "pand %%mm6, %%mm1 \n\t" | |
1408 "pand %%mm7, %%mm2 \n\t" | |
1409 "por %%mm0, %%mm1 \n\t" | |
1410 "por %%mm2, %%mm1 \n\t" | |
1411 "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG |
1412 MOVNTQ" %%mm1, (%2, %%"REG_a")\n\t" // RGB RGB RG |
1413 "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B |
1414 "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR |
5582 | 1415 "pand %%mm7, %%mm0 \n\t" |
1416 "pand %%mm5, %%mm1 \n\t" | |
1417 "pand %%mm6, %%mm2 \n\t" | |
1418 "por %%mm0, %%mm1 \n\t" | |
1419 "por %%mm2, %%mm1 \n\t" | |
1420 "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B |
1421 MOVNTQ" %%mm1, 8(%2, %%"REG_a")\n\t" // B RGB RGB R |
1422 "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR |
1423 "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG |
5582 | 1424 "pand %%mm6, %%mm0 \n\t" |
1425 "pand %%mm7, %%mm1 \n\t" | |
1426 "pand %%mm5, %%mm2 \n\t" | |
1427 "por %%mm0, %%mm1 \n\t" | |
1428 "por %%mm2, %%mm1 \n\t" | |
1429 MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t" |
1430 "add $24, %%"REG_a" \n\t" |
5582 | 1431 " js 1b \n\t" |
1432 : "+a" (mmx_size) | |
1433 : "r" (src-mmx_size), "r"(dst-mmx_size) | |
1434 ); | |
1435 | |
1436 __asm __volatile(SFENCE:::"memory"); | |
1437 __asm __volatile(EMMS:::"memory"); | |
1438 | |
6096 | 1439 if(mmx_size==23) return; //finished, was multiple of 8 |
6492 | 1440 |
5582 | 1441 src+= src_size; |
1442 dst+= src_size; | |
6492 | 1443 src_size= 23-mmx_size; |
5582 | 1444 src-= src_size; |
1445 dst-= src_size; | |
1446 #endif | |
1447 for(i=0; i<src_size; i+=3) | |
1448 { | |
6492 | 1449 register uint8_t x; |
5582 | 1450 x = src[i + 2]; |
1451 dst[i + 1] = src[i + 1]; | |
1452 dst[i + 2] = src[i + 0]; | |
1453 dst[i + 0] = x; | |
1454 } | |
1455 } | |
1456 | |
5588 | 1457 static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, |
2725 | 1458 unsigned int width, unsigned int height, |
9392 | 1459 int lumStride, int chromStride, int dstStride, int vertLumPerChroma) |
2701 | 1460 { |
6492 | 1461 unsigned y; |
1462 const unsigned chromWidth= width>>1; | |
2723 | 1463 for(y=0; y<height; y++) |
1464 { | |
2702 | 1465 #ifdef HAVE_MMX |
2723 | 1466 //FIXME handle 2 lines at once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway) |
1467 asm volatile( | |
1468 "xor %%"REG_a", %%"REG_a" \n\t" |
1469 ".balign 16 \n\t" |
2723 | 1470 "1: \n\t" |
1471 PREFETCH" 32(%1, %%"REG_a", 2) \n\t" |
1472 PREFETCH" 32(%2, %%"REG_a") \n\t" |
1473 PREFETCH" 32(%3, %%"REG_a") \n\t" |
1474 "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0) |
2723 | 1475 "movq %%mm0, %%mm2 \n\t" // U(0) |
1476 "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0) |
2723 | 1477 "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0) |
1478 "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8) | |
1479 | |
1480 "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0) |
1481 "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8) |
2723 | 1482 "movq %%mm3, %%mm4 \n\t" // Y(0) |
1483 "movq %%mm5, %%mm6 \n\t" // Y(8) | |
1484 "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0) | |
1485 "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4) | |
1486 "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8) | |
1487 "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12) | |
2702 | 1488 |
1489 MOVNTQ" %%mm3, (%0, %%"REG_a", 4)\n\t" |
1490 MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t" |
1491 MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4)\n\t" |
1492 MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t" |
2702 | 1493 |
1494 "add $8, %%"REG_a" \n\t" |
1495 "cmp %4, %%"REG_a" \n\t" |
2723 | 1496 " jb 1b \n\t" |
1497 ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" ((long)chromWidth) |
1498 : "%"REG_a |
2723 | 1499 ); |
2702 | 1500 #else |
1501 |
1502 #if defined ARCH_ALPHA && defined HAVE_MVI |
1503 #define pl2yuy2(n) \ |
1504 y1 = yc[n]; \ |
1505 y2 = yc2[n]; \ |
1506 u = uc[n]; \ |
1507 v = vc[n]; \ |
1508 asm("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \ |
1509 asm("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \ |
1510 asm("unpkbl %1, %0" : "=r"(u) : "r"(u)); \ |
1511 asm("unpkbl %1, %0" : "=r"(v) : "r"(v)); \ |
1512 yuv1 = (u << 8) + (v << 24); \ |
1513 yuv2 = yuv1 + y2; \ |
1514 yuv1 += y1; \ |
1515 qdst[n] = yuv1; \ |
1516 qdst2[n] = yuv2; |
1517 |
1518 int i; |
1519 uint64_t *qdst = (uint64_t *) dst; |
1520 uint64_t *qdst2 = (uint64_t *) (dst + dstStride); |
1521 const uint32_t *yc = (uint32_t *) ysrc; |
1522 const uint32_t *yc2 = (uint32_t *) (ysrc + lumStride); |
1523 const uint16_t *uc = (uint16_t*) usrc, *vc = (uint16_t*) vsrc; |
1524 for(i = 0; i < chromWidth; i += 8){ |
1525 uint64_t y1, y2, yuv1, yuv2; |
1526 uint64_t u, v; |
1527 /* Prefetch */ |
1528 asm("ldq $31,64(%0)" :: "r"(yc)); |
1529 asm("ldq $31,64(%0)" :: "r"(yc2)); |
1530 asm("ldq $31,64(%0)" :: "r"(uc)); |
1531 asm("ldq $31,64(%0)" :: "r"(vc)); |
1532 |
1533 pl2yuy2(0); |
1534 pl2yuy2(1); |
1535 pl2yuy2(2); |
1536 pl2yuy2(3); |
1537 |
1538 yc += 4; |
1539 yc2 += 4; |
1540 uc += 4; |
1541 vc += 4; |
1542 qdst += 4; |
1543 qdst2 += 4; |
1544 } |
1545 y++; |
1546 ysrc += lumStride; |
1547 dst += dstStride; |
1548 |
1549 #elif __WORDSIZE >= 64 |
2723 | 1550 int i; |
6492 | 1551 uint64_t *ldst = (uint64_t *) dst; |
1552 const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; | |
1553 for(i = 0; i < chromWidth; i += 2){ | |
1554 uint64_t k, l; | |
1555 k = yc[0] + (uc[0] << 8) + | |
1556 (yc[1] << 16) + (vc[0] << 24); | |
1557 l = yc[2] + (uc[1] << 8) + | |
1558 (yc[3] << 16) + (vc[1] << 24); | |
1559 *ldst++ = k + (l << 32); | |
1560 yc += 4; | |
1561 uc += 2; | |
1562 vc += 2; | |
2723 | 1563 } |
6492 | 1564 |
1565 #else | |
1566 int i, *idst = (int32_t *) dst; | |
1567 const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; | |
1568 for(i = 0; i < chromWidth; i++){ | |
1569 #ifdef WORDS_BIGENDIAN |
1570 *idst++ = (yc[0] << 24)+ (uc[0] << 16) + |
1571 (yc[1] << 8) + (vc[0] << 0); |
1572 #else |
6492 | 1573 *idst++ = yc[0] + (uc[0] << 8) + |
1574 (yc[1] << 16) + (vc[0] << 24); | |
1575 #endif |
6492 | 1576 yc += 2; |
1577 uc++; | |
1578 vc++; | |
1579 } | |
1580 #endif | |
2723 | 1581 #endif |
5588 | 1582 if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) |
2723 | 1583 { |
1584 usrc += chromStride; | |
1585 vsrc += chromStride; | |
1586 } | |
1587 ysrc += lumStride; | |
1588 dst += dstStride; | |
2701 | 1589 } |
2723 | 1590 #ifdef HAVE_MMX |
1591 asm( EMMS" \n\t" | |
1592 SFENCE" \n\t" | |
1593 :::"memory"); | |
2702 | 1594 #endif |
2701 | 1595 } |
1596 | |
2724 | 1597 /** |
1598 * | |
1599 * height should be a multiple of 2 and width should be a multiple of 16 (if this is a | |
1600 * problem for anyone then tell me, and I'll fix it) |
1601 */ | |
5588 | 1602 static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, |
1603 unsigned int width, unsigned int height, | |
9392 | 1604 int lumStride, int chromStride, int dstStride) |
5588 | 1605 { |
1606 //FIXME interpolate chroma | |
1607 RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); | |
1608 } | |
1609 | |
11068 | 1610 static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, |
1611 unsigned int width, unsigned int height, | |
1612 int lumStride, int chromStride, int dstStride, int vertLumPerChroma) | |
1613 { | |
1614 unsigned y; | |
1615 const unsigned chromWidth= width>>1; | |
1616 for(y=0; y<height; y++) | |
1617 { | |
11072 | 1618 #ifdef HAVE_MMX |
1619 //FIXME handle 2 lines at once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway) |
1620 asm volatile( | |
1621 "xor %%"REG_a", %%"REG_a" \n\t" |
11072 | 1622 ".balign 16 \n\t" |
1623 "1: \n\t" | |
1624 PREFETCH" 32(%1, %%"REG_a", 2) \n\t" |
1625 PREFETCH" 32(%2, %%"REG_a") \n\t" |
1626 PREFETCH" 32(%3, %%"REG_a") \n\t" |
1627 "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0) |
11072 | 1628 "movq %%mm0, %%mm2 \n\t" // U(0) |
1629 "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0) |
11072 | 1630 "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0) |
1631 "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8) | |
1632 | |
1633 "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0) |
1634 "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8) |
11072 | 1635 "movq %%mm0, %%mm4 \n\t" // Y(0) |
1636 "movq %%mm2, %%mm6 \n\t" // Y(8) | |
1637 "punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0) | |
1638 "punpckhbw %%mm3, %%mm4 \n\t" // YUYV YUYV(4) | |
1639 "punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8) | |
1640 "punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12) | |
1641 | |
1642 MOVNTQ" %%mm0, (%0, %%"REG_a", 4)\n\t" |
1643 MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t" |
1644 MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4)\n\t" |
1645 MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t" |
11072 | 1646 |
1647 "add $8, %%"REG_a" \n\t" |
1648 "cmp %4, %%"REG_a" \n\t" |
11072 | 1649 " jb 1b \n\t" |
1650 ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" ((long)chromWidth) |
1651 : "%"REG_a |
11072 | 1652 ); |
1653 #else | |
1654 //FIXME adapt the alpha asm code from yv12->yuy2 | |
1655 | |
11068 | 1656 #if __WORDSIZE >= 64 |
1657 int i; | |
1658 uint64_t *ldst = (uint64_t *) dst; | |
1659 const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; | |
1660 for(i = 0; i < chromWidth; i += 2){ | |
1661 uint64_t k, l; | |
1662 k = uc[0] + (yc[0] << 8) + | |
1663 (vc[0] << 16) + (yc[1] << 24); | |
1664 l = uc[1] + (yc[2] << 8) + | |
1665 (vc[1] << 16) + (yc[3] << 24); | |
1666 *ldst++ = k + (l << 32); | |
1667 yc += 4; | |
1668 uc += 2; | |
1669 vc += 2; | |
1670 } | |
1671 | |
1672 #else | |
1673 int i, *idst = (int32_t *) dst; | |
1674 const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; | |
1675 for(i = 0; i < chromWidth; i++){ | |
1676 #ifdef WORDS_BIGENDIAN |
1677 *idst++ = (uc[0] << 24)+ (yc[0] << 16) + |
1678 (vc[0] << 8) + (yc[1] << 0); |
1679 #else |
11068 | 1680 *idst++ = uc[0] + (yc[0] << 8) + |
1681 (vc[0] << 16) + (yc[1] << 24); | |
1682 #endif |
11068 | 1683 yc += 2; |
1684 uc++; | |
1685 vc++; | |
1686 } | |
1687 #endif | |
11072 | 1688 #endif |
11068 | 1689 if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) |
1690 { | |
1691 usrc += chromStride; | |
1692 vsrc += chromStride; | |
1693 } | |
1694 ysrc += lumStride; | |
1695 dst += dstStride; | |
1696 } | |
11072 | 1697 #ifdef HAVE_MMX |
1698 asm( EMMS" \n\t" | |
1699 SFENCE" \n\t" | |
1700 :::"memory"); | |
1701 #endif | |
11068 | 1702 } |
1703 | |
1704 /** | |
1705 * | |
1706 * height should be a multiple of 2 and width should be a multiple of 16 (if this is a | |
1707 * problem for anyone then tell me, and I'll fix it) |
1708 */ | |
1709 static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, | |
1710 unsigned int width, unsigned int height, | |
1711 int lumStride, int chromStride, int dstStride) | |
1712 { | |
1713 //FIXME interpolate chroma | |
1714 RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); | |
1715 } | |
1716 | |
5588 | 1717 /** |
1718 * | |
1719 * width should be a multiple of 16 | |
1720 */ | |
1721 static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, | |
1722 unsigned int width, unsigned int height, | |
9392 | 1723 int lumStride, int chromStride, int dstStride) |
5588 | 1724 { |
1725 RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1); | |
1726 } | |
1727 | |
1728 /** | |
1729 * | |
1730 * height should be a multiple of 2 and width should be a multiple of 16 (if this is a | |
1731 * problem for anyone then tell me, and I'll fix it) |
1732 */ | |
3132 | 1733 static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, |
2725 | 1734 unsigned int width, unsigned int height, |
9392 | 1735 int lumStride, int chromStride, int srcStride) |
2701 | 1736 { |
6492 | 1737 unsigned y; |
1738 const unsigned chromWidth= width>>1; | |
2724 | 1739 for(y=0; y<height; y+=2) |
1740 { | |
2704 | 1741 #ifdef HAVE_MMX |
2724 | 1742 asm volatile( |
1743 "xor %%"REG_a", %%"REG_a" \n\t" |
2724 | 1744 "pcmpeqw %%mm7, %%mm7 \n\t" |
1745 "psrlw $8, %%mm7 \n\t" // FF,00,FF,00... | |
1746 ".balign 16 \n\t" |
2724 | 1747 "1: \n\t" |
1748 PREFETCH" 64(%0, %%"REG_a", 4) \n\t" |
1749 "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0) |
1750 "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4) |
2724 | 1751 "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0) |
1752 "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4) | |
1753 "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0) | |
1754 "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4) | |
1755 "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0) | |
1756 "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4) | |
1757 "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0) | |
1758 "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0) | |
1759 | |
1760 MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t" |
2704 | 1761 |
1762 "movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(8) |
1763 "movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(12) |
2724 | 1764 "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8) |
1765 "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12) | |
1766 "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8) | |
1767 "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12) | |
1768 "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8) | |
1769 "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12) | |
1770 "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8) | |
1771 "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8) | |
2704 | 1772 |
1773 MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t" |
2724 | 1774 |
1775 "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0) | |
1776 "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8) | |
1777 "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0) | |
1778 "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8) | |
1779 "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0) | |
1780 "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8) | |
1781 "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0) | |
1782 "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0) | |
2704 | 1783 |
1784 MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t" |
1785 MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t" |
2724 | 1786 |
1787 "add $8, %%"REG_a" \n\t" |
1788 "cmp %4, %%"REG_a" \n\t" |
2724 | 1789 " jb 1b \n\t" |
1790 ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" ((long)chromWidth) |
1791 : "memory", "%"REG_a |
2725 | 1792 ); |
2704 | 1793 |
2806 | 1794 ydst += lumStride; |
1795 src += srcStride; | |
1796 | |
2725 | 1797 asm volatile( |
1798 "xor %%"REG_a", %%"REG_a" \n\t" |
1799 ".balign 16 \n\t" |
2724 | 1800 "1: \n\t" |
1801 PREFETCH" 64(%0, %%"REG_a", 4) \n\t" |
1802 "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0) |
1803 "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4) |
1804 "movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(8) |
1805 "movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // YUYV YUYV(12) |
2724 | 1806 "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0) |
1807 "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4) | |
1808 "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8) | |
1809 "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12) | |
1810 "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0) | |
1811 "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8) | |
2704 | 1812 |
1813 MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t" |
1814 MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t" |
2724 | 1815 |
1816 "add $8, %%"REG_a" \n\t" |
1817 "cmp %4, %%"REG_a" \n\t" |
2724 | 1818 " jb 1b \n\t" |
2704 | 1819 |
1820 ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" ((long)chromWidth) |
1821 : "memory", "%"REG_a |
2724 | 1822 ); |
2704 | 1823 #else |
6492 | 1824 unsigned i; |
2724 | 1825 for(i=0; i<chromWidth; i++) |
1826 { | |
1827 ydst[2*i+0] = src[4*i+0]; | |
1828 udst[i] = src[4*i+1]; | |
1829 ydst[2*i+1] = src[4*i+2]; | |
1830 vdst[i] = src[4*i+3]; | |
1831 } | |
1832 ydst += lumStride; | |
1833 src += srcStride; | |
1834 | |
1835 for(i=0; i<chromWidth; i++) | |
1836 { | |
1837 ydst[2*i+0] = src[4*i+0]; | |
1838 ydst[2*i+1] = src[4*i+2]; | |
1839 } | |
1840 #endif | |
1841 udst += chromStride; | |
1842 vdst += chromStride; | |
1843 ydst += lumStride; | |
1844 src += srcStride; | |
2701 | 1845 } |
2724 | 1846 #ifdef HAVE_MMX |
2847 | 1847 asm volatile( EMMS" \n\t" |
1848 SFENCE" \n\t" | |
1849 :::"memory"); | |
2704 | 1850 #endif |
2723 | 1851 } |
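
/*
 * Added usage sketch (not part of the original file): one way a call to the
 * YUY2 -> YV12 converter above could be wired up for a tightly packed frame.
 * The strides chosen here (lumStride = width, chromStride = width/2,
 * srcStride = 2*width) and the helper name are assumptions of this example;
 * width must be a multiple of 16 and height a multiple of 2 as noted above.
 */
static inline void example_yuy2toyv12_packed(const uint8_t *yuy2,
                                             uint8_t *y, uint8_t *u, uint8_t *v,
                                             unsigned int width, unsigned int height)
{
	RENAME(yuy2toyv12)(yuy2, y, u, v, width, height,
	                   width,      /* lumStride: one luma byte per pixel */
	                   width>>1,   /* chromStride: U,V are horizontally subsampled */
	                   width<<1);  /* srcStride: YUY2 packs 2 bytes per pixel */
}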
2801 | 1852 |
1853 static inline void RENAME(yvu9toyv12)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, |
1854 uint8_t *ydst, uint8_t *udst, uint8_t *vdst, |
9392 | 1855 unsigned int width, unsigned int height, int lumStride, int chromStride) |
1856 { |
1857 /* Y Plane */ |
1858 memcpy(ydst, ysrc, width*height); |
1859 |
1860 /* XXX: implement upscaling for U,V */ |
1861 } |
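
/*
 * Hedged sketch only (not the original implementation): one possible
 * point-sampled fill-in for the missing U/V upscaling above. YVU9 chroma is
 * subsampled 4x4 while YV12 chroma is 2x2, so each source chroma sample maps
 * to a 2x2 block of destination samples. Assumes width and height are
 * multiples of 4 and that both chroma planes are tightly packed.
 */
static inline void example_yvu9_chroma_upsample(const uint8_t *src, uint8_t *dst,
                                                unsigned int width, unsigned int height)
{
	/* src plane is (width/4) x (height/4), dst plane is (width/2) x (height/2) */
	unsigned int x, y;
	const unsigned int dstW= width>>1, dstH= height>>1, srcW= width>>2;
	for(y=0; y<dstH; y++)
		for(x=0; x<dstW; x++)
			dst[y*dstW + x]= src[(y>>1)*srcW + (x>>1)];
}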
1862 |
1863 static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWidth, int srcHeight, int srcStride, int dstStride) |
1864 { |
1865 int x,y; |
1866 |
9256 | 1867 dst[0]= src[0]; |
1868 | |
1869 // first line |
9256 | 1870 for(x=0; x<srcWidth-1; x++){ |
1871 dst[2*x+1]= (3*src[x] + src[x+1])>>2; | |
1872 dst[2*x+2]= ( src[x] + 3*src[x+1])>>2; | |
1873 } |
9256 | 1874 dst[2*srcWidth-1]= src[srcWidth-1]; |
1875 | |
1876 dst+= dstStride; | |
1877 |
1878 for(y=1; y<srcHeight; y++){ |
1879 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) |
1880 const long mmxSize= srcWidth&~15; |
1881 asm volatile( |
1882 "mov %4, %%"REG_a" \n\t" |
1883 "1: \n\t" |
1884 "movq (%0, %%"REG_a"), %%mm0 \n\t" |
1885 "movq (%1, %%"REG_a"), %%mm1 \n\t" |
1886 "movq 1(%0, %%"REG_a"), %%mm2 \n\t" |
1887 "movq 1(%1, %%"REG_a"), %%mm3 \n\t" |
1888 "movq -1(%0, %%"REG_a"), %%mm4 \n\t" |
1889 "movq -1(%1, %%"REG_a"), %%mm5 \n\t" |
9256 | 1890 PAVGB" %%mm0, %%mm5 \n\t" |
1891 PAVGB" %%mm0, %%mm3 \n\t" | |
1892 PAVGB" %%mm0, %%mm5 \n\t" | |
1893 PAVGB" %%mm0, %%mm3 \n\t" | |
1894 PAVGB" %%mm1, %%mm4 \n\t" | |
1895 PAVGB" %%mm1, %%mm2 \n\t" | |
1896 PAVGB" %%mm1, %%mm4 \n\t" | |
1897 PAVGB" %%mm1, %%mm2 \n\t" | |
1898 "movq %%mm5, %%mm7 \n\t" | |
1899 "movq %%mm4, %%mm6 \n\t" | |
1900 "punpcklbw %%mm3, %%mm5 \n\t" | |
1901 "punpckhbw %%mm3, %%mm7 \n\t" | |
1902 "punpcklbw %%mm2, %%mm4 \n\t" | |
1903 "punpckhbw %%mm2, %%mm6 \n\t" | |
1904 #if 1 |
1905 MOVNTQ" %%mm5, (%2, %%"REG_a", 2)\n\t" |
1906 MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2)\n\t" |
1907 MOVNTQ" %%mm4, (%3, %%"REG_a", 2)\n\t" |
1908 MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2)\n\t" |
1909 #else |
1910 "movq %%mm5, (%2, %%"REG_a", 2) \n\t" |
1911 "movq %%mm7, 8(%2, %%"REG_a", 2)\n\t" |
1912 "movq %%mm4, (%3, %%"REG_a", 2) \n\t" |
1913 "movq %%mm6, 8(%3, %%"REG_a", 2)\n\t" |
1914 #endif |
1915 "add $8, %%"REG_a" \n\t" |
1916 " js 1b \n\t" |
9256 | 1917 :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ), |
1918 "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2), |
1919 "g" (-mmxSize) |
1920 : "%"REG_a |
1921 |
1922 ); |
1923 #else |
9256 | 1924 const int mmxSize=1; |
1925 #endif | |
1926 dst[0 ]= (3*src[0] + src[srcStride])>>2; | |
1927 dst[dstStride]= ( src[0] + 3*src[srcStride])>>2; | |
1928 |
9256 | 1929 for(x=mmxSize-1; x<srcWidth-1; x++){ |
1930 dst[2*x +1]= (3*src[x+0] + src[x+srcStride+1])>>2; |
1931 dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2; |
1932 dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2; |
1933 dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2; |
1934 } |
9256 | 1935 dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2; |
1936 dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2; | |
1937 |
1938 dst+=dstStride*2; |
1939 src+=srcStride; |
1940 } |
1941 |
1942 // last line |
9256 | 1943 #if 1 |
1944 dst[0]= src[0]; | |
1945 | |
1946 for(x=0; x<srcWidth-1; x++){ | |
1947 dst[2*x+1]= (3*src[x] + src[x+1])>>2; | |
1948 dst[2*x+2]= ( src[x] + 3*src[x+1])>>2; | |
1949 } | |
1950 dst[2*srcWidth-1]= src[srcWidth-1]; | |
1951 #else | |
1952 for(x=0; x<srcWidth; x++){ |
1953 dst[2*x+0]= |
1954 dst[2*x+1]= src[x]; |
1955 } |
9256 | 1956 #endif |
1957 | |
1958 #ifdef HAVE_MMX |
1959 asm volatile( EMMS" \n\t" |
1960 SFENCE" \n\t" |
1961 :::"memory"); |
1962 #endif |
1963 } |
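
/*
 * Added note (illustrative helper, not original code): the 3:1 and 1:3
 * weights used above are plain linear interpolation at the 1/4 and 3/4
 * positions between two neighbouring source samples. The scalar helper
 * below reproduces e.g. dst[2*x+1] of the first-line loop:
 * example_interp_3_1(src[x], src[x+1]).
 */
static inline uint8_t example_interp_3_1(uint8_t a, uint8_t b)
{
	return (uint8_t)((3*a + b)>>2); /* same truncating arithmetic as the C path above */
}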
1964 |
2801 | 1965 /** |
1966 * | |
1967 * height should be a multiple of 2 and width should be a multiple of 16 (if this is a | |
1968 * problem for anyone then tell me, and I'll fix it)
3132 | 1969 * chrominance data is only taken from every second line, others are ignored; FIXME: write HQ version
2801 | 1970 */ |
3132 | 1971 static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, |
2801 | 1972 unsigned int width, unsigned int height, |
9392 | 1973 int lumStride, int chromStride, int srcStride) |
2801 | 1974 { |
6492 | 1975 unsigned y; |
1976 const unsigned chromWidth= width>>1; | |
2801 | 1977 for(y=0; y<height; y+=2) |
1978 { | |
2847 | 1979 #ifdef HAVE_MMX |
1980 asm volatile( | |
1981 "xorl %%eax, %%eax \n\t" | |
1982 "pcmpeqw %%mm7, %%mm7 \n\t" | |
1983 "psrlw $8, %%mm7 \n\t" // FF,00,FF,00... | |
1984 ".balign 16 \n\t" | |
1985 "1: \n\t" | |
1986 PREFETCH" 64(%0, %%eax, 4) \n\t" | |
1987 "movq (%0, %%eax, 4), %%mm0 \n\t" // UYVY UYVY(0) | |
1988 "movq 8(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(4) | |
1989 "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0) | |
1990 "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4) | |
1991 "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0) | |
1992 "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4) | |
1993 "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0) | |
1994 "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4) | |
1995 "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0) | |
1996 "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0) | |
1997 | |
1998 MOVNTQ" %%mm2, (%1, %%eax, 2) \n\t" | |
1999 | |
2000 "movq 16(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(8) | |
2001 "movq 24(%0, %%eax, 4), %%mm2 \n\t" // UYVY UYVY(12) | |
2002 "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8) | |
2003 "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12) | |
2004 "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8) | |
2005 "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12) | |
2006 "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8) | |
2007 "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12) | |
2008 "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8) | |
2009 "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8) | |
2010 | |
2011 MOVNTQ" %%mm3, 8(%1, %%eax, 2) \n\t" | |
2012 | |
2013 "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0) | |
2014 "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8) | |
2015 "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0) | |
2016 "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8) | |
2017 "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0) | |
2018 "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8) | |
2019 "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0) | |
2020 "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0) | |
2021 | |
2022 MOVNTQ" %%mm0, (%3, %%eax) \n\t" | |
2023 MOVNTQ" %%mm2, (%2, %%eax) \n\t" | |
2024 | |
2025 "addl $8, %%eax \n\t" | |
2026 "cmpl %4, %%eax \n\t" | |
2027 " jb 1b \n\t" | |
9394 | 2028 ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) |
2847 | 2029 : "memory", "%eax" |
2030 ); | |
2031 | |
2032 ydst += lumStride; | |
2033 src += srcStride; | |
2034 | |
2035 asm volatile( | |
2036 "xorl %%eax, %%eax \n\t" | |
2037 ".balign 16 \n\t" | |
2038 "1: \n\t" | |
2039 PREFETCH" 64(%0, %%eax, 4) \n\t" | |
2040 "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0) | |
2041 "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4) | |
2042 "movq 16(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(8) | |
2043 "movq 24(%0, %%eax, 4), %%mm3 \n\t" // YUYV YUYV(12) | |
2044 "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0) | |
2045 "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4) | |
2046 "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8) | |
2047 "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12) | |
2048 "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0) | |
2049 "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8) | |
2050 | |
2051 MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t" | |
2052 MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t" | |
2053 | |
2054 "addl $8, %%eax \n\t" | |
2055 "cmpl %4, %%eax \n\t" | |
2056 " jb 1b \n\t" | |
2057 | |
9394 | 2058 ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) |
2847 | 2059 : "memory", "%eax" |
2060 ); | |
2061 #else | |
6492 | 2062 unsigned i; |
2801 | 2063 for(i=0; i<chromWidth; i++) |
2064 { | |
2065 udst[i] = src[4*i+0]; | |
2066 ydst[2*i+0] = src[4*i+1]; | |
2067 vdst[i] = src[4*i+2]; | |
2068 ydst[2*i+1] = src[4*i+3]; | |
2069 } | |
2070 ydst += lumStride; | |
2071 src += srcStride; | |
2072 | |
2073 for(i=0; i<chromWidth; i++) | |
2074 { | |
2075 ydst[2*i+0] = src[4*i+1]; | |
2076 ydst[2*i+1] = src[4*i+3]; | |
2077 } | |
2847 | 2078 #endif |
2801 | 2079 udst += chromStride; |
2080 vdst += chromStride; | |
2081 ydst += lumStride; | |
2082 src += srcStride; | |
2083 } | |
2847 | 2084 #ifdef HAVE_MMX |
2085 asm volatile( EMMS" \n\t" | |
2086 SFENCE" \n\t" | |
2087 :::"memory"); | |
2088 #endif | |
2801 | 2089 } |
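
/*
 * Added illustration (not original code): UYVY carries the same 4:2:2 data
 * as YUY2 with the luma and chroma byte positions swapped. For one 2-pixel
 * macropixel the scalar split performed by the C fallback above is:
 */
static inline void example_split_uyvy_macropixel(const uint8_t s[4],
                                                 uint8_t *y0, uint8_t *y1,
                                                 uint8_t *u, uint8_t *v)
{
	*u  = s[0];  /* for YUY2 the byte order would be Y0 U Y1 V instead */
	*y0 = s[1];
	*v  = s[2];
	*y1 = s[3];
}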
2090 | |
3132 | 2091 /** |
2092 * | |
2093 * height should be a multiple of 2 and width should be a multiple of 2 (if this is a | |
2094 * problem for anyone then tell me, and I'll fix it)
4622 | 2095 * chrominance data is only taken from every second line, others are ignored in the C version; FIXME: write HQ version
3132 | 2096 */ |
2097 static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, | |
2098 unsigned int width, unsigned int height, | |
9392 | 2099 int lumStride, int chromStride, int srcStride) |
3132 | 2100 { |
6492 | 2101 unsigned y; |
2102 const unsigned chromWidth= width>>1; | |
4622 | 2103 #ifdef HAVE_MMX |
2104 for(y=0; y<height-2; y+=2) | |
2105 { | |
6492 | 2106 unsigned i; |
4622 | 2107 for(i=0; i<2; i++) |
2108 { | |
2109 asm volatile( | |
2110 "mov %2, %%"REG_a" \n\t" |
4923 | 2111 "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t" |
2112 "movq "MANGLE(w1111)", %%mm5 \n\t" | |
4622 | 2113 "pxor %%mm7, %%mm7 \n\t" |
2114 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" |
4622 | 2115 ".balign 16 \n\t" |
2116 "1: \n\t" | |
2117 PREFETCH" 64(%0, %%"REG_b") \n\t" |
2118 "movd (%0, %%"REG_b"), %%mm0 \n\t" |
2119 "movd 3(%0, %%"REG_b"), %%mm1 \n\t" |
4622 | 2120 "punpcklbw %%mm7, %%mm0 \n\t" |
2121 "punpcklbw %%mm7, %%mm1 \n\t" | |
2122 "movd 6(%0, %%"REG_b"), %%mm2 \n\t" |
2123 "movd 9(%0, %%"REG_b"), %%mm3 \n\t" |
4622 | 2124 "punpcklbw %%mm7, %%mm2 \n\t" |
2125 "punpcklbw %%mm7, %%mm3 \n\t" | |
2126 "pmaddwd %%mm6, %%mm0 \n\t" | |
2127 "pmaddwd %%mm6, %%mm1 \n\t" | |
2128 "pmaddwd %%mm6, %%mm2 \n\t" | |
2129 "pmaddwd %%mm6, %%mm3 \n\t" | |
2130 #ifndef FAST_BGR2YV12 | |
2131 "psrad $8, %%mm0 \n\t" | |
2132 "psrad $8, %%mm1 \n\t" | |
2133 "psrad $8, %%mm2 \n\t" | |
2134 "psrad $8, %%mm3 \n\t" | |
2135 #endif | |
2136 "packssdw %%mm1, %%mm0 \n\t" | |
2137 "packssdw %%mm3, %%mm2 \n\t" | |
2138 "pmaddwd %%mm5, %%mm0 \n\t" | |
2139 "pmaddwd %%mm5, %%mm2 \n\t" | |
2140 "packssdw %%mm2, %%mm0 \n\t" | |
2141 "psraw $7, %%mm0 \n\t" | |
2142 | |
2143 "movd 12(%0, %%"REG_b"), %%mm4 \n\t" |
2144 "movd 15(%0, %%"REG_b"), %%mm1 \n\t" |
4622 | 2145 "punpcklbw %%mm7, %%mm4 \n\t" |
2146 "punpcklbw %%mm7, %%mm1 \n\t" | |
2147 "movd 18(%0, %%"REG_b"), %%mm2 \n\t" |
2148 "movd 21(%0, %%"REG_b"), %%mm3 \n\t" |
4622 | 2149 "punpcklbw %%mm7, %%mm2 \n\t" |
2150 "punpcklbw %%mm7, %%mm3 \n\t" | |
2151 "pmaddwd %%mm6, %%mm4 \n\t" | |
2152 "pmaddwd %%mm6, %%mm1 \n\t" | |
2153 "pmaddwd %%mm6, %%mm2 \n\t" | |
2154 "pmaddwd %%mm6, %%mm3 \n\t" | |
2155 #ifndef FAST_BGR2YV12 | |
2156 "psrad $8, %%mm4 \n\t" | |
2157 "psrad $8, %%mm1 \n\t" | |
2158 "psrad $8, %%mm2 \n\t" | |
2159 "psrad $8, %%mm3 \n\t" | |
2160 #endif | |
2161 "packssdw %%mm1, %%mm4 \n\t" | |
2162 "packssdw %%mm3, %%mm2 \n\t" | |
2163 "pmaddwd %%mm5, %%mm4 \n\t" | |
2164 "pmaddwd %%mm5, %%mm2 \n\t" | |
2165 "add $24, %%"REG_b" \n\t" |
4622 | 2166 "packssdw %%mm2, %%mm4 \n\t" |
2167 "psraw $7, %%mm4 \n\t" | |
2168 | |
2169 "packuswb %%mm4, %%mm0 \n\t" | |
4923 | 2170 "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t" |
4622 | 2171 |
2172 MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t" |
2173 "add $8, %%"REG_a" \n\t" |
4622 | 2174 " js 1b \n\t" |
2175 : : "r" (src+width*3), "r" (ydst+width), "g" ((long)-width) |
2176 : "%"REG_a, "%"REG_b |
4622 | 2177 ); |
2178 ydst += lumStride; | |
2179 src += srcStride; | |
2180 } | |
2181 src -= srcStride*2; | |
2182 asm volatile( | |
2183 "mov %4, %%"REG_a" \n\t" |
4923 | 2184 "movq "MANGLE(w1111)", %%mm5 \n\t" |
2185 "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t" | |
4622 | 2186 "pxor %%mm7, %%mm7 \n\t" |
2187 "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" |
2188 "add %%"REG_b", %%"REG_b" \n\t" |
4622 | 2189 ".balign 16 \n\t" |
2190 "1: \n\t" | |
2191 PREFETCH" 64(%0, %%"REG_b") \n\t" |
2192 PREFETCH" 64(%1, %%"REG_b") \n\t" |
4622 | 2193 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) |
2194 "movq (%0, %%"REG_b"), %%mm0 \n\t" |
2195 "movq (%1, %%"REG_b"), %%mm1 \n\t" |
2196 "movq 6(%0, %%"REG_b"), %%mm2 \n\t" |
2197 "movq 6(%1, %%"REG_b"), %%mm3 \n\t" |
4622 | 2198 PAVGB" %%mm1, %%mm0 \n\t" |
2199 PAVGB" %%mm3, %%mm2 \n\t" | |
2200 "movq %%mm0, %%mm1 \n\t" | |
2201 "movq %%mm2, %%mm3 \n\t" | |
2202 "psrlq $24, %%mm0 \n\t" | |
2203 "psrlq $24, %%mm2 \n\t" | |
2204 PAVGB" %%mm1, %%mm0 \n\t" | |
2205 PAVGB" %%mm3, %%mm2 \n\t" | |
2206 "punpcklbw %%mm7, %%mm0 \n\t" | |
2207 "punpcklbw %%mm7, %%mm2 \n\t" | |
2208 #else | |
2209 "movd (%0, %%"REG_b"), %%mm0 \n\t" |
2210 "movd (%1, %%"REG_b"), %%mm1 \n\t" |
2211 "movd 3(%0, %%"REG_b"), %%mm2 \n\t" |
2212 "movd 3(%1, %%"REG_b"), %%mm3 \n\t" |
4622 | 2213 "punpcklbw %%mm7, %%mm0 \n\t" |
2214 "punpcklbw %%mm7, %%mm1 \n\t" | |
2215 "punpcklbw %%mm7, %%mm2 \n\t" | |
2216 "punpcklbw %%mm7, %%mm3 \n\t" | |
2217 "paddw %%mm1, %%mm0 \n\t" | |
2218 "paddw %%mm3, %%mm2 \n\t" | |
2219 "paddw %%mm2, %%mm0 \n\t" | |
2220 "movd 6(%0, %%"REG_b"), %%mm4 \n\t" |
2221 "movd 6(%1, %%"REG_b"), %%mm1 \n\t" |
2222 "movd 9(%0, %%"REG_b"), %%mm2 \n\t" |
2223 "movd 9(%1, %%"REG_b"), %%mm3 \n\t" |
4622 | 2224 "punpcklbw %%mm7, %%mm4 \n\t" |
2225 "punpcklbw %%mm7, %%mm1 \n\t" | |
2226 "punpcklbw %%mm7, %%mm2 \n\t" | |
2227 "punpcklbw %%mm7, %%mm3 \n\t" | |
2228 "paddw %%mm1, %%mm4 \n\t" | |
2229 "paddw %%mm3, %%mm2 \n\t" | |
2230 "paddw %%mm4, %%mm2 \n\t" | |
2231 "psrlw $2, %%mm0 \n\t" | |
2232 "psrlw $2, %%mm2 \n\t" | |
2233 #endif | |
4923 | 2234 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" |
2235 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" | |
4622 | 2236 |
2237 "pmaddwd %%mm0, %%mm1 \n\t" | |
2238 "pmaddwd %%mm2, %%mm3 \n\t" | |
2239 "pmaddwd %%mm6, %%mm0 \n\t" | |
2240 "pmaddwd %%mm6, %%mm2 \n\t" | |
2241 #ifndef FAST_BGR2YV12 | |
2242 "psrad $8, %%mm0 \n\t" | |
2243 "psrad $8, %%mm1 \n\t" | |
2244 "psrad $8, %%mm2 \n\t" | |
2245 "psrad $8, %%mm3 \n\t" | |
2246 #endif | |
2247 "packssdw %%mm2, %%mm0 \n\t" | |
2248 "packssdw %%mm3, %%mm1 \n\t" | |
2249 "pmaddwd %%mm5, %%mm0 \n\t" | |
2250 "pmaddwd %%mm5, %%mm1 \n\t" | |
2251 "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0 | |
2252 "psraw $7, %%mm0 \n\t" | |
2253 | |
2254 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW) | |
2255 "movq 12(%0, %%"REG_b"), %%mm4 \n\t" |
2256 "movq 12(%1, %%"REG_b"), %%mm1 \n\t" |
2257 "movq 18(%0, %%"REG_b"), %%mm2 \n\t" |
2258 "movq 18(%1, %%"REG_b"), %%mm3 \n\t" |
4622 | 2259 PAVGB" %%mm1, %%mm4 \n\t" |
2260 PAVGB" %%mm3, %%mm2 \n\t" | |
2261 "movq %%mm4, %%mm1 \n\t" | |
2262 "movq %%mm2, %%mm3 \n\t" | |
2263 "psrlq $24, %%mm4 \n\t" | |
2264 "psrlq $24, %%mm2 \n\t" | |
2265 PAVGB" %%mm1, %%mm4 \n\t" | |
2266 PAVGB" %%mm3, %%mm2 \n\t" | |
2267 "punpcklbw %%mm7, %%mm4 \n\t" | |
2268 "punpcklbw %%mm7, %%mm2 \n\t" | |
2269 #else | |
2270 "movd 12(%0, %%"REG_b"), %%mm4 \n\t" |
2271 "movd 12(%1, %%"REG_b"), %%mm1 \n\t" |
2272 "movd 15(%0, %%"REG_b"), %%mm2 \n\t" |
2273 "movd 15(%1, %%"REG_b"), %%mm3 \n\t" |
4622 | 2274 "punpcklbw %%mm7, %%mm4 \n\t" |
2275 "punpcklbw %%mm7, %%mm1 \n\t" | |
2276 "punpcklbw %%mm7, %%mm2 \n\t" | |
2277 "punpcklbw %%mm7, %%mm3 \n\t" | |
2278 "paddw %%mm1, %%mm4 \n\t" | |
2279 "paddw %%mm3, %%mm2 \n\t" | |
2280 "paddw %%mm2, %%mm4 \n\t" | |
2281 "movd 18(%0, %%"REG_b"), %%mm5 \n\t" |
2282 "movd 18(%1, %%"REG_b"), %%mm1 \n\t" |
2283 "movd 21(%0, %%"REG_b"), %%mm2 \n\t" |
2284 "movd 21(%1, %%"REG_b"), %%mm3 \n\t" |
4622 | 2285 "punpcklbw %%mm7, %%mm5 \n\t" |
2286 "punpcklbw %%mm7, %%mm1 \n\t" | |
2287 "punpcklbw %%mm7, %%mm2 \n\t" | |
2288 "punpcklbw %%mm7, %%mm3 \n\t" | |
2289 "paddw %%mm1, %%mm5 \n\t" | |
2290 "paddw %%mm3, %%mm2 \n\t" | |
2291 "paddw %%mm5, %%mm2 \n\t" | |
4923 | 2292 "movq "MANGLE(w1111)", %%mm5 \n\t" |
4622 | 2293 "psrlw $2, %%mm4 \n\t" |
2294 "psrlw $2, %%mm2 \n\t" | |
2295 #endif | |
4923 | 2296 "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t" |
2297 "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t" | |
4622 | 2298 |
2299 "pmaddwd %%mm4, %%mm1 \n\t" | |
2300 "pmaddwd %%mm2, %%mm3 \n\t" | |
2301 "pmaddwd %%mm6, %%mm4 \n\t" | |
2302 "pmaddwd %%mm6, %%mm2 \n\t" | |
2303 #ifndef FAST_BGR2YV12 | |
2304 "psrad $8, %%mm4 \n\t" | |
2305 "psrad $8, %%mm1 \n\t" | |
2306 "psrad $8, %%mm2 \n\t" | |
2307 "psrad $8, %%mm3 \n\t" | |
2308 #endif | |
2309 "packssdw %%mm2, %%mm4 \n\t" | |
2310 "packssdw %%mm3, %%mm1 \n\t" | |
2311 "pmaddwd %%mm5, %%mm4 \n\t" | |
2312 "pmaddwd %%mm5, %%mm1 \n\t" | |
2313 "add $24, %%"REG_b" \n\t" |
4622 | 2314 "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2 |
2315 "psraw $7, %%mm4 \n\t" | |
2316 | |
2317 "movq %%mm0, %%mm1 \n\t" | |
2318 "punpckldq %%mm4, %%mm0 \n\t" | |
2319 "punpckhdq %%mm4, %%mm1 \n\t" | |
2320 "packsswb %%mm1, %%mm0 \n\t" | |
4923 | 2321 "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t" |
2322 "movd %%mm0, (%2, %%"REG_a") \n\t" |
4622 | 2323 "punpckhdq %%mm0, %%mm0 \n\t" |
2324 "movd %%mm0, (%3, %%"REG_a") \n\t" |
2325 "add $4, %%"REG_a" \n\t" |
4622 | 2326 " js 1b \n\t" |
2327 : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" ((long)-chromWidth) |
2328 : "%"REG_a, "%"REG_b |
4622 | 2329 ); |
2330 | |
2331 udst += chromStride; | |
2332 vdst += chromStride; | |
2333 src += srcStride*2; | |
2334 } | |
2335 | |
2336 asm volatile( EMMS" \n\t" | |
2337 SFENCE" \n\t" | |
2338 :::"memory"); | |
2339 #else | |
2340 y=0; | |
2341 #endif | |
2342 for(; y<height; y+=2) | |
3132 | 2343 { |
6492 | 2344 unsigned i; |
3132 | 2345 for(i=0; i<chromWidth; i++) |
2346 { | |
2347 unsigned int b= src[6*i+0]; | |
2348 unsigned int g= src[6*i+1]; | |
2349 unsigned int r= src[6*i+2]; | |
2801 | 2350 |
3633 | 2351 unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; |
2352 unsigned int V = ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128; | |
2353 unsigned int U = ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128; | |
3132 | 2354 |
2355 udst[i] = U; | |
2356 vdst[i] = V; | |
2357 ydst[2*i] = Y; | |
2358 | |
2359 b= src[6*i+3]; | |
2360 g= src[6*i+4]; | |
2361 r= src[6*i+5]; | |
2362 | |
3633 | 2363 Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; |
3132 | 2364 ydst[2*i+1] = Y; |
2365 } | |
2366 ydst += lumStride; | |
2367 src += srcStride; | |
2368 | |
2369 for(i=0; i<chromWidth; i++) | |
2370 { | |
2371 unsigned int b= src[6*i+0]; | |
2372 unsigned int g= src[6*i+1]; | |
2373 unsigned int r= src[6*i+2]; | |
2374 | |
3633 | 2375 unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; |
3132 | 2376 |
2377 ydst[2*i] = Y; | |
2378 | |
2379 b= src[6*i+3]; | |
2380 g= src[6*i+4]; | |
2381 r= src[6*i+5]; | |
2382 | |
3633 | 2383 Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; |
3132 | 2384 ydst[2*i+1] = Y; |
2385 } | |
2386 udst += chromStride; | |
2387 vdst += chromStride; | |
2388 ydst += lumStride; | |
2389 src += srcStride; | |
2390 } | |
2391 } | |
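
/*
 * Hedged sketch of the "HQ version" the FIXME above asks for; this is an
 * assumption, not the original implementation. Instead of sampling chroma
 * from every second line only, average B,G,R over the full 2x2 block (two
 * pixels from each of the two source lines) before applying the same
 * RU/GU/BU and RV/GV/BV coefficients and RGB2YUV_SHIFT used by the C
 * fallback above. line0/line1 point at the two BGR24 rows of one block row.
 */
static inline void example_rgb24_chroma_hq(const uint8_t *line0, const uint8_t *line1,
                                           uint8_t *udst, uint8_t *vdst,
                                           unsigned chromWidth)
{
	unsigned i;
	for(i=0; i<chromWidth; i++)
	{
		int b= (line0[6*i+0] + line0[6*i+3] + line1[6*i+0] + line1[6*i+3] + 2)>>2;
		int g= (line0[6*i+1] + line0[6*i+4] + line1[6*i+1] + line1[6*i+4] + 2)>>2;
		int r= (line0[6*i+2] + line0[6*i+5] + line1[6*i+2] + line1[6*i+5] + 2)>>2;

		udst[i]= ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128;
		vdst[i]= ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128;
	}
}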
5337 | 2392 |
2393 void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest, | |
9392 | 2394 unsigned width, unsigned height, int src1Stride, |
2395 int src2Stride, int dstStride){ | |
6492 | 2396 unsigned h; |
5337 | 2397 |
2398 for(h=0; h < height; h++) | |
2399 { | |
6492 | 2400 unsigned w; |
5337 | 2401 |
2402 #ifdef HAVE_MMX | |
2403 #ifdef HAVE_SSE2 | |
2404 asm( | |
2405 "xor %%"REG_a", %%"REG_a" \n\t" |
5337 | 2406 "1: \n\t" |
2407 PREFETCH" 64(%1, %%"REG_a") \n\t" |
2408 PREFETCH" 64(%2, %%"REG_a") \n\t" |
2409 "movdqa (%1, %%"REG_a"), %%xmm0 \n\t" |
2410 "movdqa (%1, %%"REG_a"), %%xmm1 \n\t" |
2411 "movdqa (%2, %%"REG_a"), %%xmm2 \n\t" |
5337 | 2412 "punpcklbw %%xmm2, %%xmm0 \n\t" |
2413 "punpckhbw %%xmm2, %%xmm1 \n\t" | |
2414 "movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t" |
2415 "movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t" |
2416 "add $16, %%"REG_a" \n\t" |
2417 "cmp %3, %%"REG_a" \n\t" |
5337 | 2418 " jb 1b \n\t" |
2419 ::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15) |
2420 : "memory", "%"REG_a"" |
5337 | 2421 ); |
2422 #else | |
2423 asm( | |
2424 "xor %%"REG_a", %%"REG_a" \n\t" |
5337 | 2425 "1: \n\t" |
2426 PREFETCH" 64(%1, %%"REG_a") \n\t" |
2427 PREFETCH" 64(%2, %%"REG_a") \n\t" |
2428 "movq (%1, %%"REG_a"), %%mm0 \n\t" |
2429 "movq 8(%1, %%"REG_a"), %%mm2 \n\t" |
5337 | 2430 "movq %%mm0, %%mm1 \n\t" |
2431 "movq %%mm2, %%mm3 \n\t" | |
2432 "movq (%2, %%"REG_a"), %%mm4 \n\t" |
2433 "movq 8(%2, %%"REG_a"), %%mm5 \n\t" |
5337 | 2434 "punpcklbw %%mm4, %%mm0 \n\t" |
2435 "punpckhbw %%mm4, %%mm1 \n\t" | |
2436 "punpcklbw %%mm5, %%mm2 \n\t" | |
2437 "punpckhbw %%mm5, %%mm3 \n\t" | |
2438 MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t" |
2439 MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t" |
2440 MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t" |
2441 MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t" |
2442 "add $16, %%"REG_a" \n\t" |
2443 "cmp %3, %%"REG_a" \n\t" |
5337 | 2444 " jb 1b \n\t" |
2445 ::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15) |
2446 : "memory", "%"REG_a |
5337 | 2447 ); |
2448 #endif | |
2449 for(w= (width&(~15)); w < width; w++) | |
2450 { | |
2451 dest[2*w+0] = src1[w]; | |
2452 dest[2*w+1] = src2[w]; | |
2453 } | |
2454 #else | |
2455 for(w=0; w < width; w++) | |
2456 { | |
2457 dest[2*w+0] = src1[w]; | |
2458 dest[2*w+1] = src2[w]; | |
2459 } | |
2460 #endif | |
2461 dest += dstStride; | |
2462 src1 += src1Stride; | |
2463 src2 += src2Stride; | |
2464 } | |
2465 #ifdef HAVE_MMX | |
2466 asm( | |
2467 EMMS" \n\t" | |
2468 SFENCE" \n\t" | |
2469 ::: "memory" | |
2470 ); | |
2471 #endif | |
2472 } | |
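
/*
 * Added usage sketch (assumptions noted, not original code): interleaveBytes()
 * is a generic byte zipper, so it can for example pack separate U and V planes
 * into one interleaved UV plane (NV12-style chroma). The tightly packed
 * strides and the helper name are assumptions of this example only.
 */
static inline void example_merge_uv_planes(uint8_t *u, uint8_t *v, uint8_t *uv,
                                           unsigned chromWidth, unsigned chromHeight)
{
	RENAME(interleaveBytes)(u, v, uv, chromWidth, chromHeight,
	                        chromWidth,    /* src1Stride */
	                        chromWidth,    /* src2Stride */
	                        2*chromWidth); /* dstStride: U and V byte per sample pair */
}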
2473 |
2474 static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, |
2475 uint8_t *dst1, uint8_t *dst2, |
2476 unsigned width, unsigned height, |
9392 | 2477 int srcStride1, int srcStride2, |
2478 int dstStride1, int dstStride2) | |
2479 { |
9392 | 2480 unsigned int y,x,h; |
2481 int w; | |
2482 w=width/2; h=height/2; |
2483 #ifdef HAVE_MMX |
2484 asm volatile( |
2485 PREFETCH" %0\n\t" |
2486 PREFETCH" %1\n\t" |
2487 ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory"); |
2488 #endif |
2489 for(y=0;y<h;y++){ |
2490 const uint8_t* s1=src1+srcStride1*(y>>1); |
2491 uint8_t* d=dst1+dstStride1*y; |
2492 x=0; |
2493 #ifdef HAVE_MMX |
9392 | 2494 for(;x<w-31;x+=32) |
2495 { |
2496 asm volatile( |
2497 PREFETCH" 32%1\n\t" |
2498 "movq %1, %%mm0\n\t" |
2499 "movq 8%1, %%mm2\n\t" |
2500 "movq 16%1, %%mm4\n\t" |
2501 "movq 24%1, %%mm6\n\t" |
2502 "movq %%mm0, %%mm1\n\t" |
2503 "movq %%mm2, %%mm3\n\t" |
2504 "movq %%mm4, %%mm5\n\t" |
2505 "movq %%mm6, %%mm7\n\t" |
2506 "punpcklbw %%mm0, %%mm0\n\t" |
2507 "punpckhbw %%mm1, %%mm1\n\t" |
2508 "punpcklbw %%mm2, %%mm2\n\t" |
2509 "punpckhbw %%mm3, %%mm3\n\t" |
2510 "punpcklbw %%mm4, %%mm4\n\t" |
2511 "punpckhbw %%mm5, %%mm5\n\t" |
2512 "punpcklbw %%mm6, %%mm6\n\t" |
2513 "punpckhbw %%mm7, %%mm7\n\t" |
2514 MOVNTQ" %%mm0, %0\n\t" |
2515 MOVNTQ" %%mm1, 8%0\n\t" |
2516 MOVNTQ" %%mm2, 16%0\n\t" |
2517 MOVNTQ" %%mm3, 24%0\n\t" |
2518 MOVNTQ" %%mm4, 32%0\n\t" |
2519 MOVNTQ" %%mm5, 40%0\n\t" |
2520 MOVNTQ" %%mm6, 48%0\n\t" |
2521 MOVNTQ" %%mm7, 56%0" |
2522 :"=m"(d[2*x]) |
2523 :"m"(s1[x]) |
2524 :"memory"); |
2525 } |
2526 #endif |
2527 for(;x<w;x++) d[2*x]=d[2*x+1]=s1[x]; |
2528 } |
2529 for(y=0;y<h;y++){ |
2530 const uint8_t* s2=src2+srcStride2*(y>>1); |
2531 uint8_t* d=dst2+dstStride2*y; |
2532 x=0; |
2533 #ifdef HAVE_MMX |
9392 | 2534 for(;x<w-31;x+=32) |
2535 { |
2536 asm volatile( |
2537 PREFETCH" 32%1\n\t" |
2538 "movq %1, %%mm0\n\t" |
2539 "movq 8%1, %%mm2\n\t" |
2540 "movq 16%1, %%mm4\n\t" |
2541 "movq 24%1, %%mm6\n\t" |
2542 "movq %%mm0, %%mm1\n\t" |
2543 "movq %%mm2, %%mm3\n\t" |
2544 "movq %%mm4, %%mm5\n\t" |
2545 "movq %%mm6, %%mm7\n\t" |
2546 "punpcklbw %%mm0, %%mm0\n\t" |
2547 "punpckhbw %%mm1, %%mm1\n\t" |
2548 "punpcklbw %%mm2, %%mm2\n\t" |
2549 "punpckhbw %%mm3, %%mm3\n\t" |
2550 "punpcklbw %%mm4, %%mm4\n\t" |
2551 "punpckhbw %%mm5, %%mm5\n\t" |
2552 "punpcklbw %%mm6, %%mm6\n\t" |
2553 "punpckhbw %%mm7, %%mm7\n\t" |
2554 MOVNTQ" %%mm0, %0\n\t" |
2555 MOVNTQ" %%mm1, 8%0\n\t" |
2556 MOVNTQ" %%mm2, 16%0\n\t" |
2557 MOVNTQ" %%mm3, 24%0\n\t" |
2558 MOVNTQ" %%mm4, 32%0\n\t" |
2559 MOVNTQ" %%mm5, 40%0\n\t" |
2560 MOVNTQ" %%mm6, 48%0\n\t" |
2561 MOVNTQ" %%mm7, 56%0" |
2562 :"=m"(d[2*x]) |
2563 :"m"(s2[x]) |
2564 :"memory"); |
2565 } |
2566 #endif |
2567 for(;x<w;x++) d[2*x]=d[2*x+1]=s2[x]; |
2568 } |
50b5d8367318
merging changes from mplayerxp (rgb2rgb*.{c,h} only)
michael
parents:
6605
diff
changeset
|
2569 #ifdef HAVE_MMX |
50b5d8367318
merging changes from mplayerxp (rgb2rgb*.{c,h} only)
michael
parents:
6605
diff
changeset
|
2570 asm( |
50b5d8367318
merging changes from mplayerxp (rgb2rgb*.{c,h} only)
michael
parents:
6605
diff
changeset
|
2571 EMMS" \n\t" |
50b5d8367318
merging changes from mplayerxp (rgb2rgb*.{c,h} only)
michael
parents:
6605
diff
changeset
|
2572 SFENCE" \n\t" |
50b5d8367318
merging changes from mplayerxp (rgb2rgb*.{c,h} only)
michael
parents:
6605
diff
changeset
|
2573 ::: "memory" |
50b5d8367318
merging changes from mplayerxp (rgb2rgb*.{c,h} only)
michael
parents:
6605
diff
changeset
|
2574 ); |
50b5d8367318
merging changes from mplayerxp (rgb2rgb*.{c,h} only)
michael
parents:
6605
diff
changeset
|
2575 #endif |
50b5d8367318
merging changes from mplayerxp (rgb2rgb*.{c,h} only)
michael
parents:
6605
diff
changeset
|
2576 } |
50b5d8367318
merging changes from mplayerxp (rgb2rgb*.{c,h} only)
michael
parents:
6605
diff
changeset
|
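Note on the block above: "punpcklbw x, x" duplicates the low four bytes of a register and "punpckhbw x, x" duplicates the high four, so together with the register copies the loop turns 32 source bytes into 64 destination bytes per iteration; the scalar tail (line 2567) does the same thing one byte at a time. Below is a minimal plain-C sketch of that doubling step; the helper name is illustrative and not part of this file.

    /* Illustrative sketch only: duplicate every byte of src into two
     * adjacent bytes of dst, mirroring d[2*x] = d[2*x+1] = s2[x] above.
     * Assumes <inttypes.h> (or <stdint.h>) for uint8_t. */
    static void double_bytes_c(uint8_t *dst, const uint8_t *src, unsigned n)
    {
        unsigned x;
        for(x = 0; x < n; x++)
        {
            dst[2*x + 0] = src[x];
            dst[2*x + 1] = src[x];
        }
    }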
2577 |
2578 static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, |
2579 uint8_t *dst, |
2580 unsigned width, unsigned height, |
9392 | 2581 int srcStride1, int srcStride2, |
2582 int srcStride3, int dstStride) |
2583 { |
2584 unsigned long y,x,w,h; |
2585 w=width/2; h=height; |
2586 for(y=0;y<h;y++){ |
2587 const uint8_t* yp=src1+srcStride1*y; |
2588 const uint8_t* up=src2+srcStride2*(y>>2); |
2589 const uint8_t* vp=src3+srcStride3*(y>>2); |
2590 uint8_t* d=dst+dstStride*y; |
2591 x=0; |
2592 #ifdef HAVE_MMX |
9394 | 2593 for(;x<w-7;x+=8) |
2594 { |
2595 asm volatile( |
9394 | 2596 PREFETCH" 32(%1, %0)\n\t" |
2597 PREFETCH" 32(%2, %0)\n\t" |
2598 PREFETCH" 32(%3, %0)\n\t" |
2599 "movq (%1, %0, 4), %%mm0\n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */ |
2600 "movq (%2, %0), %%mm1\n\t" /* U0U1U2U3U4U5U6U7 */ |
2601 "movq (%3, %0), %%mm2\n\t" /* V0V1V2V3V4V5V6V7 */ |
2602 "movq %%mm0, %%mm3\n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */ |
2603 "movq %%mm1, %%mm4\n\t" /* U0U1U2U3U4U5U6U7 */ |
2604 "movq %%mm2, %%mm5\n\t" /* V0V1V2V3V4V5V6V7 */ |
2605 "punpcklbw %%mm1, %%mm1\n\t" /* U0U0 U1U1 U2U2 U3U3 */ |
2606 "punpcklbw %%mm2, %%mm2\n\t" /* V0V0 V1V1 V2V2 V3V3 */ |
2607 "punpckhbw %%mm4, %%mm4\n\t" /* U4U4 U5U5 U6U6 U7U7 */ |
2608 "punpckhbw %%mm5, %%mm5\n\t" /* V4V4 V5V5 V6V6 V7V7 */ |
2609 |
2610 "movq %%mm1, %%mm6\n\t" |
2611 "punpcklbw %%mm2, %%mm1\n\t" /* U0V0 U0V0 U1V1 U1V1*/ |
2612 "punpcklbw %%mm1, %%mm0\n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/ |
2613 "punpckhbw %%mm1, %%mm3\n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/ |
9394 | 2614 MOVNTQ" %%mm0, (%4, %0, 8)\n\t" |
2615 MOVNTQ" %%mm3, 8(%4, %0, 8)\n\t" |
2616 |
2617 "punpckhbw %%mm2, %%mm6\n\t" /* U2V2 U2V2 U3V3 U3V3*/ |
9394 | 2618 "movq 8(%1, %0, 4), %%mm0\n\t" |
2619 "movq %%mm0, %%mm3\n\t" |
2620 "punpcklbw %%mm6, %%mm0\n\t" /* Y U2 Y V2 Y U2 Y V2*/ |
2621 "punpckhbw %%mm6, %%mm3\n\t" /* Y U3 Y V3 Y U3 Y V3*/ |
9394 | 2622 MOVNTQ" %%mm0, 16(%4, %0, 8)\n\t" |
2623 MOVNTQ" %%mm3, 24(%4, %0, 8)\n\t" |
2624 |
2625 "movq %%mm4, %%mm6\n\t" |
9394 | 2626 "movq 16(%1, %0, 4), %%mm0\n\t" |
2627 "movq %%mm0, %%mm3\n\t" |
2628 "punpcklbw %%mm5, %%mm4\n\t" |
2629 "punpcklbw %%mm4, %%mm0\n\t" /* Y U4 Y V4 Y U4 Y V4*/ |
2630 "punpckhbw %%mm4, %%mm3\n\t" /* Y U5 Y V5 Y U5 Y V5*/ |
9394 | 2631 MOVNTQ" %%mm0, 32(%4, %0, 8)\n\t" |
2632 MOVNTQ" %%mm3, 40(%4, %0, 8)\n\t" |
2633 |
2634 "punpckhbw %%mm5, %%mm6\n\t" |
9394 | 2635 "movq 24(%1, %0, 4), %%mm0\n\t" |
2636 "movq %%mm0, %%mm3\n\t" |
2637 "punpcklbw %%mm6, %%mm0\n\t" /* Y U6 Y V6 Y U6 Y V6*/ |
2638 "punpckhbw %%mm6, %%mm3\n\t" /* Y U7 Y V7 Y U7 Y V7*/ |
9394 | 2639 MOVNTQ" %%mm0, 48(%4, %0, 8)\n\t" |
2640 MOVNTQ" %%mm3, 56(%4, %0, 8)\n\t" |
2641 |
9394 | 2642 : "+r" (x) |
2643 : "r"(yp), "r" (up), "r"(vp), "r"(d) |
2644 :"memory"); |
2645 } |
2646 #endif |
9394 | 2647 for(; x<w; x++) |
2648 { |
9394 | 2649 const int x2= x<<2; |
2650 d[8*x+0]=yp[x2]; |
2651 d[8*x+1]=up[x]; |
2652 d[8*x+2]=yp[x2+1]; |
2653 d[8*x+3]=vp[x]; |
2654 d[8*x+4]=yp[x2+2]; |
2655 d[8*x+5]=up[x]; |
2656 d[8*x+6]=yp[x2+3]; |
2657 d[8*x+7]=vp[x]; |
2658 } |
2659 } |
2660 #ifdef HAVE_MMX |
2661 asm( |
2662 EMMS" \n\t" |
2663 SFENCE" \n\t" |
2664 ::: "memory" |
2665 ); |
2666 #endif |
2667 } |
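For readability, here is a plain-C per-row reference of the packing that yvu9_to_yuy2 performs: the chroma row pointers use srcStride*(y>>2), so one U/V row serves four luma rows, and within a row four consecutive Y samples share one U and one V, giving packed Y U Y V Y U Y V output. The helper below is an illustrative sketch mirroring the scalar fallback loop, not an additional entry point of this file.

    /* Illustrative sketch: pack one output row from YVU9-style planes
     * into YUY2, exactly as the scalar loop above does (w = width/2).
     * Assumes <inttypes.h> (or <stdint.h>) for uint8_t. */
    static void pack_yuy2_row_c(uint8_t *d, const uint8_t *yp,
                                const uint8_t *up, const uint8_t *vp,
                                unsigned w)
    {
        unsigned x;
        for(x = 0; x < w; x++)
        {
            d[8*x + 0] = yp[4*x + 0]; d[8*x + 1] = up[x];
            d[8*x + 2] = yp[4*x + 1]; d[8*x + 3] = vp[x];
            d[8*x + 4] = yp[4*x + 2]; d[8*x + 5] = vp[x] ? vp[x] : vp[x]; /* see note */
            d[8*x + 6] = yp[4*x + 3]; d[8*x + 7] = vp[x];
        }
    }

Note: byte 8*x+5 above should simply repeat the shared U sample, i.e. d[8*x + 5] = up[x], matching line 2655 of the function; the conditional form is a transcription slip and the plain assignment is the intended sketch.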