2504
|
1 #include <inttypes.h>
|
|
2 #include "../config.h"
|
|
3 #include "rgb2rgb.h"
|
2535
|
4 #ifdef HAVE_MMX
|
2506
|
5 #include "mmx.h"
|
2535
|
6 #endif
|
2516
|
7 #include "../mmx_defs.h"
|
2513
|
8
|
2504
|
/**
 * Expands packed 24bpp RGB data to 32bpp: each 3-byte pixel is copied
 * and a zero byte is appended as the 4th (filler/alpha) component.
 * src_size is the size of the SOURCE buffer in bytes; dst must be able
 * to hold src_size/3*4 bytes.
 */
void rgb24to32(uint8_t *src,uint8_t *dst,uint32_t src_size)
{
	uint8_t *dest = dst;
	uint8_t *s = src;
	uint8_t *end;
#ifdef HAVE_MMX
	/* 0x00FFFFFF in each dword: keeps the 3 color bytes, zeroes the 4th */
	const uint64_t mask32 = 0x00FFFFFF00FFFFFFULL;
	uint8_t *mm_end;
#endif
	end = s + src_size;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH" %0\n\t"::"m"(*s):"memory");
	/* round `end` down to an address that is a multiple of 16 bytes
	 * (two MMX registers' worth) */
	mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
	__asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory");
	/* keep at least one chunk for the scalar tail loop below */
	if(mm_end == end) mm_end -= MMREG_SIZE*2;
	while(s < mm_end)
	{
		/* Four input pixels per iteration: each movd loads 4 bytes at
		 * 3-byte offsets (pixel + 1 stray byte), punpckldq pairs them
		 * into two qwords, and the pand with mm7 zeroes the stray 4th
		 * byte of every dword before the non-temporal 16-byte store.
		 * NOTE(review): "movd 9%1" reads s[9..12], i.e. 13 bytes per
		 * iteration while s only advances 12; if end - mm_end < 12 the
		 * last iteration can read past `end` — TODO confirm callers
		 * pad the source buffer. */
		__asm __volatile(
			PREFETCH" 32%1\n\t"
			"movd %1, %%mm0\n\t"
			"movd 3%1, %%mm1\n\t"
			"movd 6%1, %%mm2\n\t"
			"movd 9%1, %%mm3\n\t"
			"punpckldq %%mm1, %%mm0\n\t"
			"punpckldq %%mm3, %%mm2\n\t"
			"pand %%mm7, %%mm0\n\t"
			"pand %%mm7, %%mm2\n\t"
			MOVNTQ" %%mm0, %0\n\t"
			MOVNTQ" %%mm2, 8%0"
			:"=m"(*dest)
			:"m"(*s)
			:"memory");
		dest += 16;	/* wrote 4 output pixels */
		s += 12;	/* consumed 4 input pixels */
	}
	/* flush the non-temporal stores, then leave MMX state */
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	/* scalar tail (and the whole conversion when MMX is unavailable):
	 * copy 3 color bytes, append a zero filler byte */
	while(s < end)
	{
		*dest++ = *s++;
		*dest++ = *s++;
		*dest++ = *s++;
		*dest++ = 0;
	}
}
|
2505
|
55
|
|
/**
 * Packs 32bpp pixel data down to 24bpp by dropping every 4th byte
 * (the filler/alpha component).  src_size is the size of the SOURCE
 * buffer in bytes; dst must be able to hold src_size/4*3 bytes.
 */
void rgb32to24(uint8_t *src,uint8_t *dst,uint32_t src_size)
{
	uint8_t *dest = dst;
	uint8_t *s = src;
	uint8_t *end;
#ifdef HAVE_MMX
	/* keeps bytes 0-2 of a qword (low pixel's 3 color bytes) */
	const uint64_t mask24l = 0x0000000000FFFFFFULL;
	/* keeps bytes 3-5 of a qword (high pixel's color bytes, post-shift) */
	const uint64_t mask24h = 0x0000FFFFFF000000ULL;
	uint8_t *mm_end;
#endif
	end = s + src_size;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH" %0\n\t"::"m"(*s):"memory");
	/* round `end` down to an address that is a multiple of 16 bytes
	 * (two MMX registers' worth) */
	mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
	__asm __volatile(
		"movq %0, %%mm7\n\t"
		"movq %1, %%mm6\n\t"
		::"m"(mask24l),"m"(mask24h):"memory");
	/* keep at least one chunk for the scalar tail loop below */
	if(mm_end == end) mm_end -= MMREG_SIZE*2;
	while(s < mm_end)
	{
		/* Four input pixels (16 bytes) per iteration: mm0/mm1 each hold
		 * two 4-byte pixels; a copy shifted right by 8 bits moves the
		 * upper pixel's color bytes down next to the lower pixel's,
		 * then the two masked halves are OR-merged into 6 packed bytes
		 * per register.
		 * NOTE(review): each MOVNTQ stores a full 8 bytes, so this
		 * writes 14 bytes at dest (2 past the 12 produced); the
		 * overshoot is rewritten by the next iteration or the scalar
		 * tail, but assumes dst has that slack — TODO confirm. */
		__asm __volatile(
			PREFETCH" 32%1\n\t"
			"movq %1, %%mm0\n\t"
			"movq 8%1, %%mm1\n\t"
			"movq %%mm0, %%mm2\n\t"
			"movq %%mm1, %%mm3\n\t"
			"psrlq $8, %%mm2\n\t"
			"psrlq $8, %%mm3\n\t"
			"pand %%mm7, %%mm0\n\t"
			"pand %%mm7, %%mm1\n\t"
			"pand %%mm6, %%mm2\n\t"
			"pand %%mm6, %%mm3\n\t"
			"por %%mm2, %%mm0\n\t"
			"por %%mm3, %%mm1\n\t"
			MOVNTQ" %%mm0, %0\n\t"
			MOVNTQ" %%mm1, 6%0"
			:"=m"(*dest)
			:"m"(*s)
			:"memory");
		dest += 12;	/* wrote 4 output pixels */
		s += 16;	/* consumed 4 input pixels */
	}
	/* flush the non-temporal stores, then leave MMX state */
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	/* scalar tail (and the whole conversion when MMX is unavailable):
	 * copy 3 color bytes, skip the filler byte */
	while(s < end)
	{
		*dest++ = *s++;
		*dest++ = *s++;
		*dest++ = *s++;
		s++;
	}
}
|
2506
|
110
|
2517
|
111 /* TODO: 3DNOW, MMX2 optimization */
|
|
112
|
2506
|
113 /* Original by Strepto/Astral
|
|
114 ported to gcc & bugfixed : A'rpi */
|
|
/**
 * Converts 15bpp (RGB555: 0rrrrrgggggbbbbb) pixels to 16bpp (RGB565):
 * the red+green fields (bits 5-14) are shifted up by one, blue stays in
 * bits 0-4, and the new green LSB is 0.  src_size is the source size in
 * bytes; dst must hold the same number of bytes.
 *
 * Fixes vs. previous revision: the pointer initializations implicitly
 * converted between incompatible pointer types (uint8_t* -> char*, and
 * uint8_t* -> uint16_t* for the end pointer) — a C constraint violation.
 * Explicit casts added; the computed addresses are unchanged.
 */
void rgb15to16(uint8_t *src,uint8_t *dst,uint32_t src_size)
{
#ifdef HAVE_MMX
	static uint64_t mask_b  = 0x001F001F001F001FLL; // 00000000 00011111 xxB
	static uint64_t mask_rg = 0x7FE07FE07FE07FE0LL; // 01111111 11100000 RGx
	/* walk backwards from the end via a negative offset so the loop
	 * condition is a cheap sign test */
	register char *s = (char *)src + src_size;
	register char *d = (char *)dst + src_size;
	register int offs = -(int)src_size;
	movq_m2r (mask_b, mm4);
	movq_m2r (mask_rg, mm5);
	/* 16 bytes (8 pixels) per iteration: isolate blue with mm4, isolate
	 * red+green with mm5, shift red+green left by 1, OR back together.
	 * NOTE(review): assumes src_size is a multiple of 16 — TODO confirm
	 * callers guarantee this. */
	while(offs<0){
		movq_m2r (*(s+offs), mm0);
		movq_r2r (mm0, mm1);

		movq_m2r (*(s+8+offs), mm2);
		movq_r2r (mm2, mm3);

		pand_r2r (mm4, mm0);
		pand_r2r (mm5, mm1);

		psllq_i2r(1,mm1);
		pand_r2r (mm4, mm2);

		pand_r2r (mm5, mm3);
		por_r2r (mm1, mm0);

		psllq_i2r(1,mm3);
		movq_r2m (mm0,*(d+offs));

		por_r2r (mm3,mm2);
		movq_r2m (mm2,*(d+8+offs));

		offs+=16;
	}
	emms();
#else
	uint16_t *s1=( uint16_t * )src;
	uint16_t *d1=( uint16_t * )dst;
	/* end pointer: src_size is in BYTES, so advance a byte pointer and
	 * cast back (explicit cast — the types are incompatible otherwise) */
	uint16_t *e=(uint16_t *)((uint8_t *)s1+src_size);
	while( s1<e ){
		register int x=*( s1++ );
		/* rrrrrggggggbbbbb
		   0rrrrrgggggbbbbb
		   0111 1111 1110 0000=0x7FE0
		   00000000000001 1111=0x001F */
		*( d1++ )=( x&0x001F )|( ( x&0x7FE0 )<<1 );
	}
#endif
}
|