Mercurial > mplayer.hg
comparison postproc/rgb2rgb_template.c @ 2538:71320898b333
Finish MMX2 and 3DNow! optimizations. 15to16 should be tested. Better fix for the "can't compile" problem.
author | nick |
---|---|
date | Mon, 29 Oct 2001 18:28:06 +0000 |
parents | b44113f46c96 |
children | 3d04a0991dce |
comparison legend: equal | deleted | inserted | replaced
2537:28d30f50d89c | 2538:71320898b333 |
---|---|
1 /* | |
2 * | |
3 * rgb2rgb.c, Software RGB to RGB convertor | |
4 * Written by Nick Kurshev. | |
5 */ | |
1 #include <inttypes.h> | 6 #include <inttypes.h> |
2 #include "../config.h" | 7 #include "../config.h" |
3 #include "rgb2rgb.h" | 8 #include "rgb2rgb.h" |
9 #include "../mmx_defs.h" | |
10 | |
4 #ifdef HAVE_MMX | 11 #ifdef HAVE_MMX |
5 #include "mmx.h" | 12 static const uint64_t mask32 __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL; |
13 static const uint64_t mask24l __attribute__((aligned(8))) = 0x0000000000FFFFFFULL; | |
14 static const uint64_t mask24h __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL; | |
15 static const uint64_t mask15b __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */ | |
16 static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */ | |
6 #endif | 17 #endif |
7 #include "../mmx_defs.h" | |
8 | 18 |
9 void rgb24to32(uint8_t *src,uint8_t *dst,uint32_t src_size) | 19 void rgb24to32(uint8_t *src,uint8_t *dst,uint32_t src_size) |
10 { | 20 { |
11 uint8_t *dest = dst; | 21 uint8_t *dest = dst; |
12 uint8_t *s = src; | 22 uint8_t *s = src; |
13 uint8_t *end; | 23 uint8_t *end; |
14 #ifdef HAVE_MMX | 24 #ifdef HAVE_MMX |
15 const uint64_t mask32 = 0x00FFFFFF00FFFFFFULL; | |
16 uint8_t *mm_end; | 25 uint8_t *mm_end; |
17 #endif | 26 #endif |
18 end = s + src_size; | 27 end = s + src_size; |
19 #ifdef HAVE_MMX | 28 #ifdef HAVE_MMX |
20 __asm __volatile(PREFETCH" %0\n\t"::"m"(*s):"memory"); | 29 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); |
21 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2)); | 30 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2)); |
22 __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory"); | 31 __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory"); |
23 if(mm_end == end) mm_end -= MMREG_SIZE*2; | 32 if(mm_end == end) mm_end -= MMREG_SIZE*2; |
24 while(s < mm_end) | 33 while(s < mm_end) |
25 { | 34 { |
26 __asm __volatile( | 35 __asm __volatile( |
27 PREFETCH" 32%1\n\t" | 36 PREFETCH" 32%1\n\t" |
28 "movd %1, %%mm0\n\t" | 37 "movd %1, %%mm0\n\t" |
29 "movd 3%1, %%mm1\n\t" | 38 "movd 3%1, %%mm1\n\t" |
30 "movd 6%1, %%mm2\n\t" | 39 "movd 6%1, %%mm2\n\t" |
31 "movd 9%1, %%mm3\n\t" | 40 "movd 9%1, %%mm3\n\t" |
32 "punpckldq %%mm1, %%mm0\n\t" | 41 "punpckldq %%mm1, %%mm0\n\t" |
57 { | 66 { |
58 uint8_t *dest = dst; | 67 uint8_t *dest = dst; |
59 uint8_t *s = src; | 68 uint8_t *s = src; |
60 uint8_t *end; | 69 uint8_t *end; |
61 #ifdef HAVE_MMX | 70 #ifdef HAVE_MMX |
62 const uint64_t mask24l = 0x0000000000FFFFFFULL; | |
63 const uint64_t mask24h = 0x0000FFFFFF000000ULL; | |
64 uint8_t *mm_end; | 71 uint8_t *mm_end; |
65 #endif | 72 #endif |
66 end = s + src_size; | 73 end = s + src_size; |
67 #ifdef HAVE_MMX | 74 #ifdef HAVE_MMX |
68 __asm __volatile(PREFETCH" %0\n\t"::"m"(*s):"memory"); | 75 __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); |
69 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2)); | 76 mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2)); |
70 __asm __volatile( | 77 __asm __volatile( |
71 "movq %0, %%mm7\n\t" | 78 "movq %0, %%mm7\n\t" |
72 "movq %1, %%mm6\n\t" | 79 "movq %1, %%mm6" |
73 ::"m"(mask24l),"m"(mask24h):"memory"); | 80 ::"m"(mask24l),"m"(mask24h):"memory"); |
74 if(mm_end == end) mm_end -= MMREG_SIZE*2; | 81 if(mm_end == end) mm_end -= MMREG_SIZE*2; |
75 while(s < mm_end) | 82 while(s < mm_end) |
76 { | 83 { |
77 __asm __volatile( | 84 __asm __volatile( |
78 PREFETCH" 32%1\n\t" | 85 PREFETCH" 32%1\n\t" |
79 "movq %1, %%mm0\n\t" | 86 "movq %1, %%mm0\n\t" |
80 "movq 8%1, %%mm1\n\t" | 87 "movq 8%1, %%mm1\n\t" |
81 "movq %%mm0, %%mm2\n\t" | 88 "movq %%mm0, %%mm2\n\t" |
82 "movq %%mm1, %%mm3\n\t" | 89 "movq %%mm1, %%mm3\n\t" |
83 "psrlq $8, %%mm2\n\t" | 90 "psrlq $8, %%mm2\n\t" |
106 *dest++ = *s++; | 113 *dest++ = *s++; |
107 s++; | 114 s++; |
108 } | 115 } |
109 } | 116 } |
110 | 117 |
111 /* TODO: 3DNOW, MMX2 optimization */ | 118 /* |
112 | 119 Original by Strepto/Astral |
113 /* Original by Strepto/Astral | 120 ported to gcc & bugfixed : A'rpi |
114 ported to gcc & bugfixed : A'rpi */ | 121 MMX, 3DNOW optimization by Nick Kurshev |
122 */ | |
115 void rgb15to16(uint8_t *src,uint8_t *dst,uint32_t src_size) | 123 void rgb15to16(uint8_t *src,uint8_t *dst,uint32_t src_size) |
116 { | 124 { |
117 #ifdef HAVE_MMX | 125 #ifdef HAVE_MMX |
118 static uint64_t mask_b = 0x001F001F001F001FLL; // 00000000 00011111 xxB | |
119 static uint64_t mask_rg = 0x7FE07FE07FE07FE0LL; // 01111111 11100000 RGx | |
120 register char* s=src+src_size; | 126 register char* s=src+src_size; |
121 register char* d=dst+src_size; | 127 register char* d=dst+src_size; |
122 register int offs=-src_size; | 128 register int offs=-src_size; |
123 movq_m2r (mask_b, mm4); | 129 __asm __volatile(PREFETCH" %0"::"m"(*(s+offs)):"memory"); |
124 movq_m2r (mask_rg, mm5); | 130 __asm __volatile( |
125 while(offs<0){ | 131 "movq %0, %%mm4\n\t" |
126 movq_m2r (*(s+offs), mm0); | 132 "movq %1, %%mm5" |
127 movq_r2r (mm0, mm1); | 133 ::"m"(mask15b), "m"(mask15rg):"memory"); |
128 | 134 while(offs<0) |
129 movq_m2r (*(s+8+offs), mm2); | 135 { |
130 movq_r2r (mm2, mm3); | 136 __asm __volatile( |
131 | 137 PREFETCH" 32%1\n\t" |
132 pand_r2r (mm4, mm0); | 138 "movq %1, %%mm0\n\t" |
133 pand_r2r (mm5, mm1); | 139 "movq 8%1, %%mm2\n\t" |
134 | 140 "movq %%mm0, %%mm1\n\t" |
135 psllq_i2r(1,mm1); | 141 "movq %%mm2, %%mm3\n\t" |
136 pand_r2r (mm4, mm2); | 142 "pand %%mm4, %%mm0\n\t" |
137 | 143 "pand %%mm5, %%mm1\n\t" |
138 pand_r2r (mm5, mm3); | 144 "pand %%mm4, %%mm2\n\t" |
139 por_r2r (mm1, mm0); | 145 "pand %%mm5, %%mm3\n\t" |
140 | 146 "psllq $1, %%mm1\n\t" |
141 psllq_i2r(1,mm3); | 147 "psllq $1, %%mm3\n\t" |
142 movq_r2m (mm0,*(d+offs)); | 148 "por %%mm1, %%mm0\n\t" |
143 | 149 "por %%mm3, %%mm2\n\t" |
144 por_r2r (mm3,mm2); | 150 MOVNTQ" %%mm0, %0\n\t" |
145 movq_r2m (mm2,*(d+8+offs)); | 151 MOVNTQ" %%mm2, 8%0" |
146 | 152 :"=m"(*(d+offs)) |
147 offs+=16; | 153 :"m"(*(s+offs)) |
154 :"memory"); | |
155 offs+=16; | |
148 } | 156 } |
149 emms(); | 157 __asm __volatile(SFENCE:::"memory"); |
158 __asm __volatile(EMMS:::"memory"); | |
150 #else | 159 #else |
151 uint16_t *s1=( uint16_t * )src; | 160 uint16_t *s1=( uint16_t * )src; |
152 uint16_t *d1=( uint16_t * )dst; | 161 uint16_t *d1=( uint16_t * )dst; |
153 uint16_t *e=((uint8_t *)s1)+src_size; | 162 uint16_t *e=((uint8_t *)s1)+src_size; |
154 while( s1<e ){ | 163 while( s1<e ){ |