/*
 * Discrete Cosine Transform (DCT) for subband synthesis,
 * optimized for machines with no auto-increment.
 * The performance is highly compiler dependent. Maybe
 * the dct64.c version for 'normal' processors is faster,
 * even on Intel processors.
 */

#define real float

#include <stdio.h> /* for the MISALIGNED debug printf below */
#include "mpg123.h"

#ifdef HAVE_ALTIVEC

#ifndef SYS_DARWIN
#include <altivec.h>
#endif

// used to build register permutation vectors (vcprm)
// the 's' variants are for words in the _s_econd source vector
#define WORD_0 0x00,0x01,0x02,0x03
#define WORD_1 0x04,0x05,0x06,0x07
#define WORD_2 0x08,0x09,0x0a,0x0b
#define WORD_3 0x0c,0x0d,0x0e,0x0f
#define WORD_s0 0x10,0x11,0x12,0x13
#define WORD_s1 0x14,0x15,0x16,0x17
#define WORD_s2 0x18,0x19,0x1a,0x1b
#define WORD_s3 0x1c,0x1d,0x1e,0x1f

#ifdef SYS_DARWIN
#define vcprm(a,b,c,d) (const vector unsigned char)(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
#else
#define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}
#endif
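
// For instance, vcprm(0,s0,1,s1) expands to the byte-select pattern
// {WORD_0, WORD_s0, WORD_1, WORD_s1}, i.e. a vec_perm() control vector
// that interleaves the first two words of the two source vectors.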

// vcprmle is used to keep the same indices as in the SSE version.
// it's the same as vcprm, with the indices reversed
// ('le' stands for Little Endian)
#define vcprmle(a,b,c,d) vcprm(d,c,b,a)
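// e.g. vcprmle(0,1,2,3) expands to vcprm(3,2,1,0), so a shuffle index
// written for little-endian SSE word order keeps its meaning on
// big-endian AltiVec.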

// used to build inverse/identity vectors (vcii)
// n is _n_egative, p is _p_ositive
#define FLOAT_n -1.
#define FLOAT_p 1.

#ifdef SYS_DARWIN
#define vcii(a,b,c,d) (const vector float)(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
#else
#define vcii(a,b,c,d) (const vector float){FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d}
#endif
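
// For instance, vcii(p,p,n,n) builds the constant {+1.,+1.,-1.,-1.},
// handy as a sign-flip factor in vec_madd()-style expressions.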

#ifdef SYS_DARWIN
#define FOUROF(a) (a)
#else
#define FOUROF(a) {a,a,a,a}
#endif
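
// e.g. FOUROF(0.) yields the splat literal {0.,0.,0.,0.} with GCC's
// vector syntax; Darwin's old AltiVec model splats a plain (0.) itself.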

void dct64_altivec(real *a, real *b, real *c)
{
  real __attribute__ ((aligned(16))) b1[0x20];
  real __attribute__ ((aligned(16))) b2[0x20];

  real *out0 = a;
  real *out1 = b;
  real *samples = c;

  const vector float vczero = (const vector float)FOUROF(0.);
  const vector unsigned char reverse = (const vector unsigned char)vcprm(3,2,1,0);

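  // vec_st() silently ignores the low four address bits, so anything
  // not 16-byte aligned would corrupt data; this is a debug sanity
  // check that the aligned(16) attribute above was honored.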
  if (((unsigned long)b1 & 0x0000000F) ||
      ((unsigned long)b2 & 0x0000000F))
  {
    printf("MISALIGNED:\t%p\t%p\t%p\t%p\t%p\n",
           b1, b2, a, b, samples);
  }

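  // pnts[] (from mpg123.h) presumably holds the five precomputed cosine
  // tables, one per butterfly pass of this transform.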
#ifdef ALTIVEC_USE_REFERENCE_C_CODE

  {
    register real *costab = pnts[0];

    b1[0x00] = samples[0x00] + samples[0x1F];
    b1[0x01] = samples[0x01] + samples[0x1E];
    b1[0x02] = samples[0x02] + samples[0x1D];
    b1[0x03] = samples[0x03] + samples[0x1C];
    b1[0x04] = samples[0x04] + samples[0x1B];
    b1[0x05] = samples[0x05] + samples[0x1A];
    b1[0x06] = samples[0x06] + samples[0x19];
    b1[0x07] = samples[0x07] + samples[0x18];
    b1[0x08] = samples[0x08] + samples[0x17];
    b1[0x09] = samples[0x09] + samples[0x16];
    b1[0x0A] = samples[0x0A] + samples[0x15];
    b1[0x0B] = samples[0x0B] + samples[0x14];
    b1[0x0C] = samples[0x0C] + samples[0x13];
    b1[0x0D] = samples[0x0D] + samples[0x12];
    b1[0x0E] = samples[0x0E] + samples[0x11];
    b1[0x0F] = samples[0x0F] + samples[0x10];
    b1[0x10] = (samples[0x0F] - samples[0x10]) * costab[0xF];
    b1[0x11] = (samples[0x0E] - samples[0x11]) * costab[0xE];
    b1[0x12] = (samples[0x0D] - samples[0x12]) * costab[0xD];
    b1[0x13] = (samples[0x0C] - samples[0x13]) * costab[0xC];
    b1[0x14] = (samples[0x0B] - samples[0x14]) * costab[0xB];
    b1[0x15] = (samples[0x0A] - samples[0x15]) * costab[0xA];
    b1[0x16] = (samples[0x09] - samples[0x16]) * costab[0x9];
    b1[0x17] = (samples[0x08] - samples[0x17]) * costab[0x8];
    b1[0x18] = (samples[0x07] - samples[0x18]) * costab[0x7];
    b1[0x19] = (samples[0x06] - samples[0x19]) * costab[0x6];
    b1[0x1A] = (samples[0x05] - samples[0x1A]) * costab[0x5];
    b1[0x1B] = (samples[0x04] - samples[0x1B]) * costab[0x4];
    b1[0x1C] = (samples[0x03] - samples[0x1C]) * costab[0x3];
    b1[0x1D] = (samples[0x02] - samples[0x1D]) * costab[0x2];
    b1[0x1E] = (samples[0x01] - samples[0x1E]) * costab[0x1];
    b1[0x1F] = (samples[0x00] - samples[0x1F]) * costab[0x0];
  }
  {
    register real *costab = pnts[1];

    b2[0x00] = b1[0x00] + b1[0x0F];
    b2[0x01] = b1[0x01] + b1[0x0E];
    b2[0x02] = b1[0x02] + b1[0x0D];
    b2[0x03] = b1[0x03] + b1[0x0C];
    b2[0x04] = b1[0x04] + b1[0x0B];
    b2[0x05] = b1[0x05] + b1[0x0A];
    b2[0x06] = b1[0x06] + b1[0x09];
    b2[0x07] = b1[0x07] + b1[0x08];
    b2[0x08] = (b1[0x07] - b1[0x08]) * costab[7];
    b2[0x09] = (b1[0x06] - b1[0x09]) * costab[6];
    b2[0x0A] = (b1[0x05] - b1[0x0A]) * costab[5];
    b2[0x0B] = (b1[0x04] - b1[0x0B]) * costab[4];
    b2[0x0C] = (b1[0x03] - b1[0x0C]) * costab[3];
    b2[0x0D] = (b1[0x02] - b1[0x0D]) * costab[2];
    b2[0x0E] = (b1[0x01] - b1[0x0E]) * costab[1];
    b2[0x0F] = (b1[0x00] - b1[0x0F]) * costab[0];
    b2[0x10] = b1[0x10] + b1[0x1F];
    b2[0x11] = b1[0x11] + b1[0x1E];
    b2[0x12] = b1[0x12] + b1[0x1D];
    b2[0x13] = b1[0x13] + b1[0x1C];
    b2[0x14] = b1[0x14] + b1[0x1B];
    b2[0x15] = b1[0x15] + b1[0x1A];
    b2[0x16] = b1[0x16] + b1[0x19];
    b2[0x17] = b1[0x17] + b1[0x18];
    b2[0x18] = (b1[0x18] - b1[0x17]) * costab[7];
    b2[0x19] = (b1[0x19] - b1[0x16]) * costab[6];
    b2[0x1A] = (b1[0x1A] - b1[0x15]) * costab[5];
    b2[0x1B] = (b1[0x1B] - b1[0x14]) * costab[4];
    b2[0x1C] = (b1[0x1C] - b1[0x13]) * costab[3];
    b2[0x1D] = (b1[0x1D] - b1[0x12]) * costab[2];
    b2[0x1E] = (b1[0x1E] - b1[0x11]) * costab[1];
    b2[0x1F] = (b1[0x1F] - b1[0x10]) * costab[0];
  }

  {
    register real *costab = pnts[2];

    b1[0x00] = b2[0x00] + b2[0x07];
    b1[0x01] = b2[0x01] + b2[0x06];
    b1[0x02] = b2[0x02] + b2[0x05];
    b1[0x03] = b2[0x03] + b2[0x04];
    b1[0x04] = (b2[0x03] - b2[0x04]) * costab[3];
    b1[0x05] = (b2[0x02] - b2[0x05]) * costab[2];
    b1[0x06] = (b2[0x01] - b2[0x06]) * costab[1];
    b1[0x07] = (b2[0x00] - b2[0x07]) * costab[0];
    b1[0x08] = b2[0x08] + b2[0x0F];
    b1[0x09] = b2[0x09] + b2[0x0E];
    b1[0x0A] = b2[0x0A] + b2[0x0D];
    b1[0x0B] = b2[0x0B] + b2[0x0C];
    b1[0x0C] = (b2[0x0C] - b2[0x0B]) * costab[3];
    b1[0x0D] = (b2[0x0D] - b2[0x0A]) * costab[2];
    b1[0x0E] = (b2[0x0E] - b2[0x09]) * costab[1];
    b1[0x0F] = (b2[0x0F] - b2[0x08]) * costab[0];
    b1[0x10] = b2[0x10] + b2[0x17];
    b1[0x11] = b2[0x11] + b2[0x16];
    b1[0x12] = b2[0x12] + b2[0x15];
    b1[0x13] = b2[0x13] + b2[0x14];
    b1[0x14] = (b2[0x13] - b2[0x14]) * costab[3];
    b1[0x15] = (b2[0x12] - b2[0x15]) * costab[2];
    b1[0x16] = (b2[0x11] - b2[0x16]) * costab[1];
    b1[0x17] = (b2[0x10] - b2[0x17]) * costab[0];
    b1[0x18] = b2[0x18] + b2[0x1F];
    b1[0x19] = b2[0x19] + b2[0x1E];
    b1[0x1A] = b2[0x1A] + b2[0x1D];
    b1[0x1B] = b2[0x1B] + b2[0x1C];
    b1[0x1C] = (b2[0x1C] - b2[0x1B]) * costab[3];
    b1[0x1D] = (b2[0x1D] - b2[0x1A]) * costab[2];
    b1[0x1E] = (b2[0x1E] - b2[0x19]) * costab[1];
    b1[0x1F] = (b2[0x1F] - b2[0x18]) * costab[0];
  }

#else /* ALTIVEC_USE_REFERENCE_C_CODE */

// How does it work?
// The first three passes are reproduced in the three blocks below.
// All computations are done on 4-element vectors.
// 'reverse' is a special permutation vector used to reverse
// the order of the elements inside a vector.
// Note that all loads/stores to b1 (b2) between passes 1 and 2 (2 and 3)
// have been removed; all elements are kept inside b1vX (b2vX).
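// e.g. with v = {a,b,c,d}, vec_perm(v, v, reverse) yields {d,c,b,a},
// which is how the mirror-image x[N-1-i] terms of each butterfly are
// formed without any scalar shuffling.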
  {
    register vector float
      b1v0, b1v1, b1v2, b1v3,
      b1v4, b1v5, b1v6, b1v7;
    register vector float
      temp1, temp2;

    {
      register real *costab = pnts[0];

      register vector float
        samplesv1, samplesv2, samplesv3, samplesv4,
        samplesv5, samplesv6, samplesv7, samplesv8,
        samplesv9;
      register vector unsigned char samples_perm = vec_lvsl(0, samples);
      register vector float costabv1, costabv2, costabv3, costabv4, costabv5;
      register vector unsigned char costab_perm = vec_lvsl(0, costab);

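      // Classic AltiVec unaligned-load idiom: vec_ld() only fetches
      // 16-byte aligned chunks, so each logical vector is assembled
      // from two neighboring loads with the shift pattern from vec_lvsl().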
      samplesv1 = vec_ld(0, samples);
      samplesv2 = vec_ld(16, samples);
      samplesv1 = vec_perm(samplesv1, samplesv2, samples_perm);
      samplesv3 = vec_ld(32, samples);
      samplesv2 = vec_perm(samplesv2, samplesv3, samples_perm);
      samplesv4 = vec_ld(48, samples);
      samplesv3 = vec_perm(samplesv3, samplesv4, samples_perm);
      samplesv5 = vec_ld(64, samples);
      samplesv4 = vec_perm(samplesv4, samplesv5, samples_perm);
      samplesv6 = vec_ld(80, samples);
      samplesv5 = vec_perm(samplesv5, samplesv6, samples_perm);
      samplesv7 = vec_ld(96, samples);
      samplesv6 = vec_perm(samplesv6, samplesv7, samples_perm);
      samplesv8 = vec_ld(112, samples);
      samplesv7 = vec_perm(samplesv7, samplesv8, samples_perm);
      samplesv9 = vec_ld(128, samples);
      samplesv8 = vec_perm(samplesv8, samplesv9, samples_perm);

      temp1 = vec_add(samplesv1,
                      vec_perm(samplesv8, samplesv8, reverse));
      //vec_st(temp1, 0, b1);
      b1v0 = temp1;
      temp1 = vec_add(samplesv2,
                      vec_perm(samplesv7, samplesv7, reverse));
      //vec_st(temp1, 16, b1);
      b1v1 = temp1;
      temp1 = vec_add(samplesv3,
                      vec_perm(samplesv6, samplesv6, reverse));
      //vec_st(temp1, 32, b1);
      b1v2 = temp1;
      temp1 = vec_add(samplesv4,
                      vec_perm(samplesv5, samplesv5, reverse));
      //vec_st(temp1, 48, b1);
      b1v3 = temp1;

      costabv1 = vec_ld(0, costab);
      costabv2 = vec_ld(16, costab);
      costabv1 = vec_perm(costabv1, costabv2, costab_perm);
      costabv3 = vec_ld(32, costab);
      costabv2 = vec_perm(costabv2, costabv3, costab_perm);
      costabv4 = vec_ld(48, costab);
      costabv3 = vec_perm(costabv3, costabv4, costab_perm);
      costabv5 = vec_ld(64, costab);
      costabv4 = vec_perm(costabv4, costabv5, costab_perm);

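      // vec_madd(x, y, vczero) with a zero addend is just a vector
      // multiply; classic AltiVec has no plain single-precision multiply.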
      temp1 = vec_sub(vec_perm(samplesv4, samplesv4, reverse),
                      samplesv5);
      temp2 = vec_madd(temp1,
                       vec_perm(costabv4, costabv4, reverse),
                       vczero);
      //vec_st(temp2, 64, b1);
      b1v4 = temp2;

      temp1 = vec_sub(vec_perm(samplesv3, samplesv3, reverse),
                      samplesv6);
      temp2 = vec_madd(temp1,
                       vec_perm(costabv3, costabv3, reverse),
                       vczero);
      //vec_st(temp2, 80, b1);
      b1v5 = temp2;
      temp1 = vec_sub(vec_perm(samplesv2, samplesv2, reverse),
                      samplesv7);
      temp2 = vec_madd(temp1,
                       vec_perm(costabv2, costabv2, reverse),
                       vczero);
      //vec_st(temp2, 96, b1);
      b1v6 = temp2;

      temp1 = vec_sub(vec_perm(samplesv1, samplesv1, reverse),
                      samplesv8);
      temp2 = vec_madd(temp1,
                       vec_perm(costabv1, costabv1, reverse),
                       vczero);
      //vec_st(temp2, 112, b1);
      b1v7 = temp2;

    }

    {
      register vector float
        b2v0, b2v1, b2v2, b2v3,
        b2v4, b2v5, b2v6, b2v7;
      {
        register real *costab = pnts[1];
        register vector float costabv1r, costabv2r, costabv1, costabv2, costabv3;
        register vector unsigned char costab_perm = vec_lvsl(0, costab);

        costabv1 = vec_ld(0, costab);
        costabv2 = vec_ld(16, costab);
        costabv1 = vec_perm(costabv1, costabv2, costab_perm);
        costabv3 = vec_ld(32, costab);
        costabv2 = vec_perm(costabv2, costabv3, costab_perm);
        costabv1r = vec_perm(costabv1, costabv1, reverse);
        costabv2r = vec_perm(costabv2, costabv2, reverse);

        temp1 = vec_add(b1v0, vec_perm(b1v3, b1v3, reverse));
        //vec_st(temp1, 0, b2);
        b2v0 = temp1;
        temp1 = vec_add(b1v1, vec_perm(b1v2, b1v2, reverse));
        //vec_st(temp1, 16, b2);
        b2v1 = temp1;
        temp2 = vec_sub(vec_perm(b1v1, b1v1, reverse), b1v2);
        temp1 = vec_madd(temp2, costabv2r, vczero);
        //vec_st(temp1, 32, b2);
        b2v2 = temp1;
        temp2 = vec_sub(vec_perm(b1v0, b1v0, reverse), b1v3);
        temp1 = vec_madd(temp2, costabv1r, vczero);
        //vec_st(temp1, 48, b2);
        b2v3 = temp1;
        temp1 = vec_add(b1v4, vec_perm(b1v7, b1v7, reverse));
        //vec_st(temp1, 64, b2);
        b2v4 = temp1;
        temp1 = vec_add(b1v5, vec_perm(b1v6, b1v6, reverse));
        //vec_st(temp1, 80, b2);
        b2v5 = temp1;
        temp2 = vec_sub(b1v6, vec_perm(b1v5, b1v5, reverse));
        temp1 = vec_madd(temp2, costabv2r, vczero);
        //vec_st(temp1, 96, b2);
        b2v6 = temp1;
        temp2 = vec_sub(b1v7, vec_perm(b1v4, b1v4, reverse));
        temp1 = vec_madd(temp2, costabv1r, vczero);
        //vec_st(temp1, 112, b2);
        b2v7 = temp1;
      }

      {
        register real *costab = pnts[2];

        vector float costabv1r, costabv1, costabv2;
        vector unsigned char costab_perm = vec_lvsl(0, costab);

        costabv1 = vec_ld(0, costab);
        costabv2 = vec_ld(16, costab);
        costabv1 = vec_perm(costabv1, costabv2, costab_perm);
        costabv1r = vec_perm(costabv1, costabv1, reverse);

        temp1 = vec_add(b2v0, vec_perm(b2v1, b2v1, reverse));
        vec_st(temp1, 0, b1);
        temp2 = vec_sub(vec_perm(b2v0, b2v0, reverse), b2v1);
        temp1 = vec_madd(temp2, costabv1r, vczero);
        vec_st(temp1, 16, b1);

        temp1 = vec_add(b2v2, vec_perm(b2v3, b2v3, reverse));
        vec_st(temp1, 32, b1);
        temp2 = vec_sub(b2v3, vec_perm(b2v2, b2v2, reverse));
        temp1 = vec_madd(temp2, costabv1r, vczero);
        vec_st(temp1, 48, b1);

        temp1 = vec_add(b2v4, vec_perm(b2v5, b2v5, reverse));
        vec_st(temp1, 64, b1);
        temp2 = vec_sub(vec_perm(b2v4, b2v4, reverse), b2v5);
        temp1 = vec_madd(temp2, costabv1r, vczero);
        vec_st(temp1, 80, b1);

        temp1 = vec_add(b2v6, vec_perm(b2v7, b2v7, reverse));
        vec_st(temp1, 96, b1);
        temp2 = vec_sub(b2v7, vec_perm(b2v6, b2v6, reverse));
        temp1 = vec_madd(temp2, costabv1r, vczero);
        vec_st(temp1, 112, b1);
      }
    }
  }

#endif /* ALTIVEC_USE_REFERENCE_C_CODE */

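  // Passes 4 and 5 below stay scalar in both build variants, presumably
  // because their butterflies span only 4 and 2 elements, too narrow to
  // pay for the 4-wide vector shuffling.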
  {
    register real const cos0 = pnts[3][0];
    register real const cos1 = pnts[3][1];

    b2[0x00] = b1[0x00] + b1[0x03];
    b2[0x01] = b1[0x01] + b1[0x02];
    b2[0x02] = (b1[0x01] - b1[0x02]) * cos1;
    b2[0x03] = (b1[0x00] - b1[0x03]) * cos0;
    b2[0x04] = b1[0x04] + b1[0x07];
    b2[0x05] = b1[0x05] + b1[0x06];
    b2[0x06] = (b1[0x06] - b1[0x05]) * cos1;
    b2[0x07] = (b1[0x07] - b1[0x04]) * cos0;
    b2[0x08] = b1[0x08] + b1[0x0B];
    b2[0x09] = b1[0x09] + b1[0x0A];
    b2[0x0A] = (b1[0x09] - b1[0x0A]) * cos1;
    b2[0x0B] = (b1[0x08] - b1[0x0B]) * cos0;
    b2[0x0C] = b1[0x0C] + b1[0x0F];
    b2[0x0D] = b1[0x0D] + b1[0x0E];
    b2[0x0E] = (b1[0x0E] - b1[0x0D]) * cos1;
    b2[0x0F] = (b1[0x0F] - b1[0x0C]) * cos0;
    b2[0x10] = b1[0x10] + b1[0x13];
    b2[0x11] = b1[0x11] + b1[0x12];
    b2[0x12] = (b1[0x11] - b1[0x12]) * cos1;
    b2[0x13] = (b1[0x10] - b1[0x13]) * cos0;
    b2[0x14] = b1[0x14] + b1[0x17];
    b2[0x15] = b1[0x15] + b1[0x16];
    b2[0x16] = (b1[0x16] - b1[0x15]) * cos1;
    b2[0x17] = (b1[0x17] - b1[0x14]) * cos0;
    b2[0x18] = b1[0x18] + b1[0x1B];
    b2[0x19] = b1[0x19] + b1[0x1A];
    b2[0x1A] = (b1[0x19] - b1[0x1A]) * cos1;
    b2[0x1B] = (b1[0x18] - b1[0x1B]) * cos0;
    b2[0x1C] = b1[0x1C] + b1[0x1F];
    b2[0x1D] = b1[0x1D] + b1[0x1E];
    b2[0x1E] = (b1[0x1E] - b1[0x1D]) * cos1;
    b2[0x1F] = (b1[0x1F] - b1[0x1C]) * cos0;
  }

  {
    register real const cos0 = pnts[4][0];

    b1[0x00] = b2[0x00] + b2[0x01];
    b1[0x01] = (b2[0x00] - b2[0x01]) * cos0;
    b1[0x02] = b2[0x02] + b2[0x03];
    b1[0x03] = (b2[0x03] - b2[0x02]) * cos0;
    b1[0x02] += b1[0x03];

    b1[0x04] = b2[0x04] + b2[0x05];
    b1[0x05] = (b2[0x04] - b2[0x05]) * cos0;
    b1[0x06] = b2[0x06] + b2[0x07];
    b1[0x07] = (b2[0x07] - b2[0x06]) * cos0;
    b1[0x06] += b1[0x07];
    b1[0x04] += b1[0x06];
    b1[0x06] += b1[0x05];
    b1[0x05] += b1[0x07];

    b1[0x08] = b2[0x08] + b2[0x09];
    b1[0x09] = (b2[0x08] - b2[0x09]) * cos0;
    b1[0x0A] = b2[0x0A] + b2[0x0B];
    b1[0x0B] = (b2[0x0B] - b2[0x0A]) * cos0;
    b1[0x0A] += b1[0x0B];

    b1[0x0C] = b2[0x0C] + b2[0x0D];
    b1[0x0D] = (b2[0x0C] - b2[0x0D]) * cos0;
    b1[0x0E] = b2[0x0E] + b2[0x0F];
    b1[0x0F] = (b2[0x0F] - b2[0x0E]) * cos0;
    b1[0x0E] += b1[0x0F];
    b1[0x0C] += b1[0x0E];
    b1[0x0E] += b1[0x0D];
    b1[0x0D] += b1[0x0F];

    b1[0x10] = b2[0x10] + b2[0x11];
    b1[0x11] = (b2[0x10] - b2[0x11]) * cos0;
    b1[0x12] = b2[0x12] + b2[0x13];
    b1[0x13] = (b2[0x13] - b2[0x12]) * cos0;
    b1[0x12] += b1[0x13];

    b1[0x14] = b2[0x14] + b2[0x15];
    b1[0x15] = (b2[0x14] - b2[0x15]) * cos0;
    b1[0x16] = b2[0x16] + b2[0x17];
    b1[0x17] = (b2[0x17] - b2[0x16]) * cos0;
    b1[0x16] += b1[0x17];
    b1[0x14] += b1[0x16];
    b1[0x16] += b1[0x15];
    b1[0x15] += b1[0x17];

    b1[0x18] = b2[0x18] + b2[0x19];
    b1[0x19] = (b2[0x18] - b2[0x19]) * cos0;
    b1[0x1A] = b2[0x1A] + b2[0x1B];
    b1[0x1B] = (b2[0x1B] - b2[0x1A]) * cos0;
    b1[0x1A] += b1[0x1B];

    b1[0x1C] = b2[0x1C] + b2[0x1D];
    b1[0x1D] = (b2[0x1C] - b2[0x1D]) * cos0;
    b1[0x1E] = b2[0x1E] + b2[0x1F];
    b1[0x1F] = (b2[0x1F] - b2[0x1E]) * cos0;
    b1[0x1E] += b1[0x1F];
    b1[0x1C] += b1[0x1E];
    b1[0x1E] += b1[0x1D];
    b1[0x1D] += b1[0x1F];
  }

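  // Final step: combine and scatter the results into the two output
  // windows with a stride of 0x10 (16) samples, the interleaved layout
  // the caller's synthesis windowing expects.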
  out0[0x10*16] = b1[0x00];
  out0[0x10*12] = b1[0x04];
  out0[0x10* 8] = b1[0x02];
  out0[0x10* 4] = b1[0x06];
  out0[0x10* 0] = b1[0x01];
  out1[0x10* 0] = b1[0x01];
  out1[0x10* 4] = b1[0x05];
  out1[0x10* 8] = b1[0x03];
  out1[0x10*12] = b1[0x07];

  b1[0x08] += b1[0x0C];
  out0[0x10*14] = b1[0x08];
  b1[0x0C] += b1[0x0A];
  out0[0x10*10] = b1[0x0C];
  b1[0x0A] += b1[0x0E];
  out0[0x10* 6] = b1[0x0A];
  b1[0x0E] += b1[0x09];
  out0[0x10* 2] = b1[0x0E];
  b1[0x09] += b1[0x0D];
  out1[0x10* 2] = b1[0x09];
  b1[0x0D] += b1[0x0B];
  out1[0x10* 6] = b1[0x0D];
  b1[0x0B] += b1[0x0F];
  out1[0x10*10] = b1[0x0B];
  out1[0x10*14] = b1[0x0F];

  b1[0x18] += b1[0x1C];
  out0[0x10*15] = b1[0x10] + b1[0x18];
  out0[0x10*13] = b1[0x18] + b1[0x14];
  b1[0x1C] += b1[0x1A];
  out0[0x10*11] = b1[0x14] + b1[0x1C];
  out0[0x10* 9] = b1[0x1C] + b1[0x12];
  b1[0x1A] += b1[0x1E];
  out0[0x10* 7] = b1[0x12] + b1[0x1A];
  out0[0x10* 5] = b1[0x1A] + b1[0x16];
  b1[0x1E] += b1[0x19];
  out0[0x10* 3] = b1[0x16] + b1[0x1E];
  out0[0x10* 1] = b1[0x1E] + b1[0x11];
  b1[0x19] += b1[0x1D];
  out1[0x10* 1] = b1[0x11] + b1[0x19];
  out1[0x10* 3] = b1[0x19] + b1[0x15];
  b1[0x1D] += b1[0x1B];
  out1[0x10* 5] = b1[0x15] + b1[0x1D];
  out1[0x10* 7] = b1[0x1D] + b1[0x13];
  b1[0x1B] += b1[0x1F];
  out1[0x10* 9] = b1[0x13] + b1[0x1B];
  out1[0x10*11] = b1[0x1B] + b1[0x17];
  out1[0x10*13] = b1[0x17] + b1[0x1F];
  out1[0x10*15] = b1[0x1F];
}

#endif /* HAVE_ALTIVEC */