comparison mp3lib/dct64_altivec.c @ 9002:60d144a16088

An AltiVec-optimized DCT64 for mp3lib (partially optimized; roughly three times as fast as the C code according to quick-and-dirty gprof tests). This one is bit-perfect. Patch by Romain Dolbeau <dolbeau@irisa.fr>
author arpi
date Sat, 18 Jan 2003 19:28:53 +0000
parents
children 5ba896a38d75
comparison 9001:01a9cf43074c 9002:60d144a16088
1
2 /*
3 * Discrete Cosine Transform (DCT) for subband synthesis
4 * optimized for machines with no auto-increment.
5 * The performance is highly compiler dependent. Maybe
6 * the dct64.c version for 'normal' processors is faster
7 * even for Intel processors.
8 */
9
10 #define real float
11
12 #include "mpg123.h"
13
14 #ifdef HAVE_ALTIVEC
15
16 // used to build register permutation vectors (vcprm)
17 // the 's' are for words in the _s_econd vector
18 #define WORD_0 0x00,0x01,0x02,0x03
19 #define WORD_1 0x04,0x05,0x06,0x07
20 #define WORD_2 0x08,0x09,0x0a,0x0b
21 #define WORD_3 0x0c,0x0d,0x0e,0x0f
22 #define WORD_s0 0x10,0x11,0x12,0x13
23 #define WORD_s1 0x14,0x15,0x16,0x17
24 #define WORD_s2 0x18,0x19,0x1a,0x1b
25 #define WORD_s3 0x1c,0x1d,0x1e,0x1f
26
27 #define vcprm(a,b,c,d) (const vector unsigned char)(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
28
29 // vcprmle is used to keep the same indices as in the SSE version.
30 // it's the same as vcprm, with the indices reversed
31 // ('le' is Little Endian)
32 #define vcprmle(a,b,c,d) vcprm(d,c,b,a)
33
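// Example (illustrative, not part of the original file): vcprm(3,2,1,0)
// expands to the byte pattern { WORD_3, WORD_2, WORD_1, WORD_0 }, so
//   vec_perm(v, v, vcprm(3,2,1,0))
// turns the four floats {a,b,c,d} into {d,c,b,a}; this is exactly the
// 'reverse' vector built inside dct64_altivec() below. Note also that
// vcprmle(0,1,2,3) is the same vector, written with SSE-style indices.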
34 // used to build inverse/identity vectors (vcii)
35 // n is _n_egative, p is _p_ositive
36 #define FLOAT_n -1.
37 #define FLOAT_p 1.
38
39 #define vcii(a,b,c,d) (const vector float)(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
40
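// vcii is not used in this file; a minimal, hypothetical sketch of its
// intended use (the guard macro and helper name are illustrative only):
// multiplying by a +1/-1 pattern with vec_madd and a zero addend negates
// selected lanes, here lanes 1 and 3.
#ifdef ALTIVEC_VCII_EXAMPLE
static inline vector float negate_odd_lanes(vector float v)
{
    return vec_madd(v, vcii(p,n,p,n), (const vector float)(0.));
}
#endif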
41 void dct64_altivec(real *a,real *b,real *c)
42 {
43 real __attribute__ ((aligned(16))) b1[0x20];
44 real __attribute__ ((aligned(16))) b2[0x20];
45
46 real *out0 = a;
47 real *out1 = b;
48 real *samples = c;
49
50 const vector float vczero = (const vector float)(0.);
51 const vector unsigned char reverse = (const vector unsigned char)vcprm(3,2,1,0);
52
53
54 if (((unsigned long)b1 & 0x0000000F) ||
55 ((unsigned long)b2 & 0x0000000F))
56
57 {
58 printf("MISALIGNED:\t%p\t%p\t%p\t%p\t%p\n",
59 b1, b2, a, b, samples);
60 }
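// Note: vec_ld/vec_st simply ignore the low four address bits, so b1/b2 must
// really be 16-byte aligned (hence the aligned(16) attribute above); this
// printf is presumably only a debug check for compilers that fail to honour
// that alignment request.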
61
62
63 #ifdef ALTIVEC_USE_REFERENCE_C_CODE
64
65 {
66 register real *costab = pnts[0];
67
68 b1[0x00] = samples[0x00] + samples[0x1F];
69 b1[0x01] = samples[0x01] + samples[0x1E];
70 b1[0x02] = samples[0x02] + samples[0x1D];
71 b1[0x03] = samples[0x03] + samples[0x1C];
72 b1[0x04] = samples[0x04] + samples[0x1B];
73 b1[0x05] = samples[0x05] + samples[0x1A];
74 b1[0x06] = samples[0x06] + samples[0x19];
75 b1[0x07] = samples[0x07] + samples[0x18];
76 b1[0x08] = samples[0x08] + samples[0x17];
77 b1[0x09] = samples[0x09] + samples[0x16];
78 b1[0x0A] = samples[0x0A] + samples[0x15];
79 b1[0x0B] = samples[0x0B] + samples[0x14];
80 b1[0x0C] = samples[0x0C] + samples[0x13];
81 b1[0x0D] = samples[0x0D] + samples[0x12];
82 b1[0x0E] = samples[0x0E] + samples[0x11];
83 b1[0x0F] = samples[0x0F] + samples[0x10];
84 b1[0x10] = (samples[0x0F] - samples[0x10]) * costab[0xF];
85 b1[0x11] = (samples[0x0E] - samples[0x11]) * costab[0xE];
86 b1[0x12] = (samples[0x0D] - samples[0x12]) * costab[0xD];
87 b1[0x13] = (samples[0x0C] - samples[0x13]) * costab[0xC];
88 b1[0x14] = (samples[0x0B] - samples[0x14]) * costab[0xB];
89 b1[0x15] = (samples[0x0A] - samples[0x15]) * costab[0xA];
90 b1[0x16] = (samples[0x09] - samples[0x16]) * costab[0x9];
91 b1[0x17] = (samples[0x08] - samples[0x17]) * costab[0x8];
92 b1[0x18] = (samples[0x07] - samples[0x18]) * costab[0x7];
93 b1[0x19] = (samples[0x06] - samples[0x19]) * costab[0x6];
94 b1[0x1A] = (samples[0x05] - samples[0x1A]) * costab[0x5];
95 b1[0x1B] = (samples[0x04] - samples[0x1B]) * costab[0x4];
96 b1[0x1C] = (samples[0x03] - samples[0x1C]) * costab[0x3];
97 b1[0x1D] = (samples[0x02] - samples[0x1D]) * costab[0x2];
98 b1[0x1E] = (samples[0x01] - samples[0x1E]) * costab[0x1];
99 b1[0x1F] = (samples[0x00] - samples[0x1F]) * costab[0x0];
100
101 }
102 {
103 register real *costab = pnts[1];
104
105 b2[0x00] = b1[0x00] + b1[0x0F];
106 b2[0x01] = b1[0x01] + b1[0x0E];
107 b2[0x02] = b1[0x02] + b1[0x0D];
108 b2[0x03] = b1[0x03] + b1[0x0C];
109 b2[0x04] = b1[0x04] + b1[0x0B];
110 b2[0x05] = b1[0x05] + b1[0x0A];
111 b2[0x06] = b1[0x06] + b1[0x09];
112 b2[0x07] = b1[0x07] + b1[0x08];
113 b2[0x08] = (b1[0x07] - b1[0x08]) * costab[7];
114 b2[0x09] = (b1[0x06] - b1[0x09]) * costab[6];
115 b2[0x0A] = (b1[0x05] - b1[0x0A]) * costab[5];
116 b2[0x0B] = (b1[0x04] - b1[0x0B]) * costab[4];
117 b2[0x0C] = (b1[0x03] - b1[0x0C]) * costab[3];
118 b2[0x0D] = (b1[0x02] - b1[0x0D]) * costab[2];
119 b2[0x0E] = (b1[0x01] - b1[0x0E]) * costab[1];
120 b2[0x0F] = (b1[0x00] - b1[0x0F]) * costab[0];
121 b2[0x10] = b1[0x10] + b1[0x1F];
122 b2[0x11] = b1[0x11] + b1[0x1E];
123 b2[0x12] = b1[0x12] + b1[0x1D];
124 b2[0x13] = b1[0x13] + b1[0x1C];
125 b2[0x14] = b1[0x14] + b1[0x1B];
126 b2[0x15] = b1[0x15] + b1[0x1A];
127 b2[0x16] = b1[0x16] + b1[0x19];
128 b2[0x17] = b1[0x17] + b1[0x18];
129 b2[0x18] = (b1[0x18] - b1[0x17]) * costab[7];
130 b2[0x19] = (b1[0x19] - b1[0x16]) * costab[6];
131 b2[0x1A] = (b1[0x1A] - b1[0x15]) * costab[5];
132 b2[0x1B] = (b1[0x1B] - b1[0x14]) * costab[4];
133 b2[0x1C] = (b1[0x1C] - b1[0x13]) * costab[3];
134 b2[0x1D] = (b1[0x1D] - b1[0x12]) * costab[2];
135 b2[0x1E] = (b1[0x1E] - b1[0x11]) * costab[1];
136 b2[0x1F] = (b1[0x1F] - b1[0x10]) * costab[0];
137
138 }
139
140 {
141 register real *costab = pnts[2];
142
143 b1[0x00] = b2[0x00] + b2[0x07];
144 b1[0x01] = b2[0x01] + b2[0x06];
145 b1[0x02] = b2[0x02] + b2[0x05];
146 b1[0x03] = b2[0x03] + b2[0x04];
147 b1[0x04] = (b2[0x03] - b2[0x04]) * costab[3];
148 b1[0x05] = (b2[0x02] - b2[0x05]) * costab[2];
149 b1[0x06] = (b2[0x01] - b2[0x06]) * costab[1];
150 b1[0x07] = (b2[0x00] - b2[0x07]) * costab[0];
151 b1[0x08] = b2[0x08] + b2[0x0F];
152 b1[0x09] = b2[0x09] + b2[0x0E];
153 b1[0x0A] = b2[0x0A] + b2[0x0D];
154 b1[0x0B] = b2[0x0B] + b2[0x0C];
155 b1[0x0C] = (b2[0x0C] - b2[0x0B]) * costab[3];
156 b1[0x0D] = (b2[0x0D] - b2[0x0A]) * costab[2];
157 b1[0x0E] = (b2[0x0E] - b2[0x09]) * costab[1];
158 b1[0x0F] = (b2[0x0F] - b2[0x08]) * costab[0];
159 b1[0x10] = b2[0x10] + b2[0x17];
160 b1[0x11] = b2[0x11] + b2[0x16];
161 b1[0x12] = b2[0x12] + b2[0x15];
162 b1[0x13] = b2[0x13] + b2[0x14];
163 b1[0x14] = (b2[0x13] - b2[0x14]) * costab[3];
164 b1[0x15] = (b2[0x12] - b2[0x15]) * costab[2];
165 b1[0x16] = (b2[0x11] - b2[0x16]) * costab[1];
166 b1[0x17] = (b2[0x10] - b2[0x17]) * costab[0];
167 b1[0x18] = b2[0x18] + b2[0x1F];
168 b1[0x19] = b2[0x19] + b2[0x1E];
169 b1[0x1A] = b2[0x1A] + b2[0x1D];
170 b1[0x1B] = b2[0x1B] + b2[0x1C];
171 b1[0x1C] = (b2[0x1C] - b2[0x1B]) * costab[3];
172 b1[0x1D] = (b2[0x1D] - b2[0x1A]) * costab[2];
173 b1[0x1E] = (b2[0x1E] - b2[0x19]) * costab[1];
174 b1[0x1F] = (b2[0x1F] - b2[0x18]) * costab[0];
175 }
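/* The three unrolled passes above (and the pnts[3] pass further below) all
   follow the same butterfly pattern; a compact, hypothetical loop form for
   reference (in, out, k, n, blk are illustrative names, not from the original):

       // pass k: block size n = 32 >> k, costab = pnts[k]
       for (blk = 0; blk < (1 << k); blk++) {
           base = blk * n;
           for (i = 0; i < n / 2; i++) {
               front = in[base + i];
               back  = in[base + n - 1 - i];
               out[base + i] = front + back;
               out[base + n - 1 - i] =
                   ((blk & 1) ? (back - front) : (front - back)) * costab[i];
           }
       }
*/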
176
177 #else /* ALTIVEC_USE_REFERENCE_C_CODE */
178
179 // How does it work?
180 // The first three passes are reproduced in the three blocks below.
181 // All computations are done on 4-element vectors.
182 // 'reverse' is a special permutation vector used to reverse
183 // the order of the elements inside a vector.
184 // Note that all loads/stores to b1 (b2) between passes 1 and 2 (2 and 3)
185 // have been removed; all elements are kept inside b1vX (b2vX).
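// For example (illustrative): the single statement
//   b1v0 = vec_add(samplesv1, vec_perm(samplesv8, samplesv8, reverse));
// computes the four scalar butterflies of the reference code at once:
//   b1[0]=samples[0]+samples[31], ..., b1[3]=samples[3]+samples[28].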
186 {
187 register vector float
188 b1v0, b1v1, b1v2, b1v3,
189 b1v4, b1v5, b1v6, b1v7;
190 register vector float
191 temp1, temp2;
192
193 {
194 register real *costab = pnts[0];
195
196 register vector float
197 samplesv1, samplesv2, samplesv3, samplesv4,
198 samplesv5, samplesv6, samplesv7, samplesv8,
199 samplesv9;
200 register vector unsigned char samples_perm = vec_lvsl(0, samples);
201 register vector float costabv1, costabv2, costabv3, costabv4, costabv5;
202 register vector unsigned char costab_perm = vec_lvsl(0, costab);
203
204 samplesv1 = vec_ld(0, samples);
205 samplesv2 = vec_ld(16, samples);
206 samplesv1 = vec_perm(samplesv1, samplesv2, samples_perm);
207 samplesv3 = vec_ld(32, samples);
208 samplesv2 = vec_perm(samplesv2, samplesv3, samples_perm);
209 samplesv4 = vec_ld(48, samples);
210 samplesv3 = vec_perm(samplesv3, samplesv4, samples_perm);
211 samplesv5 = vec_ld(64, samples);
212 samplesv4 = vec_perm(samplesv4, samplesv5, samples_perm);
213 samplesv6 = vec_ld(80, samples);
214 samplesv5 = vec_perm(samplesv5, samplesv6, samples_perm);
215 samplesv7 = vec_ld(96, samples);
216 samplesv6 = vec_perm(samplesv6, samplesv7, samples_perm);
217 samplesv8 = vec_ld(112, samples);
218 samplesv7 = vec_perm(samplesv7, samplesv8, samples_perm);
219 samplesv9 = vec_ld(128, samples);
220 samplesv8 = vec_perm(samplesv8, samplesv9, samples_perm);
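// The overlapping vec_ld's above, combined with vec_perm and the vec_lvsl
// mask, are the usual AltiVec idiom for reading from a possibly unaligned
// pointer: vec_ld always fetches from a 16-byte boundary, so each logical
// vector samplesvN is assembled from the two aligned loads that straddle it.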
221
222 temp1 = vec_add(samplesv1,
223 vec_perm(samplesv8, samplesv8, reverse));
224 //vec_st(temp1, 0, b1);
225 b1v0 = temp1;
226 temp1 = vec_add(samplesv2,
227 vec_perm(samplesv7, samplesv7, reverse));
228 //vec_st(temp1, 16, b1);
229 b1v1 = temp1;
230 temp1 = vec_add(samplesv3,
231 vec_perm(samplesv6, samplesv6, reverse));
232 //vec_st(temp1, 32, b1);
233 b1v2 = temp1;
234 temp1 = vec_add(samplesv4,
235 vec_perm(samplesv5, samplesv5, reverse));
236 //vec_st(temp1, 48, b1);
237 b1v3 = temp1;
238
239 costabv1 = vec_ld(0, costab);
240 costabv2 = vec_ld(16, costab);
241 costabv1 = vec_perm(costabv1, costabv2, costab_perm);
242 costabv3 = vec_ld(32, costab);
243 costabv2 = vec_perm(costabv2, costabv3, costab_perm);
244 costabv4 = vec_ld(48, costab);
245 costabv3 = vec_perm(costabv3, costabv4, costab_perm);
246 costabv5 = vec_ld(64, costab);
247 costabv4 = vec_perm(costabv4, costabv5, costab_perm);
248
249 temp1 = vec_sub(vec_perm(samplesv4, samplesv4, reverse),
250 samplesv5);
251 temp2 = vec_madd(temp1,
252 vec_perm(costabv4, costabv4, reverse),
253 vczero);
254 //vec_st(temp2, 64, b1);
255 b1v4 = temp2;
256
257 temp1 = vec_sub(vec_perm(samplesv3, samplesv3, reverse),
258 samplesv6);
259 temp2 = vec_madd(temp1,
260 vec_perm(costabv3, costabv3, reverse),
261 vczero);
262 //vec_st(temp2, 80, b1);
263 b1v5 = temp2;
264 temp1 = vec_sub(vec_perm(samplesv2, samplesv2, reverse),
265 samplesv7);
266 temp2 = vec_madd(temp1,
267 vec_perm(costabv2, costabv2, reverse),
268 vczero);
269 //vec_st(temp2, 96, b1);
270 b1v6 = temp2;
271
272 temp1 = vec_sub(vec_perm(samplesv1, samplesv1, reverse),
273 samplesv8);
274 temp2 = vec_madd(temp1,
275 vec_perm(costabv1, costabv1, reverse),
276 vczero);
277 //vec_st(temp2, 112, b1);
278 b1v7 = temp2;
279
280 }
281
282 {
283 register vector float
284 b2v0, b2v1, b2v2, b2v3,
285 b2v4, b2v5, b2v6, b2v7;
286 {
287 register real *costab = pnts[1];
288 register vector float costabv1r, costabv2r, costabv1, costabv2, costabv3;
289 register vector unsigned char costab_perm = vec_lvsl(0, costab);
290
291 costabv1 = vec_ld(0, costab);
292 costabv2 = vec_ld(16, costab);
293 costabv1 = vec_perm(costabv1, costabv2, costab_perm);
294 costabv3 = vec_ld(32, costab);
295 costabv2 = vec_perm(costabv2, costabv3 , costab_perm);
296 costabv1r = vec_perm(costabv1, costabv1, reverse);
297 costabv2r = vec_perm(costabv2, costabv2, reverse);
298
299 temp1 = vec_add(b1v0, vec_perm(b1v3, b1v3, reverse));
300 //vec_st(temp1, 0, b2);
301 b2v0 = temp1;
302 temp1 = vec_add(b1v1, vec_perm(b1v2, b1v2, reverse));
303 //vec_st(temp1, 16, b2);
304 b2v1 = temp1;
305 temp2 = vec_sub(vec_perm(b1v1, b1v1, reverse), b1v2);
306 temp1 = vec_madd(temp2, costabv2r, vczero);
307 //vec_st(temp1, 32, b2);
308 b2v2 = temp1;
309 temp2 = vec_sub(vec_perm(b1v0, b1v0, reverse), b1v3);
310 temp1 = vec_madd(temp2, costabv1r, vczero);
311 //vec_st(temp1, 48, b2);
312 b2v3 = temp1;
313 temp1 = vec_add(b1v4, vec_perm(b1v7, b1v7, reverse));
314 //vec_st(temp1, 64, b2);
315 b2v4 = temp1;
316 temp1 = vec_add(b1v5, vec_perm(b1v6, b1v6, reverse));
317 //vec_st(temp1, 80, b2);
318 b2v5 = temp1;
319 temp2 = vec_sub(b1v6, vec_perm(b1v5, b1v5, reverse));
320 temp1 = vec_madd(temp2, costabv2r, vczero);
321 //vec_st(temp1, 96, b2);
322 b2v6 = temp1;
323 temp2 = vec_sub(b1v7, vec_perm(b1v4, b1v4, reverse));
324 temp1 = vec_madd(temp2, costabv1r, vczero);
325 //vec_st(temp1, 112, b2);
326 b2v7 = temp1;
327 }
328
329 {
330 register real *costab = pnts[2];
331
332
333 vector float costabv1r, costabv1, costabv2;
334 vector unsigned char costab_perm = vec_lvsl(0, costab);
335
336 costabv1 = vec_ld(0, costab);
337 costabv2 = vec_ld(16, costab);
338 costabv1 = vec_perm(costabv1, costabv2, costab_perm);
339 costabv1r = vec_perm(costabv1, costabv1, reverse);
340
341 temp1 = vec_add(b2v0, vec_perm(b2v1, b2v1, reverse));
342 vec_st(temp1, 0, b1);
343 temp2 = vec_sub(vec_perm(b2v0, b2v0, reverse), b2v1);
344 temp1 = vec_madd(temp2, costabv1r, vczero);
345 vec_st(temp1, 16, b1);
346
347 temp1 = vec_add(b2v2, vec_perm(b2v3, b2v3, reverse));
348 vec_st(temp1, 32, b1);
349 temp2 = vec_sub(b2v3, vec_perm(b2v2, b2v2, reverse));
350 temp1 = vec_madd(temp2, costabv1r, vczero);
351 vec_st(temp1, 48, b1);
352
353 temp1 = vec_add(b2v4, vec_perm(b2v5, b2v5, reverse));
354 vec_st(temp1, 64, b1);
355 temp2 = vec_sub(vec_perm(b2v4, b2v4, reverse), b2v5);
356 temp1 = vec_madd(temp2, costabv1r, vczero);
357 vec_st(temp1, 80, b1);
358
359 temp1 = vec_add(b2v6, vec_perm(b2v7, b2v7, reverse));
360 vec_st(temp1, 96, b1);
361 temp2 = vec_sub(b2v7, vec_perm(b2v6, b2v6, reverse));
362 temp1 = vec_madd(temp2, costabv1r, vczero);
363 vec_st(temp1, 112, b1);
364
365 }
366 }
367 }
368
369 #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
370
371 {
372 register real const cos0 = pnts[3][0];
373 register real const cos1 = pnts[3][1];
374
375 b2[0x00] = b1[0x00] + b1[0x03];
376 b2[0x01] = b1[0x01] + b1[0x02];
377 b2[0x02] = (b1[0x01] - b1[0x02]) * cos1;
378 b2[0x03] = (b1[0x00] - b1[0x03]) * cos0;
379 b2[0x04] = b1[0x04] + b1[0x07];
380 b2[0x05] = b1[0x05] + b1[0x06];
381 b2[0x06] = (b1[0x06] - b1[0x05]) * cos1;
382 b2[0x07] = (b1[0x07] - b1[0x04]) * cos0;
383 b2[0x08] = b1[0x08] + b1[0x0B];
384 b2[0x09] = b1[0x09] + b1[0x0A];
385 b2[0x0A] = (b1[0x09] - b1[0x0A]) * cos1;
386 b2[0x0B] = (b1[0x08] - b1[0x0B]) * cos0;
387 b2[0x0C] = b1[0x0C] + b1[0x0F];
388 b2[0x0D] = b1[0x0D] + b1[0x0E];
389 b2[0x0E] = (b1[0x0E] - b1[0x0D]) * cos1;
390 b2[0x0F] = (b1[0x0F] - b1[0x0C]) * cos0;
391 b2[0x10] = b1[0x10] + b1[0x13];
392 b2[0x11] = b1[0x11] + b1[0x12];
393 b2[0x12] = (b1[0x11] - b1[0x12]) * cos1;
394 b2[0x13] = (b1[0x10] - b1[0x13]) * cos0;
395 b2[0x14] = b1[0x14] + b1[0x17];
396 b2[0x15] = b1[0x15] + b1[0x16];
397 b2[0x16] = (b1[0x16] - b1[0x15]) * cos1;
398 b2[0x17] = (b1[0x17] - b1[0x14]) * cos0;
399 b2[0x18] = b1[0x18] + b1[0x1B];
400 b2[0x19] = b1[0x19] + b1[0x1A];
401 b2[0x1A] = (b1[0x19] - b1[0x1A]) * cos1;
402 b2[0x1B] = (b1[0x18] - b1[0x1B]) * cos0;
403 b2[0x1C] = b1[0x1C] + b1[0x1F];
404 b2[0x1D] = b1[0x1D] + b1[0x1E];
405 b2[0x1E] = (b1[0x1E] - b1[0x1D]) * cos1;
406 b2[0x1F] = (b1[0x1F] - b1[0x1C]) * cos0;
407 }
408
409 {
410 register real const cos0 = pnts[4][0];
411
412 b1[0x00] = b2[0x00] + b2[0x01];
413 b1[0x01] = (b2[0x00] - b2[0x01]) * cos0;
414 b1[0x02] = b2[0x02] + b2[0x03];
415 b1[0x03] = (b2[0x03] - b2[0x02]) * cos0;
416 b1[0x02] += b1[0x03];
417
418 b1[0x04] = b2[0x04] + b2[0x05];
419 b1[0x05] = (b2[0x04] - b2[0x05]) * cos0;
420 b1[0x06] = b2[0x06] + b2[0x07];
421 b1[0x07] = (b2[0x07] - b2[0x06]) * cos0;
422 b1[0x06] += b1[0x07];
423 b1[0x04] += b1[0x06];
424 b1[0x06] += b1[0x05];
425 b1[0x05] += b1[0x07];
426
427 b1[0x08] = b2[0x08] + b2[0x09];
428 b1[0x09] = (b2[0x08] - b2[0x09]) * cos0;
429 b1[0x0A] = b2[0x0A] + b2[0x0B];
430 b1[0x0B] = (b2[0x0B] - b2[0x0A]) * cos0;
431 b1[0x0A] += b1[0x0B];
432
433 b1[0x0C] = b2[0x0C] + b2[0x0D];
434 b1[0x0D] = (b2[0x0C] - b2[0x0D]) * cos0;
435 b1[0x0E] = b2[0x0E] + b2[0x0F];
436 b1[0x0F] = (b2[0x0F] - b2[0x0E]) * cos0;
437 b1[0x0E] += b1[0x0F];
438 b1[0x0C] += b1[0x0E];
439 b1[0x0E] += b1[0x0D];
440 b1[0x0D] += b1[0x0F];
441
442 b1[0x10] = b2[0x10] + b2[0x11];
443 b1[0x11] = (b2[0x10] - b2[0x11]) * cos0;
444 b1[0x12] = b2[0x12] + b2[0x13];
445 b1[0x13] = (b2[0x13] - b2[0x12]) * cos0;
446 b1[0x12] += b1[0x13];
447
448 b1[0x14] = b2[0x14] + b2[0x15];
449 b1[0x15] = (b2[0x14] - b2[0x15]) * cos0;
450 b1[0x16] = b2[0x16] + b2[0x17];
451 b1[0x17] = (b2[0x17] - b2[0x16]) * cos0;
452 b1[0x16] += b1[0x17];
453 b1[0x14] += b1[0x16];
454 b1[0x16] += b1[0x15];
455 b1[0x15] += b1[0x17];
456
457 b1[0x18] = b2[0x18] + b2[0x19];
458 b1[0x19] = (b2[0x18] - b2[0x19]) * cos0;
459 b1[0x1A] = b2[0x1A] + b2[0x1B];
460 b1[0x1B] = (b2[0x1B] - b2[0x1A]) * cos0;
461 b1[0x1A] += b1[0x1B];
462
463 b1[0x1C] = b2[0x1C] + b2[0x1D];
464 b1[0x1D] = (b2[0x1C] - b2[0x1D]) * cos0;
465 b1[0x1E] = b2[0x1E] + b2[0x1F];
466 b1[0x1F] = (b2[0x1F] - b2[0x1E]) * cos0;
467 b1[0x1E] += b1[0x1F];
468 b1[0x1C] += b1[0x1E];
469 b1[0x1E] += b1[0x1D];
470 b1[0x1D] += b1[0x1F];
471 }
472
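// The stores below scatter the results into the two output windows with a
// stride of 16 floats (0x10); the scrambled indices fold the last butterfly
// stage into the final DCT-output reordering expected by the synthesis
// filter, presumably matching the scalar dct64.c tail exactly (the patch is
// described as bit-perfect).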
473 out0[0x10*16] = b1[0x00];
474 out0[0x10*12] = b1[0x04];
475 out0[0x10* 8] = b1[0x02];
476 out0[0x10* 4] = b1[0x06];
477 out0[0x10* 0] = b1[0x01];
478 out1[0x10* 0] = b1[0x01];
479 out1[0x10* 4] = b1[0x05];
480 out1[0x10* 8] = b1[0x03];
481 out1[0x10*12] = b1[0x07];
482
483 b1[0x08] += b1[0x0C];
484 out0[0x10*14] = b1[0x08];
485 b1[0x0C] += b1[0x0a];
486 out0[0x10*10] = b1[0x0C];
487 b1[0x0A] += b1[0x0E];
488 out0[0x10* 6] = b1[0x0A];
489 b1[0x0E] += b1[0x09];
490 out0[0x10* 2] = b1[0x0E];
491 b1[0x09] += b1[0x0D];
492 out1[0x10* 2] = b1[0x09];
493 b1[0x0D] += b1[0x0B];
494 out1[0x10* 6] = b1[0x0D];
495 b1[0x0B] += b1[0x0F];
496 out1[0x10*10] = b1[0x0B];
497 out1[0x10*14] = b1[0x0F];
498
499 b1[0x18] += b1[0x1C];
500 out0[0x10*15] = b1[0x10] + b1[0x18];
501 out0[0x10*13] = b1[0x18] + b1[0x14];
502 b1[0x1C] += b1[0x1a];
503 out0[0x10*11] = b1[0x14] + b1[0x1C];
504 out0[0x10* 9] = b1[0x1C] + b1[0x12];
505 b1[0x1A] += b1[0x1E];
506 out0[0x10* 7] = b1[0x12] + b1[0x1A];
507 out0[0x10* 5] = b1[0x1A] + b1[0x16];
508 b1[0x1E] += b1[0x19];
509 out0[0x10* 3] = b1[0x16] + b1[0x1E];
510 out0[0x10* 1] = b1[0x1E] + b1[0x11];
511 b1[0x19] += b1[0x1D];
512 out1[0x10* 1] = b1[0x11] + b1[0x19];
513 out1[0x10* 3] = b1[0x19] + b1[0x15];
514 b1[0x1D] += b1[0x1B];
515 out1[0x10* 5] = b1[0x15] + b1[0x1D];
516 out1[0x10* 7] = b1[0x1D] + b1[0x13];
517 b1[0x1B] += b1[0x1F];
518 out1[0x10* 9] = b1[0x13] + b1[0x1B];
519 out1[0x10*11] = b1[0x1B] + b1[0x17];
520 out1[0x10*13] = b1[0x17] + b1[0x1F];
521 out1[0x10*15] = b1[0x1F];
522 }
523
524 #endif /* HAVE_ALTIVEC */
525