/*
 * imdct.c
 * Copyright (C) 2000-2001 Michel Lespinasse <walken@zoy.org>
 * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
 *
 * This file is part of a52dec, a free ATSC A-52 stream decoder.
 * See http://liba52.sourceforge.net/ for updates.
 *
 * a52dec is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * a52dec is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "config.h"

#include <math.h>
#include <stdio.h>
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795029
#endif
#include <inttypes.h>

#include "a52.h"
#include "a52_internal.h"
#include "mm_accel.h"

void (* imdct_256) (sample_t data[], sample_t delay[], sample_t bias);
void (* imdct_512) (sample_t data[], sample_t delay[], sample_t bias);
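/* (These two entry points are bound at runtime by imdct_init () below.) */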

typedef struct complex_s {
    sample_t real;
    sample_t imag;
} complex_t;


/* 128 point bit-reverse LUT */
static uint8_t bit_reverse_512[] = {
    0x00, 0x40, 0x20, 0x60, 0x10, 0x50, 0x30, 0x70,
    0x08, 0x48, 0x28, 0x68, 0x18, 0x58, 0x38, 0x78,
    0x04, 0x44, 0x24, 0x64, 0x14, 0x54, 0x34, 0x74,
    0x0c, 0x4c, 0x2c, 0x6c, 0x1c, 0x5c, 0x3c, 0x7c,
    0x02, 0x42, 0x22, 0x62, 0x12, 0x52, 0x32, 0x72,
    0x0a, 0x4a, 0x2a, 0x6a, 0x1a, 0x5a, 0x3a, 0x7a,
    0x06, 0x46, 0x26, 0x66, 0x16, 0x56, 0x36, 0x76,
    0x0e, 0x4e, 0x2e, 0x6e, 0x1e, 0x5e, 0x3e, 0x7e,
    0x01, 0x41, 0x21, 0x61, 0x11, 0x51, 0x31, 0x71,
    0x09, 0x49, 0x29, 0x69, 0x19, 0x59, 0x39, 0x79,
    0x05, 0x45, 0x25, 0x65, 0x15, 0x55, 0x35, 0x75,
    0x0d, 0x4d, 0x2d, 0x6d, 0x1d, 0x5d, 0x3d, 0x7d,
    0x03, 0x43, 0x23, 0x63, 0x13, 0x53, 0x33, 0x73,
    0x0b, 0x4b, 0x2b, 0x6b, 0x1b, 0x5b, 0x3b, 0x7b,
    0x07, 0x47, 0x27, 0x67, 0x17, 0x57, 0x37, 0x77,
    0x0f, 0x4f, 0x2f, 0x6f, 0x1f, 0x5f, 0x3f, 0x7f};

static uint8_t bit_reverse_256[] = {
    0x00, 0x20, 0x10, 0x30, 0x08, 0x28, 0x18, 0x38,
    0x04, 0x24, 0x14, 0x34, 0x0c, 0x2c, 0x1c, 0x3c,
    0x02, 0x22, 0x12, 0x32, 0x0a, 0x2a, 0x1a, 0x3a,
    0x06, 0x26, 0x16, 0x36, 0x0e, 0x2e, 0x1e, 0x3e,
    0x01, 0x21, 0x11, 0x31, 0x09, 0x29, 0x19, 0x39,
    0x05, 0x25, 0x15, 0x35, 0x0d, 0x2d, 0x1d, 0x3d,
    0x03, 0x23, 0x13, 0x33, 0x0b, 0x2b, 0x1b, 0x3b,
    0x07, 0x27, 0x17, 0x37, 0x0f, 0x2f, 0x1f, 0x3f};

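/*
 * Illustrative sketch (not part of the decoder): each entry above is the
 * 7-bit (resp. 6-bit) reversal of its index, so an equivalent table could
 * be generated at init time instead of being hard-coded:
 *
 *     int i, j;
 *     for (i = 0; i < 128; i++) {
 *         uint8_t r = 0;
 *         for (j = 0; j < 7; j++)
 *             r |= ((i >> j) & 1) << (6 - j);
 *         bit_reverse_512[i] = r;   // e.g. i=1 (0000001b) -> 0x40 (1000000b)
 *     }
 */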
#ifdef HAVE_SSE
// NOTE: SSE needs 16-byte alignment or it will segfault
static complex_t __attribute__((aligned(16))) buf[128];
static float __attribute__((aligned(16))) sseSinCos1a[256];
static float __attribute__((aligned(16))) sseSinCos1b[256];
static float __attribute__((aligned(16))) ps111_1[4]={1,1,1,-1};
//static float __attribute__((aligned(16))) sseW0[4];
static float __attribute__((aligned(16))) sseW1[8];
static float __attribute__((aligned(16))) sseW2[16];
static float __attribute__((aligned(16))) sseW3[32];
static float __attribute__((aligned(16))) sseW4[64];
static float __attribute__((aligned(16))) sseW5[128];
static float __attribute__((aligned(16))) sseW6[256];
static float __attribute__((aligned(16))) *sseW[7]=
    {NULL /*sseW0*/,sseW1,sseW2,sseW3,sseW4,sseW5,sseW6};
static float __attribute__((aligned(16))) sseWindow[512];
#else
static complex_t buf[128];
#endif

/* Twiddle factor LUT */
static complex_t w_1[1];
static complex_t w_2[2];
static complex_t w_4[4];
static complex_t w_8[8];
static complex_t w_16[16];
static complex_t w_32[32];
static complex_t w_64[64];
static complex_t * w[7] = {w_1, w_2, w_4, w_8, w_16, w_32, w_64};

/* Twiddle factors for IMDCT */
static sample_t xcos1[128];
static sample_t xsin1[128];
static sample_t xcos2[64];
static sample_t xsin2[64];

/* Windowing function for Modified DCT - Thank you acroread */
sample_t imdct_window[] = {
    0.00014, 0.00024, 0.00037, 0.00051, 0.00067, 0.00086, 0.00107, 0.00130,
    0.00157, 0.00187, 0.00220, 0.00256, 0.00297, 0.00341, 0.00390, 0.00443,
    0.00501, 0.00564, 0.00632, 0.00706, 0.00785, 0.00871, 0.00962, 0.01061,
    0.01166, 0.01279, 0.01399, 0.01526, 0.01662, 0.01806, 0.01959, 0.02121,
    0.02292, 0.02472, 0.02662, 0.02863, 0.03073, 0.03294, 0.03527, 0.03770,
    0.04025, 0.04292, 0.04571, 0.04862, 0.05165, 0.05481, 0.05810, 0.06153,
    0.06508, 0.06878, 0.07261, 0.07658, 0.08069, 0.08495, 0.08935, 0.09389,
    0.09859, 0.10343, 0.10842, 0.11356, 0.11885, 0.12429, 0.12988, 0.13563,
    0.14152, 0.14757, 0.15376, 0.16011, 0.16661, 0.17325, 0.18005, 0.18699,
    0.19407, 0.20130, 0.20867, 0.21618, 0.22382, 0.23161, 0.23952, 0.24757,
    0.25574, 0.26404, 0.27246, 0.28100, 0.28965, 0.29841, 0.30729, 0.31626,
    0.32533, 0.33450, 0.34376, 0.35311, 0.36253, 0.37204, 0.38161, 0.39126,
    0.40096, 0.41072, 0.42054, 0.43040, 0.44030, 0.45023, 0.46020, 0.47019,
    0.48020, 0.49022, 0.50025, 0.51028, 0.52031, 0.53033, 0.54033, 0.55031,
    0.56026, 0.57019, 0.58007, 0.58991, 0.59970, 0.60944, 0.61912, 0.62873,
    0.63827, 0.64774, 0.65713, 0.66643, 0.67564, 0.68476, 0.69377, 0.70269,
    0.71150, 0.72019, 0.72877, 0.73723, 0.74557, 0.75378, 0.76186, 0.76981,
    0.77762, 0.78530, 0.79283, 0.80022, 0.80747, 0.81457, 0.82151, 0.82831,
    0.83496, 0.84145, 0.84779, 0.85398, 0.86001, 0.86588, 0.87160, 0.87716,
    0.88257, 0.88782, 0.89291, 0.89785, 0.90264, 0.90728, 0.91176, 0.91610,
    0.92028, 0.92432, 0.92822, 0.93197, 0.93558, 0.93906, 0.94240, 0.94560,
    0.94867, 0.95162, 0.95444, 0.95713, 0.95971, 0.96217, 0.96451, 0.96674,
    0.96887, 0.97089, 0.97281, 0.97463, 0.97635, 0.97799, 0.97953, 0.98099,
    0.98236, 0.98366, 0.98488, 0.98602, 0.98710, 0.98811, 0.98905, 0.98994,
    0.99076, 0.99153, 0.99225, 0.99291, 0.99353, 0.99411, 0.99464, 0.99513,
    0.99558, 0.99600, 0.99639, 0.99674, 0.99706, 0.99736, 0.99763, 0.99788,
    0.99811, 0.99831, 0.99850, 0.99867, 0.99882, 0.99895, 0.99908, 0.99919,
    0.99929, 0.99938, 0.99946, 0.99953, 0.99959, 0.99965, 0.99969, 0.99974,
    0.99978, 0.99981, 0.99984, 0.99986, 0.99988, 0.99990, 0.99992, 0.99993,
    0.99994, 0.99995, 0.99996, 0.99997, 0.99998, 0.99998, 0.99998, 0.99999,
    0.99999, 0.99999, 0.99999, 1.00000, 1.00000, 1.00000, 1.00000, 1.00000,
    1.00000, 1.00000, 1.00000, 1.00000, 1.00000, 1.00000, 1.00000, 1.00000 };

static inline void swap_cmplx(complex_t *a, complex_t *b)
{
    complex_t tmp;

    tmp = *a;
    *a = *b;
    *b = tmp;
}


static inline complex_t cmplx_mult(complex_t a, complex_t b)
{
    complex_t ret;

    ret.real = a.real * b.real - a.imag * b.imag;
    ret.imag = a.real * b.imag + a.imag * b.real;

    return ret;
}
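/* Note: cmplx_mult is not actually called below; the FFT merge loops
   inline the same multiply. It is kept as a readable reference for the
   butterfly computations. */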

void
imdct_do_512(sample_t data[], sample_t delay[], sample_t bias)
{
    int i, k;
    int p, q;
    int m;
    int two_m;
    int two_m_plus_one;

    sample_t tmp_a_i;
    sample_t tmp_a_r;
    sample_t tmp_b_i;
    sample_t tmp_b_r;

    sample_t *data_ptr;
    sample_t *delay_ptr;
    sample_t *window_ptr;

    /* 512 IMDCT with source and dest data in 'data' */

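    /* Outline: pre-IFFT complex multiply (twiddle by xcos1/xsin1),
       bit-reversed shuffle, in-place radix-2 FFT merge, post-IFFT
       complex multiply, then windowing and overlap-add with 'delay'. */
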
#ifdef HAVE_SSE
    /* Pre IFFT complex multiply plus IFFT cmplx conjugate */
    /* Bit reversed shuffling */
    asm volatile(
        "xorl %%esi, %%esi            \n\t"
        "leal bit_reverse_512, %%eax  \n\t"
        "movl $1008, %%edi            \n\t"
        "pushl %%ebp                  \n\t" // use ebp without telling gcc
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movaps (%0, %%esi), %%xmm0   \n\t"
        "movaps (%0, %%edi), %%xmm1   \n\t"
        "shufps $0xA0, %%xmm0, %%xmm0 \n\t"
        "shufps $0x5F, %%xmm1, %%xmm1 \n\t"
        "mulps sseSinCos1a(%%esi), %%xmm0 \n\t"
        "mulps sseSinCos1b(%%esi), %%xmm1 \n\t"
        "addps %%xmm1, %%xmm0         \n\t"
        "movzbl (%%eax), %%edx        \n\t"
        "movzbl 1(%%eax), %%ebp       \n\t"
        "movlps %%xmm0, (%1, %%edx, 8)\n\t"
        "movhps %%xmm0, (%1, %%ebp, 8)\n\t"
        "addl $16, %%esi              \n\t"
        "addl $2, %%eax               \n\t" // avoid complex addressing for P4 crap
        "subl $16, %%edi              \n\t"
        " jnc 1b                      \n\t"
        "popl %%ebp                   \n\t" // no, we didn't touch ebp *g*
        :: "b" (data), "c" (buf)
        : "%esi", "%edi", "%eax", "%edx"
    );
#else
    /* Pre IFFT complex multiply plus IFFT cmplx conjugate */
    for (i = 0; i < 128; i++) {
        /* z[i] = (X[256-2*i-1] + j * X[2*i]) * (xcos1[i] + j * xsin1[i]) ; */
        buf[i].real = (data[256-2*i-1] * xcos1[i]) - (data[2*i] * xsin1[i]);
        buf[i].imag = -1.0 * ((data[2*i] * xcos1[i]) + (data[256-2*i-1] * xsin1[i]));
    }

    /* Bit reversed shuffling */
    for (i = 0; i < 128; i++) {
        k = bit_reverse_512[i];
        if (k < i)
            swap_cmplx(&buf[i], &buf[k]);
    }
#endif

    /* FFT Merge */
    /* unoptimized variant
    for (m = 0; m < 7; m++) {
        two_m = (1 << m);
        two_m_plus_one = (1 << (m+1));

        for (i = 0; i < 128; i += two_m_plus_one) {
            for (k = 0; k < two_m; k++) {
                p = k + i;
                q = p + two_m;
                tmp_a_r = buf[p].real;
                tmp_a_i = buf[p].imag;
                tmp_b_r = buf[q].real * w[m][k].real - buf[q].imag * w[m][k].imag;
                tmp_b_i = buf[q].imag * w[m][k].real + buf[q].real * w[m][k].imag;
                buf[p].real = tmp_a_r + tmp_b_r;
                buf[p].imag = tmp_a_i + tmp_b_i;
                buf[q].real = tmp_a_r - tmp_b_r;
                buf[q].imag = tmp_a_i - tmp_b_i;
            }
        }
    }
    */

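    /* The three blocks below unroll the m=0, m=1 and m=2 stages of this
       merge, folding in the known twiddle values noted in each block;
       the general loop further down handles m=3..6. */
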
#ifdef HAVE_SSE
    // Note w[0][0]={1,0}
    asm volatile(
        "xorps %%xmm1, %%xmm1    \n\t"
        "xorps %%xmm2, %%xmm2    \n\t"
        "movl %0, %%esi          \n\t"
        ".balign 16              \n\t"
        "1:                      \n\t"
        "movlps (%%esi), %%xmm0  \n\t" //buf[p]
        "movlps 8(%%esi), %%xmm1 \n\t" //buf[q]
        "movhps (%%esi), %%xmm0  \n\t" //buf[p]
        "movhps 8(%%esi), %%xmm2 \n\t" //buf[q]
        "addps %%xmm1, %%xmm0    \n\t"
        "subps %%xmm2, %%xmm0    \n\t"
        "movaps %%xmm0, (%%esi)  \n\t"
        "addl $16, %%esi         \n\t"
        "cmpl %1, %%esi          \n\t"
        " jb 1b                  \n\t"
        :: "g" (buf), "r" (buf + 128)
        : "%esi"
    );
#else
    for (i = 0; i < 128; i += 2) {
        tmp_a_r = buf[i].real;
        tmp_a_i = buf[i].imag;
        tmp_b_r = buf[i+1].real;
        tmp_b_i = buf[i+1].imag;
        buf[i].real = tmp_a_r + tmp_b_r;
        buf[i].imag = tmp_a_i + tmp_b_i;
        buf[i+1].real = tmp_a_r - tmp_b_r;
        buf[i+1].imag = tmp_a_i - tmp_b_i;
    }
#endif

    // Note w[1]={{1,0}, {0,-1}}
#ifdef HAVE_SSE
    asm volatile(
        "movaps ps111_1, %%xmm7       \n\t" // 1,1,1,-1
        "movl %0, %%esi               \n\t"
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movaps 16(%%esi), %%xmm2     \n\t" //r2,i2,r3,i3
        "shufps $0xB4, %%xmm2, %%xmm2 \n\t" //r2,i2,i3,r3
        "mulps %%xmm7, %%xmm2         \n\t" //r2,i2,i3,-r3
        "movaps (%%esi), %%xmm0       \n\t" //r0,i0,r1,i1
        "movaps (%%esi), %%xmm1       \n\t" //r0,i0,r1,i1
        "addps %%xmm2, %%xmm0         \n\t"
        "subps %%xmm2, %%xmm1         \n\t"
        "movaps %%xmm0, (%%esi)       \n\t"
        "movaps %%xmm1, 16(%%esi)     \n\t"
        "addl $32, %%esi              \n\t"
        "cmpl %1, %%esi               \n\t"
        " jb 1b                       \n\t"
        :: "g" (buf), "r" (buf + 128)
        : "%esi"
    );
#else
    for (i = 0; i < 128; i += 4) {
        tmp_a_r = buf[i].real;
        tmp_a_i = buf[i].imag;
        tmp_b_r = buf[i+2].real;
        tmp_b_i = buf[i+2].imag;
        buf[i].real = tmp_a_r + tmp_b_r;
        buf[i].imag = tmp_a_i + tmp_b_i;
        buf[i+2].real = tmp_a_r - tmp_b_r;
        buf[i+2].imag = tmp_a_i - tmp_b_i;
        tmp_a_r = buf[i+1].real;
        tmp_a_i = buf[i+1].imag;
        tmp_b_r = buf[i+3].imag;
        tmp_b_i = buf[i+3].real;
        buf[i+1].real = tmp_a_r + tmp_b_r;
        buf[i+1].imag = tmp_a_i - tmp_b_i;
        buf[i+3].real = tmp_a_r - tmp_b_r;
        buf[i+3].imag = tmp_a_i + tmp_b_i;
    }
#endif

#ifdef HAVE_SSE
    /*
     Note sseW2+0 ={1,1,sqrt(2)/2,sqrt(2)/2}
     Note sseW2+16={0,0,sqrt(2)/2,-sqrt(2)/2}
     Note sseW2+32={0,0,-sqrt(2)/2,-sqrt(2)/2}
     Note sseW2+48={1,-1,sqrt(2)/2,-sqrt(2)/2}
    */
    asm volatile(
        "movaps 48+sseW2, %%xmm6      \n\t"
        "movaps 16+sseW2, %%xmm7      \n\t"
        "xorps %%xmm5, %%xmm5         \n\t"
        "xorps %%xmm2, %%xmm2         \n\t"
        "movl %0, %%esi               \n\t"
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movaps 32(%%esi), %%xmm2     \n\t" //r4,i4,r5,i5
        "movaps 48(%%esi), %%xmm3     \n\t" //r6,i6,r7,i7
        "movaps sseW2, %%xmm4         \n\t" //r4,i4,r5,i5
        "movaps 32+sseW2, %%xmm5      \n\t" //r6,i6,r7,i7
        "mulps %%xmm2, %%xmm4         \n\t"
        "mulps %%xmm3, %%xmm5         \n\t"
        "shufps $0xB1, %%xmm2, %%xmm2 \n\t" //i4,r4,i5,r5
        "shufps $0xB1, %%xmm3, %%xmm3 \n\t" //i6,r6,i7,r7
        "mulps %%xmm6, %%xmm3         \n\t"
        "mulps %%xmm7, %%xmm2         \n\t"
        "movaps (%%esi), %%xmm0       \n\t" //r0,i0,r1,i1
        "movaps 16(%%esi), %%xmm1     \n\t" //r2,i2,r3,i3
        "addps %%xmm4, %%xmm2         \n\t"
        "addps %%xmm5, %%xmm3         \n\t"
        "movaps %%xmm2, %%xmm4        \n\t"
        "movaps %%xmm3, %%xmm5        \n\t"
        "addps %%xmm0, %%xmm2         \n\t"
        "addps %%xmm1, %%xmm3         \n\t"
        "subps %%xmm4, %%xmm0         \n\t"
        "subps %%xmm5, %%xmm1         \n\t"
        "movaps %%xmm2, (%%esi)       \n\t"
        "movaps %%xmm3, 16(%%esi)     \n\t"
        "movaps %%xmm0, 32(%%esi)     \n\t"
        "movaps %%xmm1, 48(%%esi)     \n\t"
        "addl $64, %%esi              \n\t"
        "cmpl %1, %%esi               \n\t"
        " jb 1b                       \n\t"
        :: "g" (buf), "r" (buf + 128)
        : "%esi"
    );
#else
    for (i = 0; i < 128; i += 8) {
        tmp_a_r = buf[i].real;
        tmp_a_i = buf[i].imag;
        tmp_b_r = buf[i+4].real;
        tmp_b_i = buf[i+4].imag;
        buf[i].real = tmp_a_r + tmp_b_r;
        buf[i].imag = tmp_a_i + tmp_b_i;
        buf[i+4].real = tmp_a_r - tmp_b_r;
        buf[i+4].imag = tmp_a_i - tmp_b_i;
        tmp_a_r = buf[1+i].real;
        tmp_a_i = buf[1+i].imag;
        tmp_b_r = (buf[i+5].real + buf[i+5].imag) * w[2][1].real;
        tmp_b_i = (buf[i+5].imag - buf[i+5].real) * w[2][1].real;
        buf[1+i].real = tmp_a_r + tmp_b_r;
        buf[1+i].imag = tmp_a_i + tmp_b_i;
        buf[i+5].real = tmp_a_r - tmp_b_r;
        buf[i+5].imag = tmp_a_i - tmp_b_i;
        tmp_a_r = buf[i+2].real;
        tmp_a_i = buf[i+2].imag;
        tmp_b_r = buf[i+6].imag;
        tmp_b_i = -buf[i+6].real;
        buf[i+2].real = tmp_a_r + tmp_b_r;
        buf[i+2].imag = tmp_a_i + tmp_b_i;
        buf[i+6].real = tmp_a_r - tmp_b_r;
        buf[i+6].imag = tmp_a_i - tmp_b_i;
        tmp_a_r = buf[i+3].real;
        tmp_a_i = buf[i+3].imag;
        tmp_b_r = (buf[i+7].real - buf[i+7].imag) * w[2][3].imag;
        tmp_b_i = (buf[i+7].imag + buf[i+7].real) * w[2][3].imag;
        buf[i+3].real = tmp_a_r + tmp_b_r;
        buf[i+3].imag = tmp_a_i + tmp_b_i;
        buf[i+7].real = tmp_a_r - tmp_b_r;
        buf[i+7].imag = tmp_a_i - tmp_b_i;
    }
#endif

#ifdef HAVE_SSE
    for (m = 3; m < 7; m++) {
        two_m = (1 << m);
        two_m_plus_one = two_m << 1;
        asm volatile(
            "movl %0, %%esi                 \n\t"
            ".balign 16                     \n\t"
            "1:                             \n\t"
            "xorl %%edi, %%edi              \n\t" // k
            "leal (%%esi, %3), %%edx        \n\t"
            "2:                             \n\t"
            "movaps (%%edx, %%edi), %%xmm1  \n\t"
            "movaps (%4, %%edi, 2), %%xmm2  \n\t"
            "mulps %%xmm1, %%xmm2           \n\t"
            "shufps $0xB1, %%xmm1, %%xmm1   \n\t"
            "mulps 16(%4, %%edi, 2), %%xmm1 \n\t"
            "movaps (%%esi, %%edi), %%xmm0  \n\t"
            "addps %%xmm2, %%xmm1           \n\t"
            "movaps %%xmm1, %%xmm2          \n\t"
            "addps %%xmm0, %%xmm1           \n\t"
            "subps %%xmm2, %%xmm0           \n\t"
            "movaps %%xmm1, (%%esi, %%edi)  \n\t"
            "movaps %%xmm0, (%%edx, %%edi)  \n\t"
            "addl $16, %%edi                \n\t"
            "cmpl %3, %%edi                 \n\t" //FIXME (opt) count against 0
            " jb 2b                         \n\t"
            "addl %2, %%esi                 \n\t"
            "cmpl %1, %%esi                 \n\t"
            " jb 1b                         \n\t"
            :: "g" (buf), "m" (buf+128), "m" (two_m_plus_one<<3), "r" (two_m<<3),
               "r" (sseW[m])
            : "%esi", "%edi", "%edx"
        );
    }

#else
    for (m = 3; m < 7; m++) {
        two_m = (1 << m);
        two_m_plus_one = two_m << 1;

        for (i = 0; i < 128; i += two_m_plus_one) {
            for (k = 0; k < two_m; k++) {
                int p = k + i;
                int q = p + two_m;
                tmp_a_r = buf[p].real;
                tmp_a_i = buf[p].imag;
                tmp_b_r = buf[q].real * w[m][k].real - buf[q].imag * w[m][k].imag;
                tmp_b_i = buf[q].imag * w[m][k].real + buf[q].real * w[m][k].imag;
                buf[p].real = tmp_a_r + tmp_b_r;
                buf[p].imag = tmp_a_i + tmp_b_i;
                buf[q].real = tmp_a_r - tmp_b_r;
                buf[q].imag = tmp_a_i - tmp_b_i;
            }
        }
    }
#endif

    /* Post IFFT complex multiply plus IFFT complex conjugate */
    for (i = 0; i < 128; i++) {
        /* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
        tmp_a_r = buf[i].real;
        tmp_a_i = -1.0 * buf[i].imag;
        buf[i].real = (tmp_a_r * xcos1[i]) - (tmp_a_i * xsin1[i]);
        buf[i].imag = (tmp_a_r * xsin1[i]) + (tmp_a_i * xcos1[i]);
    }

    data_ptr = data;
    delay_ptr = delay;
    window_ptr = imdct_window;

    /* Window and convert to real valued signal */
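    /* Each output sample is the windowed IFFT result overlap-added with
       the previous block's delay line, plus the bias; the second half of
       the windowed block is stored back into 'delay' further down. */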
#ifdef HAVE_SSE
    asm volatile(
        "xorl %%edi, %%edi            \n\t" // 0
        "xorl %%esi, %%esi            \n\t" // 0
        "movss %3, %%xmm2             \n\t" // bias
        "shufps $0x00, %%xmm2, %%xmm2 \n\t" // bias, bias, ...
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movlps (%0, %%esi), %%xmm0   \n\t" // ? ? A ?
        "movlps 8(%0, %%esi), %%xmm1  \n\t" // ? ? C ?
        "movhps -16(%0, %%edi), %%xmm1\n\t" // ? D C ?
        "movhps -8(%0, %%edi), %%xmm0 \n\t" // ? B A ?
        "shufps $0x99, %%xmm1, %%xmm0 \n\t" // D C B A
        "mulps sseWindow(%%esi), %%xmm0 \n\t"
        "addps (%2, %%esi), %%xmm0    \n\t"
        "addps %%xmm2, %%xmm0         \n\t"
        "movaps %%xmm0, (%1, %%esi)   \n\t"
        "addl $16, %%esi              \n\t"
        "subl $16, %%edi              \n\t"
        "cmpl $512, %%esi             \n\t"
        " jb 1b                       \n\t"
        :: "r" (buf+64), "r" (data_ptr), "r" (delay_ptr), "m" (bias)
        : "%esi", "%edi"
    );
    data_ptr += 128;
    delay_ptr += 128;
//  window_ptr += 128;
#else
    for (i = 0; i < 64; i++) {
        *data_ptr++ = -buf[64+i].imag   * *window_ptr++ + *delay_ptr++ + bias;
        *data_ptr++ =  buf[64-i-1].real * *window_ptr++ + *delay_ptr++ + bias;
    }
#endif

#ifdef HAVE_SSE
    asm volatile(
        "movl $1024, %%edi            \n\t" // 1024
        "xorl %%esi, %%esi            \n\t" // 0
        "movss %3, %%xmm2             \n\t" // bias
        "shufps $0x00, %%xmm2, %%xmm2 \n\t" // bias, bias, ...
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movlps (%0, %%esi), %%xmm0   \n\t" // ? ? ? A
        "movlps 8(%0, %%esi), %%xmm1  \n\t" // ? ? ? C
        "movhps -16(%0, %%edi), %%xmm1\n\t" // D ? ? C
        "movhps -8(%0, %%edi), %%xmm0 \n\t" // B ? ? A
        "shufps $0xCC, %%xmm1, %%xmm0 \n\t" // D C B A
        "mulps 512+sseWindow(%%esi), %%xmm0 \n\t"
        "addps (%2, %%esi), %%xmm0    \n\t"
        "addps %%xmm2, %%xmm0         \n\t"
        "movaps %%xmm0, (%1, %%esi)   \n\t"
        "addl $16, %%esi              \n\t"
        "subl $16, %%edi              \n\t"
        "cmpl $512, %%esi             \n\t"
        " jb 1b                       \n\t"
        :: "r" (buf), "r" (data_ptr), "r" (delay_ptr), "m" (bias)
        : "%esi", "%edi"
    );
    data_ptr += 128;
//  window_ptr += 128;
#else
    for (i = 0; i < 64; i++) {
        *data_ptr++ = -buf[i].real       * *window_ptr++ + *delay_ptr++ + bias;
        *data_ptr++ =  buf[128-i-1].imag * *window_ptr++ + *delay_ptr++ + bias;
    }
#endif

    /* The trailing edge of the window goes into the delay line */
    delay_ptr = delay;

#ifdef HAVE_SSE
    asm volatile(
        "xorl %%edi, %%edi            \n\t" // 0
        "xorl %%esi, %%esi            \n\t" // 0
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movlps (%0, %%esi), %%xmm0   \n\t" // ? ? ? A
        "movlps 8(%0, %%esi), %%xmm1  \n\t" // ? ? ? C
        "movhps -16(%0, %%edi), %%xmm1\n\t" // D ? ? C
        "movhps -8(%0, %%edi), %%xmm0 \n\t" // B ? ? A
        "shufps $0xCC, %%xmm1, %%xmm0 \n\t" // D C B A
        "mulps 1024+sseWindow(%%esi), %%xmm0 \n\t"
        "movaps %%xmm0, (%1, %%esi)   \n\t"
        "addl $16, %%esi              \n\t"
        "subl $16, %%edi              \n\t"
        "cmpl $512, %%esi             \n\t"
        " jb 1b                       \n\t"
        :: "r" (buf+64), "r" (delay_ptr)
        : "%esi", "%edi"
    );
    delay_ptr += 128;
//  window_ptr -= 128;
#else
    for (i = 0; i < 64; i++) {
        *delay_ptr++ = -buf[64+i].real   * *--window_ptr;
        *delay_ptr++ =  buf[64-i-1].imag * *--window_ptr;
    }
#endif

#ifdef HAVE_SSE
    asm volatile(
        "movl $1024, %%edi            \n\t" // 1024
        "xorl %%esi, %%esi            \n\t" // 0
        ".balign 16                   \n\t"
        "1:                           \n\t"
        "movlps (%0, %%esi), %%xmm0   \n\t" // ? ? A ?
        "movlps 8(%0, %%esi), %%xmm1  \n\t" // ? ? C ?
        "movhps -16(%0, %%edi), %%xmm1\n\t" // ? D C ?
        "movhps -8(%0, %%edi), %%xmm0 \n\t" // ? B A ?
        "shufps $0x99, %%xmm1, %%xmm0 \n\t" // D C B A
        "mulps 1536+sseWindow(%%esi), %%xmm0 \n\t"
        "movaps %%xmm0, (%1, %%esi)   \n\t"
        "addl $16, %%esi              \n\t"
        "subl $16, %%edi              \n\t"
        "cmpl $512, %%esi             \n\t"
        " jb 1b                       \n\t"
        :: "r" (buf), "r" (delay_ptr)
        : "%esi", "%edi"
    );
#else
    for (i = 0; i < 64; i++) {
        *delay_ptr++ =  buf[i].imag       * *--window_ptr;
        *delay_ptr++ = -buf[128-i-1].real * *--window_ptr;
    }
#endif
}
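
/*
 * Usage sketch (hypothetical caller; liba52 itself drives this from its
 * block decoder): one 256-sample delay line persists per channel, and the
 * function pointer selected by imdct_init () is called once per block:
 *
 *     static sample_t delay[256];
 *     sample_t coeffs[256];            // frequency coefficients for one block
 *
 *     imdct_init (mm_accel);           // once at startup
 *     imdct_512 (coeffs, delay, bias); // coeffs now holds 256 time samples
 */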

void
imdct_do_256(sample_t data[], sample_t delay[], sample_t bias)
{
    int i, k;
    int p, q;
    int m;
    int two_m;
    int two_m_plus_one;

    sample_t tmp_a_i;
    sample_t tmp_a_r;
    sample_t tmp_b_i;
    sample_t tmp_b_r;

    sample_t *data_ptr;
    sample_t *delay_ptr;
    sample_t *window_ptr;

    complex_t *buf_1, *buf_2;

    buf_1 = &buf[0];
    buf_2 = &buf[64];

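    /* The 256-sample transform is computed as two interleaved 64-point
       IFFTs: even coefficients go through buf_1, odd ones through buf_2. */
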
    /* Pre IFFT complex multiply plus IFFT cmplx conjugate */
    for (k = 0; k < 64; k++) {
        /* X1[k] = X[2*k]   */
        /* X2[k] = X[2*k+1] */

        p = 2 * (128 - 2*k - 1);
        q = 2 * (2 * k);

        /* Z1[k] = (X1[128-2*k-1] + j * X1[2*k]) * (xcos2[k] + j * xsin2[k]); */
        buf_1[k].real = data[p] * xcos2[k] - data[q] * xsin2[k];
        buf_1[k].imag = -1.0f * (data[q] * xcos2[k] + data[p] * xsin2[k]);
        /* Z2[k] = (X2[128-2*k-1] + j * X2[2*k]) * (xcos2[k] + j * xsin2[k]); */
        buf_2[k].real = data[p + 1] * xcos2[k] - data[q + 1] * xsin2[k];
        buf_2[k].imag = -1.0f * (data[q + 1] * xcos2[k] + data[p + 1] * xsin2[k]);
    }

    /* IFFT Bit reversed shuffling */
    for (i = 0; i < 64; i++) {
        k = bit_reverse_256[i];
        if (k < i) {
            swap_cmplx(&buf_1[i], &buf_1[k]);
            swap_cmplx(&buf_2[i], &buf_2[k]);
        }
    }

    /* FFT Merge */
    for (m = 0; m < 6; m++) {
        two_m = (1 << m);
        two_m_plus_one = (1 << (m+1));

        for (k = 0; k < two_m; k++) {
            for (i = 0; i < 64; i += two_m_plus_one) {
                p = k + i;
                q = p + two_m;
                /* Do block 1 */
                tmp_a_r = buf_1[p].real;
                tmp_a_i = buf_1[p].imag;
                tmp_b_r = buf_1[q].real * w[m][k].real - buf_1[q].imag * w[m][k].imag;
                tmp_b_i = buf_1[q].imag * w[m][k].real + buf_1[q].real * w[m][k].imag;
                buf_1[p].real = tmp_a_r + tmp_b_r;
                buf_1[p].imag = tmp_a_i + tmp_b_i;
                buf_1[q].real = tmp_a_r - tmp_b_r;
                buf_1[q].imag = tmp_a_i - tmp_b_i;

                /* Do block 2 */
                tmp_a_r = buf_2[p].real;
                tmp_a_i = buf_2[p].imag;
                tmp_b_r = buf_2[q].real * w[m][k].real - buf_2[q].imag * w[m][k].imag;
                tmp_b_i = buf_2[q].imag * w[m][k].real + buf_2[q].real * w[m][k].imag;
                buf_2[p].real = tmp_a_r + tmp_b_r;
                buf_2[p].imag = tmp_a_i + tmp_b_i;
                buf_2[q].real = tmp_a_r - tmp_b_r;
                buf_2[q].imag = tmp_a_i - tmp_b_i;
            }
        }
    }

    /* Post IFFT complex multiply */
    for (i = 0; i < 64; i++) {
        /* y1[n] = z1[n] * (xcos2[n] + j * xsin2[n]) ; */
        tmp_a_r =  buf_1[i].real;
        tmp_a_i = -buf_1[i].imag;
        buf_1[i].real = (tmp_a_r * xcos2[i]) - (tmp_a_i * xsin2[i]);
        buf_1[i].imag = (tmp_a_r * xsin2[i]) + (tmp_a_i * xcos2[i]);
        /* y2[n] = z2[n] * (xcos2[n] + j * xsin2[n]) ; */
        tmp_a_r =  buf_2[i].real;
        tmp_a_i = -buf_2[i].imag;
        buf_2[i].real = (tmp_a_r * xcos2[i]) - (tmp_a_i * xsin2[i]);
        buf_2[i].imag = (tmp_a_r * xsin2[i]) + (tmp_a_i * xcos2[i]);
    }

    data_ptr = data;
    delay_ptr = delay;
    window_ptr = imdct_window;

    /* Window and convert to real valued signal */
    for (i = 0; i < 64; i++) {
        *data_ptr++ = -buf_1[i].imag      * *window_ptr++ + *delay_ptr++ + bias;
        *data_ptr++ =  buf_1[64-i-1].real * *window_ptr++ + *delay_ptr++ + bias;
    }

    for (i = 0; i < 64; i++) {
        *data_ptr++ = -buf_1[i].real      * *window_ptr++ + *delay_ptr++ + bias;
        *data_ptr++ =  buf_1[64-i-1].imag * *window_ptr++ + *delay_ptr++ + bias;
    }

    delay_ptr = delay;

    for (i = 0; i < 64; i++) {
        *delay_ptr++ = -buf_2[i].real      * *--window_ptr;
        *delay_ptr++ =  buf_2[64-i-1].imag * *--window_ptr;
    }

    for (i = 0; i < 64; i++) {
        *delay_ptr++ =  buf_2[i].imag      * *--window_ptr;
        *delay_ptr++ = -buf_2[64-i-1].real * *--window_ptr;
    }
}

void imdct_init (uint32_t mm_accel)
{
#ifdef LIBA52_MLIB
    if (mm_accel & MM_ACCEL_MLIB) {
        fprintf (stderr, "Using mlib for IMDCT transform\n");
        imdct_512 = imdct_do_512_mlib;
        imdct_256 = imdct_do_256_mlib;
    } else
#endif
    {
        int i, j, k;

        fprintf (stderr, "No accelerated IMDCT transform found\n");

        /* Twiddle factors to turn IFFT into IMDCT */
        for (i = 0; i < 128; i++) {
            xcos1[i] = -cos ((M_PI / 2048) * (8 * i + 1));
            xsin1[i] = -sin ((M_PI / 2048) * (8 * i + 1));
        }
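        /* For SSE, repack the same twiddles into interleaved tables so the
           pre-IFFT multiply can process two complex values per iteration;
           the sign flips fold in the conjugation done by the C fallback. */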
#ifdef HAVE_SSE
        for (i = 0; i < 128; i++) {
            sseSinCos1a[2*i+0] = -xsin1[i];
            sseSinCos1a[2*i+1] = -xcos1[i];
            sseSinCos1b[2*i+0] =  xcos1[i];
            sseSinCos1b[2*i+1] = -xsin1[i];
        }
#endif

        /* More twiddle factors to turn IFFT into IMDCT */
        for (i = 0; i < 64; i++) {
            xcos2[i] = -cos ((M_PI / 1024) * (8 * i + 1));
            xsin2[i] = -sin ((M_PI / 1024) * (8 * i + 1));
        }

        for (i = 0; i < 7; i++) {
            j = 1 << i;
            for (k = 0; k < j; k++) {
                w[i][k].real = cos (-M_PI * k / j);
                w[i][k].imag = sin (-M_PI * k / j);
            }
        }
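        /* w[m][k] = e^(-j*pi*k/2^m), the twiddle for the 2^(m+1)-point
           merge stage; e.g. w[1] = {{1,0}, {0,-1}}, matching the notes
           in imdct_do_512 above. */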
#ifdef HAVE_SSE
        for (i = 1; i < 7; i++) {
            j = 1 << i;
            for (k = 0; k < j; k += 2) {

                sseW[i][4*k + 0] =  w[i][k+0].real;
                sseW[i][4*k + 1] =  w[i][k+0].real;
                sseW[i][4*k + 2] =  w[i][k+1].real;
                sseW[i][4*k + 3] =  w[i][k+1].real;

                sseW[i][4*k + 4] = -w[i][k+0].imag;
                sseW[i][4*k + 5] =  w[i][k+0].imag;
                sseW[i][4*k + 6] = -w[i][k+1].imag;
                sseW[i][4*k + 7] =  w[i][k+1].imag;

                // we multiply more or less uninitialized numbers so we need to use exactly 0.0
                if (k == 0)
                {
//                  sseW[i][4*k + 0] = sseW[i][4*k + 1] = 1.0;
                    sseW[i][4*k + 4] = sseW[i][4*k + 5] = 0.0;
                }

                if (2*k == j)
                {
                    sseW[i][4*k + 0] = sseW[i][4*k + 1] = 0.0;
//                  sseW[i][4*k + 4] = -(sseW[i][4*k + 5] = -1.0);
                }
            }
        }

        for (i = 0; i < 128; i++)
        {
            sseWindow[2*i+0] = -imdct_window[2*i+0];
            sseWindow[2*i+1] =  imdct_window[2*i+1];
        }

        for (i = 0; i < 64; i++)
        {
            sseWindow[256 + 2*i+0] = -imdct_window[254 - 2*i+1];
            sseWindow[256 + 2*i+1] =  imdct_window[254 - 2*i+0];
            sseWindow[384 + 2*i+0] =  imdct_window[126 - 2*i+1];
            sseWindow[384 + 2*i+1] = -imdct_window[126 - 2*i+0];
        }
#endif

        imdct_512 = imdct_do_512;
        imdct_256 = imdct_do_256;
    }
}