Mercurial > mplayer.hg
annotate liba52/imdct.c @ 4026:f431838015b8
read the whole FM
author | gabucino |
---|---|
date | Sun, 06 Jan 2002 23:52:42 +0000 |
parents | 0cc94b1eec0f |
children | 2dbd637ffe05 |
rev | line source |
---|---|
3394 | 1 /* |
2 * imdct.c | |
3 * Copyright (C) 2000-2001 Michel Lespinasse <walken@zoy.org> | |
4 * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca> | |
5 * | |
6 * This file is part of a52dec, a free ATSC A-52 stream decoder. | |
7 * See http://liba52.sourceforge.net/ for updates. | |
8 * | |
9 * a52dec is free software; you can redistribute it and/or modify | |
10 * it under the terms of the GNU General Public License as published by | |
11 * the Free Software Foundation; either version 2 of the License, or | |
12 * (at your option) any later version. | |
13 * | |
14 * a52dec is distributed in the hope that it will be useful, | |
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 * GNU General Public License for more details. | |
18 * | |
19 * You should have received a copy of the GNU General Public License | |
20 * along with this program; if not, write to the Free Software | |
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
3579 | 22 * |
23 * SSE optimizations from Michael Niedermayer (michaelni@gmx.at) | |
3884 | 24 * 3DNOW optimizations from Nick Kurshev <nickols_k@mail.ru> |
25 * michael did port them from libac3 (untested, perhaps totally broken) | |
3394 | 26 */ |
27 | |
28 #include "config.h" | |
29 | |
30 #include <math.h> | |
31 #include <stdio.h> | |
32 #ifndef M_PI | |
33 #define M_PI 3.1415926535897932384626433832795029 | |
34 #endif | |
35 #include <inttypes.h> | |
36 | |
37 #include "a52.h" | |
38 #include "a52_internal.h" | |
39 #include "mm_accel.h" | |
40 | |
/* With runtime CPU detection the 3DNow!-extended code path must be compiled
 * out here, since the instruction subset cannot be fixed at build time. */
#ifdef RUNTIME_CPUDETECT
#undef HAVE_3DNOWEX
#endif

/* Select the permuted-input FFT (fft_128p + pm128 ordering) in the C IMDCT
 * instead of the bit-reversed in-place merge — see imdct_do_512(). */
#define USE_AC3_C

/* IMDCT entry points; presumably assigned during initialization to the
 * C/SSE/3DNow! variant matching the detected CPU (the setup code is not in
 * this chunk) — TODO confirm. */
void (* imdct_256) (sample_t data[], sample_t delay[], sample_t bias);
void (* imdct_512) (sample_t data[], sample_t delay[], sample_t bias);
49 | |
/* Complex sample as used by the 128-point FFT work buffer. */
typedef struct complex_s {
    sample_t real;
    sample_t imag;
} complex_t;

/* 128-point complex FFTs (plain C and 3DNow!); defined later in this file. */
static void fft_128p(complex_t *a);
static void fft_128p_3dnow(complex_t *a);
57 | |
/* Input permutation for the 128-point permuted FFT (fft_128p): pm128[i] is
 * the source slot feeding output position i in the pre-twiddle loop.
 * 16-byte aligned so SIMD code may load from it directly. */
static const int pm128[128] __attribute__((aligned(16))) =
{
    0, 16, 32, 48, 64, 80, 96, 112, 8, 40, 72, 104, 24, 56, 88, 120,
    4, 20, 36, 52, 68, 84, 100, 116, 12, 28, 44, 60, 76, 92, 108, 124,
    2, 18, 34, 50, 66, 82, 98, 114, 10, 42, 74, 106, 26, 58, 90, 122,
    6, 22, 38, 54, 70, 86, 102, 118, 14, 46, 78, 110, 30, 62, 94, 126,
    1, 17, 33, 49, 65, 81, 97, 113, 9, 41, 73, 105, 25, 57, 89, 121,
    5, 21, 37, 53, 69, 85, 101, 117, 13, 29, 45, 61, 77, 93, 109, 125,
    3, 19, 35, 51, 67, 83, 99, 115, 11, 43, 75, 107, 27, 59, 91, 123,
    7, 23, 39, 55, 71, 87, 103, 119, 15, 31, 47, 63, 79, 95, 111, 127
};
3394 | 69 |
/* 128 point bit-reverse LUT */
/* bit_reverse_512[i] is the 7-bit bit-reversal of i; used to reorder the
 * 128 complex inputs of the 512-sample IMDCT when USE_AC3_C is not set. */
static uint8_t bit_reverse_512[] = {
    0x00, 0x40, 0x20, 0x60, 0x10, 0x50, 0x30, 0x70,
    0x08, 0x48, 0x28, 0x68, 0x18, 0x58, 0x38, 0x78,
    0x04, 0x44, 0x24, 0x64, 0x14, 0x54, 0x34, 0x74,
    0x0c, 0x4c, 0x2c, 0x6c, 0x1c, 0x5c, 0x3c, 0x7c,
    0x02, 0x42, 0x22, 0x62, 0x12, 0x52, 0x32, 0x72,
    0x0a, 0x4a, 0x2a, 0x6a, 0x1a, 0x5a, 0x3a, 0x7a,
    0x06, 0x46, 0x26, 0x66, 0x16, 0x56, 0x36, 0x76,
    0x0e, 0x4e, 0x2e, 0x6e, 0x1e, 0x5e, 0x3e, 0x7e,
    0x01, 0x41, 0x21, 0x61, 0x11, 0x51, 0x31, 0x71,
    0x09, 0x49, 0x29, 0x69, 0x19, 0x59, 0x39, 0x79,
    0x05, 0x45, 0x25, 0x65, 0x15, 0x55, 0x35, 0x75,
    0x0d, 0x4d, 0x2d, 0x6d, 0x1d, 0x5d, 0x3d, 0x7d,
    0x03, 0x43, 0x23, 0x63, 0x13, 0x53, 0x33, 0x73,
    0x0b, 0x4b, 0x2b, 0x6b, 0x1b, 0x5b, 0x3b, 0x7b,
    0x07, 0x47, 0x27, 0x67, 0x17, 0x57, 0x37, 0x77,
    0x0f, 0x4f, 0x2f, 0x6f, 0x1f, 0x5f, 0x3f, 0x7f};

/* 64-point (6-bit) bit-reversal table; presumably used by the 256-sample
 * IMDCT, which is not in this chunk — TODO confirm. */
static uint8_t bit_reverse_256[] = {
    0x00, 0x20, 0x10, 0x30, 0x08, 0x28, 0x18, 0x38,
    0x04, 0x24, 0x14, 0x34, 0x0c, 0x2c, 0x1c, 0x3c,
    0x02, 0x22, 0x12, 0x32, 0x0a, 0x2a, 0x1a, 0x3a,
    0x06, 0x26, 0x16, 0x36, 0x0e, 0x2e, 0x1e, 0x3e,
    0x01, 0x21, 0x11, 0x31, 0x09, 0x29, 0x19, 0x39,
    0x05, 0x25, 0x15, 0x35, 0x0d, 0x2d, 0x1d, 0x3d,
    0x03, 0x23, 0x13, 0x33, 0x0b, 0x2b, 0x1b, 0x3b,
    0x07, 0x27, 0x17, 0x37, 0x0f, 0x2f, 0x1f, 0x3f};
98 | |
#ifdef ARCH_X86
// NOTE: SSE needs 16byte alignment or it will segfault
//
/* Shared 128-point complex work buffer for all IMDCT variants. */
static complex_t __attribute__((aligned(16))) buf[128];
/* SSE pre/post-twiddle tables; presumably filled during initialization
 * (the setup code is not in this chunk) — TODO confirm. */
static float __attribute__((aligned(16))) sseSinCos1c[256];
static float __attribute__((aligned(16))) sseSinCos1d[256];
/* Packed sign pattern {1,1,1,-1}, used by the 2nd FFT pass in the SSE code. */
static float __attribute__((aligned(16))) ps111_1[4]={1,1,1,-1};
/* Per-stage twiddle tables for the SSE FFT merge; sseW[m] belongs to
 * stage m (stage 0 needs no twiddles, hence the NULL slot below). */
//static float __attribute__((aligned(16))) sseW0[4];
static float __attribute__((aligned(16))) sseW1[8];
static float __attribute__((aligned(16))) sseW2[16];
static float __attribute__((aligned(16))) sseW3[32];
static float __attribute__((aligned(16))) sseW4[64];
static float __attribute__((aligned(16))) sseW5[128];
static float __attribute__((aligned(16))) sseW6[256];
static float __attribute__((aligned(16))) *sseW[7]=
    {NULL /*sseW0*/,sseW1,sseW2,sseW3,sseW4,sseW5,sseW6};
/* SSE copy of the MDCT window (512 floats); see imdct_do_512_sse(). */
static float __attribute__((aligned(16))) sseWindow[512];
#else
static complex_t buf[128];
#endif
3394 | 119 |
/* Twiddle factor LUT */
/* w[m] holds the 2^m twiddle factors for stage m of the radix-2 FFT merge
 * (see the "unoptimized variant" loop in imdct_do_512). Presumably filled
 * during initialization — the setup code is not in this chunk. */
static complex_t w_1[1];
static complex_t w_2[2];
static complex_t w_4[4];
static complex_t w_8[8];
static complex_t w_16[16];
static complex_t w_32[32];
static complex_t w_64[64];
static complex_t * w[7] = {w_1, w_2, w_4, w_8, w_16, w_32, w_64};

/* Twiddle factors for IMDCT */
/* xcos1/xsin1: pre/post-twiddle for the 512-sample transform (128 bins);
 * xcos2/xsin2: presumably for the 256-sample transform (not in this chunk). */
static sample_t xcos1[128];
static sample_t xsin1[128];
static sample_t xcos2[64];
static sample_t xsin2[64];
135 | |
/* Windowing function for Modified DCT - Thank you acroread */
/* First half (256 taps) of the A/52 window; the IMDCT code walks it forward
 * for the overlap-add output and backward (via *--window_ptr) to refill the
 * delay line, so only half the symmetric window needs storing. */
sample_t imdct_window[] = {
    0.00014, 0.00024, 0.00037, 0.00051, 0.00067, 0.00086, 0.00107, 0.00130,
    0.00157, 0.00187, 0.00220, 0.00256, 0.00297, 0.00341, 0.00390, 0.00443,
    0.00501, 0.00564, 0.00632, 0.00706, 0.00785, 0.00871, 0.00962, 0.01061,
    0.01166, 0.01279, 0.01399, 0.01526, 0.01662, 0.01806, 0.01959, 0.02121,
    0.02292, 0.02472, 0.02662, 0.02863, 0.03073, 0.03294, 0.03527, 0.03770,
    0.04025, 0.04292, 0.04571, 0.04862, 0.05165, 0.05481, 0.05810, 0.06153,
    0.06508, 0.06878, 0.07261, 0.07658, 0.08069, 0.08495, 0.08935, 0.09389,
    0.09859, 0.10343, 0.10842, 0.11356, 0.11885, 0.12429, 0.12988, 0.13563,
    0.14152, 0.14757, 0.15376, 0.16011, 0.16661, 0.17325, 0.18005, 0.18699,
    0.19407, 0.20130, 0.20867, 0.21618, 0.22382, 0.23161, 0.23952, 0.24757,
    0.25574, 0.26404, 0.27246, 0.28100, 0.28965, 0.29841, 0.30729, 0.31626,
    0.32533, 0.33450, 0.34376, 0.35311, 0.36253, 0.37204, 0.38161, 0.39126,
    0.40096, 0.41072, 0.42054, 0.43040, 0.44030, 0.45023, 0.46020, 0.47019,
    0.48020, 0.49022, 0.50025, 0.51028, 0.52031, 0.53033, 0.54033, 0.55031,
    0.56026, 0.57019, 0.58007, 0.58991, 0.59970, 0.60944, 0.61912, 0.62873,
    0.63827, 0.64774, 0.65713, 0.66643, 0.67564, 0.68476, 0.69377, 0.70269,
    0.71150, 0.72019, 0.72877, 0.73723, 0.74557, 0.75378, 0.76186, 0.76981,
    0.77762, 0.78530, 0.79283, 0.80022, 0.80747, 0.81457, 0.82151, 0.82831,
    0.83496, 0.84145, 0.84779, 0.85398, 0.86001, 0.86588, 0.87160, 0.87716,
    0.88257, 0.88782, 0.89291, 0.89785, 0.90264, 0.90728, 0.91176, 0.91610,
    0.92028, 0.92432, 0.92822, 0.93197, 0.93558, 0.93906, 0.94240, 0.94560,
    0.94867, 0.95162, 0.95444, 0.95713, 0.95971, 0.96217, 0.96451, 0.96674,
    0.96887, 0.97089, 0.97281, 0.97463, 0.97635, 0.97799, 0.97953, 0.98099,
    0.98236, 0.98366, 0.98488, 0.98602, 0.98710, 0.98811, 0.98905, 0.98994,
    0.99076, 0.99153, 0.99225, 0.99291, 0.99353, 0.99411, 0.99464, 0.99513,
    0.99558, 0.99600, 0.99639, 0.99674, 0.99706, 0.99736, 0.99763, 0.99788,
    0.99811, 0.99831, 0.99850, 0.99867, 0.99882, 0.99895, 0.99908, 0.99919,
    0.99929, 0.99938, 0.99946, 0.99953, 0.99959, 0.99965, 0.99969, 0.99974,
    0.99978, 0.99981, 0.99984, 0.99986, 0.99988, 0.99990, 0.99992, 0.99993,
    0.99994, 0.99995, 0.99996, 0.99997, 0.99998, 0.99998, 0.99998, 0.99999,
    0.99999, 0.99999, 0.99999, 1.00000, 1.00000, 1.00000, 1.00000, 1.00000,
    1.00000, 1.00000, 1.00000, 1.00000, 1.00000, 1.00000, 1.00000, 1.00000 };
170 | |
171 | |
172 static inline void swap_cmplx(complex_t *a, complex_t *b) | |
173 { | |
174 complex_t tmp; | |
175 | |
176 tmp = *a; | |
177 *a = *b; | |
178 *b = tmp; | |
179 } | |
180 | |
181 | |
182 | |
183 static inline complex_t cmplx_mult(complex_t a, complex_t b) | |
184 { | |
185 complex_t ret; | |
186 | |
187 ret.real = a.real * b.real - a.imag * b.imag; | |
188 ret.imag = a.real * b.imag + a.imag * b.real; | |
189 | |
190 return ret; | |
191 } | |
192 | |
/*
 * 512-sample IMDCT (portable C version), operating in place on data[].
 *
 * Stages:
 *   1. pre-twiddle: combine the 256 coefficients into 128 complex values
 *      in buf[], reordered for the FFT (pm128 or bit-reversed order);
 *   2. 128-point complex IFFT — fft_128p() when USE_AC3_C is defined,
 *      otherwise the unrolled in-place radix-2 merge below;
 *   3. post-twiddle: complex multiply with conjugation;
 *   4. window the result, overlap-add the previous block from delay[]
 *      (plus 'bias' on every sample), and store the trailing windowed
 *      half back into delay[] for the next block.
 *
 * data[]  : 256 frequency coefficients in, 256 time-domain samples out
 * delay[] : 256-sample overlap buffer, updated in place
 * bias    : constant added to every output sample
 */
void
imdct_do_512(sample_t data[],sample_t delay[], sample_t bias)
{
    int i,k;
    int p,q;
    int m;
    int two_m;
    int two_m_plus_one;

    sample_t tmp_a_i;
    sample_t tmp_a_r;
    sample_t tmp_b_i;
    sample_t tmp_b_r;

    sample_t *data_ptr;
    sample_t *delay_ptr;
    sample_t *window_ptr;

    /* 512 IMDCT with source and dest data in 'data' */

    /* Pre IFFT complex multiply plus IFFT cmplx conjugate & reordering*/
    for( i=0; i < 128; i++) {
	/* z[i] = (X[256-2*i-1] + j * X[2*i]) * (xcos1[i] + j * xsin1[i]) ; */
#ifdef USE_AC3_C
	int j= pm128[i];          /* permuted order expected by fft_128p() */
#else
	int j= bit_reverse_512[i]; /* classic bit-reversed order */
#endif
	buf[i].real = (data[256-2*j-1] * xcos1[j]) - (data[2*j] * xsin1[j]);
	buf[i].imag = -1.0 * ((data[2*j] * xcos1[j]) + (data[256-2*j-1] * xsin1[j]));
    }

    /* FFT Merge */
/* unoptimized variant
   for (m=1; m < 7; m++) {
    if(m)
     two_m = (1 << m);
    else
     two_m = 1;

    two_m_plus_one = (1 << (m+1));

    for(i = 0; i < 128; i += two_m_plus_one) {
     for(k = 0; k < two_m; k++) {
      p = k + i;
      q = p + two_m;
      tmp_a_r = buf[p].real;
      tmp_a_i = buf[p].imag;
      tmp_b_r = buf[q].real * w[m][k].real - buf[q].imag * w[m][k].imag;
      tmp_b_i = buf[q].imag * w[m][k].real + buf[q].real * w[m][k].imag;
      buf[p].real = tmp_a_r + tmp_b_r;
      buf[p].imag = tmp_a_i + tmp_b_i;
      buf[q].real = tmp_a_r - tmp_b_r;
      buf[q].imag = tmp_a_i - tmp_b_i;
     }
    }
   }
*/
#ifdef USE_AC3_C
    fft_128p (&buf[0]);
#else

    /* 1. iteration */
    /* stage 0 butterflies: twiddle is 1, so plain add/sub of pairs */
    for(i = 0; i < 128; i += 2) {
	tmp_a_r = buf[i].real;
	tmp_a_i = buf[i].imag;
	tmp_b_r = buf[i+1].real;
	tmp_b_i = buf[i+1].imag;
	buf[i].real = tmp_a_r + tmp_b_r;
	buf[i].imag = tmp_a_i + tmp_b_i;
	buf[i+1].real = tmp_a_r - tmp_b_r;
	buf[i+1].imag = tmp_a_i - tmp_b_i;
    }

    /* 2. iteration */
    // Note w[1]={{1,0}, {0,-1}}
    /* the {0,-1} twiddle turns the complex multiply into a swap of
       real/imag with sign flips — no multiplications needed */
    for(i = 0; i < 128; i += 4) {
	tmp_a_r = buf[i].real;
	tmp_a_i = buf[i].imag;
	tmp_b_r = buf[i+2].real;
	tmp_b_i = buf[i+2].imag;
	buf[i].real = tmp_a_r + tmp_b_r;
	buf[i].imag = tmp_a_i + tmp_b_i;
	buf[i+2].real = tmp_a_r - tmp_b_r;
	buf[i+2].imag = tmp_a_i - tmp_b_i;
	tmp_a_r = buf[i+1].real;
	tmp_a_i = buf[i+1].imag;
	tmp_b_r = buf[i+3].imag;
	tmp_b_i = buf[i+3].real;
	buf[i+1].real = tmp_a_r + tmp_b_r;
	buf[i+1].imag = tmp_a_i - tmp_b_i;
	buf[i+3].real = tmp_a_r - tmp_b_r;
	buf[i+3].imag = tmp_a_i + tmp_b_i;
    }

    /* 3. iteration */
    /* stage 2: the +/-45-degree twiddles reduce to a single scale by
       w[2][1].real / w[2][3].imag of (re+im) and (im-re) */
    for(i = 0; i < 128; i += 8) {
	tmp_a_r = buf[i].real;
	tmp_a_i = buf[i].imag;
	tmp_b_r = buf[i+4].real;
	tmp_b_i = buf[i+4].imag;
	buf[i].real = tmp_a_r + tmp_b_r;
	buf[i].imag = tmp_a_i + tmp_b_i;
	buf[i+4].real = tmp_a_r - tmp_b_r;
	buf[i+4].imag = tmp_a_i - tmp_b_i;
	tmp_a_r = buf[1+i].real;
	tmp_a_i = buf[1+i].imag;
	tmp_b_r = (buf[i+5].real + buf[i+5].imag) * w[2][1].real;
	tmp_b_i = (buf[i+5].imag - buf[i+5].real) * w[2][1].real;
	buf[1+i].real = tmp_a_r + tmp_b_r;
	buf[1+i].imag = tmp_a_i + tmp_b_i;
	buf[i+5].real = tmp_a_r - tmp_b_r;
	buf[i+5].imag = tmp_a_i - tmp_b_i;
	tmp_a_r = buf[i+2].real;
	tmp_a_i = buf[i+2].imag;
	tmp_b_r = buf[i+6].imag;
	tmp_b_i = - buf[i+6].real;
	buf[i+2].real = tmp_a_r + tmp_b_r;
	buf[i+2].imag = tmp_a_i + tmp_b_i;
	buf[i+6].real = tmp_a_r - tmp_b_r;
	buf[i+6].imag = tmp_a_i - tmp_b_i;
	tmp_a_r = buf[i+3].real;
	tmp_a_i = buf[i+3].imag;
	tmp_b_r = (buf[i+7].real - buf[i+7].imag) * w[2][3].imag;
	tmp_b_i = (buf[i+7].imag + buf[i+7].real) * w[2][3].imag;
	buf[i+3].real = tmp_a_r + tmp_b_r;
	buf[i+3].imag = tmp_a_i + tmp_b_i;
	buf[i+7].real = tmp_a_r - tmp_b_r;
	buf[i+7].imag = tmp_a_i - tmp_b_i;
    }

    /* 4-7. iterations */
    /* remaining stages use the generic butterfly with the w[m] tables */
    for (m=3; m < 7; m++) {
	two_m = (1 << m);

	two_m_plus_one = two_m<<1;

	for(i = 0; i < 128; i += two_m_plus_one) {
	    for(k = 0; k < two_m; k++) {
		int p = k + i;
		int q = p + two_m;
		tmp_a_r = buf[p].real;
		tmp_a_i = buf[p].imag;
		tmp_b_r = buf[q].real * w[m][k].real - buf[q].imag * w[m][k].imag;
		tmp_b_i = buf[q].imag * w[m][k].real + buf[q].real * w[m][k].imag;
		buf[p].real = tmp_a_r + tmp_b_r;
		buf[p].imag = tmp_a_i + tmp_b_i;
		buf[q].real = tmp_a_r - tmp_b_r;
		buf[q].imag = tmp_a_i - tmp_b_i;
	    }
	}
    }
#endif
    /* Post IFFT complex multiply plus IFFT complex conjugate*/
    for( i=0; i < 128; i++) {
	/* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
	tmp_a_r = buf[i].real;
	tmp_a_i = -1.0 * buf[i].imag;
	buf[i].real =(tmp_a_r * xcos1[i]) - (tmp_a_i * xsin1[i]);
	buf[i].imag =(tmp_a_r * xsin1[i]) + (tmp_a_i * xcos1[i]);
    }

    data_ptr = data;
    delay_ptr = delay;
    window_ptr = imdct_window;

    /* Window and convert to real valued signal */
    /* first half of the output: rising window edge + overlap from delay[] */
    for(i=0; i< 64; i++) {
	*data_ptr++ = -buf[64+i].imag * *window_ptr++ + *delay_ptr++ + bias;
	*data_ptr++ = buf[64-i-1].real * *window_ptr++ + *delay_ptr++ + bias;
    }

    for(i=0; i< 64; i++) {
	*data_ptr++ = -buf[i].real * *window_ptr++ + *delay_ptr++ + bias;
	*data_ptr++ = buf[128-i-1].imag * *window_ptr++ + *delay_ptr++ + bias;
    }

    /* The trailing edge of the window goes into the delay line */
    /* walk imdct_window backwards (*--window_ptr) — the stored half-window
       is reused mirrored for the falling edge */
    delay_ptr = delay;

    for(i=0; i< 64; i++) {
	*delay_ptr++ = -buf[64+i].real * *--window_ptr;
	*delay_ptr++ = buf[64-i-1].imag * *--window_ptr;
    }

    for(i=0; i<64; i++) {
	*delay_ptr++ = buf[i].imag * *--window_ptr;
	*delay_ptr++ = -buf[128-i-1].real * *--window_ptr;
    }
}
383 | |
#ifdef ARCH_X86
#include "srfftp_3dnow.h"

/* IEEE-754 sign-bit masks: pxor with these negates one lane of a packed
 * re|im pair (second lane for x_plus_minus, first for x_minus_plus) —
 * used to emulate pfpnacc on plain 3DNow!. */
const i_cmplx_t x_plus_minus_3dnow __attribute__ ((aligned (8))) = { 0x00000000UL, 0x80000000UL };
const i_cmplx_t x_minus_plus_3dnow __attribute__ ((aligned (8))) = { 0x80000000UL, 0x00000000UL };
/* 1/sqrt(2) replicated into both lanes. */
const complex_t HSQRT2_3DNOW __attribute__ ((aligned (8))) = { 0.707106781188, 0.707106781188 };
390 | |
/*
 * 512-sample IMDCT, 3DNow!/3DNow!Ex version.
 *
 * Mirrors imdct_do_512() stage for stage (pre-twiddle, 128-point FFT,
 * post-twiddle, window/overlap-add, delay refill) but does the complex
 * arithmetic with packed MMX/3DNow! instructions.  Each "#if 1" asm
 * section has its plain-C equivalent preserved under "#else" / in the
 * adjacent comments.  Same contract as imdct_do_512().
 */
void
imdct_do_512_3dnow(sample_t data[],sample_t delay[], sample_t bias)
{
    int i,k;
    int p,q;
    int m;
    int two_m;
    int two_m_plus_one;

    sample_t tmp_a_i;
    sample_t tmp_a_r;
    sample_t tmp_b_i;
    sample_t tmp_b_r;

    sample_t *data_ptr;
    sample_t *delay_ptr;
    sample_t *window_ptr;

    /* 512 IMDCT with source and dest data in 'data' */

    /* Pre IFFT complex multiply plus IFFT cmplx conjugate & reordering*/
#if 1
    /* keep the imag-lane sign mask resident in mm7 across the loop */
    __asm__ __volatile__ (
	"movq %0, %%mm7\n\t"
	::"m"(x_plus_minus_3dnow)
	:"memory");
    for( i=0; i < 128; i++) {
	int j = pm128[i];
	__asm__ __volatile__ (
	    "movd %1, %%mm0\n\t"
	    "movd %3, %%mm1\n\t"
	    "punpckldq %2, %%mm0\n\t" /* mm0 = data[256-2*j-1] | data[2*j]*/
	    "punpckldq %4, %%mm1\n\t" /* mm1 = xcos[j] | xsin[j] */
	    "movq %%mm0, %%mm2\n\t"
	    "pfmul %%mm1, %%mm0\n\t"
#ifdef HAVE_3DNOWEX
	    "pswapd %%mm1, %%mm1\n\t"
#else
	    /* no pswapd on plain 3DNow!: swap the two lanes by hand */
	    "movq %%mm1, %%mm5\n\t"
	    "psrlq $32, %%mm1\n\t"
	    "punpckldq %%mm5, %%mm1\n\t"
#endif
	    "pfmul %%mm1, %%mm2\n\t"
#ifdef HAVE_3DNOWEX
	    "pfpnacc %%mm2, %%mm0\n\t"
#else
	    /* emulate pfpnacc: flip one sign, then positive accumulate */
	    "pxor %%mm7, %%mm0\n\t"
	    "pfacc %%mm2, %%mm0\n\t"
#endif
	    "pxor %%mm7, %%mm0\n\t"
	    "movq %%mm0, %0"
	    :"=m"(buf[i])
	    :"m"(data[256-2*j-1]), "m"(data[2*j]), "m"(xcos1[j]), "m"(xsin1[j])
	    :"memory"
	);
/* buf[i].re = (data[256-2*j-1] * xcos1[j] - data[2*j] * xsin1[j]);
   buf[i].im = (data[256-2*j-1] * xsin1[j] + data[2*j] * xcos1[j])*(-1.0);*/
    }
#else
    __asm__ __volatile__ ("femms":::"memory");
    for( i=0; i < 128; i++) {
	/* z[i] = (X[256-2*i-1] + j * X[2*i]) * (xcos1[i] + j * xsin1[i]) ; */
	int j= pm128[i];
	buf[i].real = (data[256-2*j-1] * xcos1[j]) - (data[2*j] * xsin1[j]);
	buf[i].imag = -1.0 * ((data[2*j] * xcos1[j]) + (data[256-2*j-1] * xsin1[j]));
    }
#endif

    /* FFT Merge */
/* unoptimized variant
   for (m=1; m < 7; m++) {
    if(m)
     two_m = (1 << m);
    else
     two_m = 1;

    two_m_plus_one = (1 << (m+1));

    for(i = 0; i < 128; i += two_m_plus_one) {
     for(k = 0; k < two_m; k++) {
      p = k + i;
      q = p + two_m;
      tmp_a_r = buf[p].real;
      tmp_a_i = buf[p].imag;
      tmp_b_r = buf[q].real * w[m][k].real - buf[q].imag * w[m][k].imag;
      tmp_b_i = buf[q].imag * w[m][k].real + buf[q].real * w[m][k].imag;
      buf[p].real = tmp_a_r + tmp_b_r;
      buf[p].imag = tmp_a_i + tmp_b_i;
      buf[q].real = tmp_a_r - tmp_b_r;
      buf[q].imag = tmp_a_i - tmp_b_i;
     }
    }
   }
*/

    fft_128p_3dnow (&buf[0]);
//  asm volatile ("femms \n\t":::"memory");

    /* Post IFFT complex multiply plus IFFT complex conjugate*/
#if 1
    /* mm7/mm6 hold the two sign masks for the whole loop */
    __asm__ __volatile__ (
	"movq %0, %%mm7\n\t"
	"movq %1, %%mm6\n\t"
	::"m"(x_plus_minus_3dnow),
	  "m"(x_minus_plus_3dnow)
	:"eax","memory");
    for (i=0; i < 128; i++) {
	__asm__ __volatile__ (
	    "movq %1, %%mm0\n\t" /* ac3_buf[i].re | ac3_buf[i].im */
	    "movq %%mm0, %%mm1\n\t" /* ac3_buf[i].re | ac3_buf[i].im */
#ifndef HAVE_3DNOWEX
	    "movq %%mm1, %%mm2\n\t"
	    "psrlq $32, %%mm1\n\t"
	    "punpckldq %%mm2, %%mm1\n\t"
#else
	    "pswapd %%mm1, %%mm1\n\t" /* ac3_buf[i].re | ac3_buf[i].im */
#endif
	    "movd %3, %%mm3\n\t" /* ac3_xsin[i] */
	    "punpckldq %2, %%mm3\n\t" /* ac3_xsin[i] | ac3_xcos[i] */
	    "pfmul %%mm3, %%mm0\n\t"
	    "pfmul %%mm3, %%mm1\n\t"
#ifndef HAVE_3DNOWEX
	    "pxor %%mm7, %%mm0\n\t"
	    "pfacc %%mm1, %%mm0\n\t"
	    /* store high/low halves separately; "4%0" addresses buf[i]+4 */
	    "movd %%mm0, 4%0\n\t"
	    "psrlq $32, %%mm0\n\t"
	    "movd %%mm0, %0\n\t"
#else
	    "pfpnacc %%mm1, %%mm0\n\t" /* mm0 = mm0[0] - mm0[1] | mm1[0] + mm1[1] */
	    "pswapd %%mm0, %%mm0\n\t"
	    "movq %%mm0, %0"
#endif
	    :"=m"(buf[i])
	    :"m"(buf[i]),"m"(xcos1[i]),"m"(xsin1[i])
	    :"memory");
/* ac3_buf[i].re =(tmp_a_r * ac3_xcos1[i]) + (tmp_a_i * ac3_xsin1[i]);
   ac3_buf[i].im =(tmp_a_r * ac3_xsin1[i]) - (tmp_a_i * ac3_xcos1[i]);*/
    }
#else
    __asm__ __volatile__ ("femms":::"memory");
    for( i=0; i < 128; i++) {
	/* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
	tmp_a_r = buf[i].real;
	tmp_a_i = -1.0 * buf[i].imag;
	buf[i].real =(tmp_a_r * xcos1[i]) - (tmp_a_i * xsin1[i]);
	buf[i].imag =(tmp_a_r * xsin1[i]) + (tmp_a_i * xcos1[i]);
    }
#endif

    data_ptr = data;
    delay_ptr = delay;
    window_ptr = imdct_window;

    /* Window and convert to real valued signal */
#if 1
    /* broadcast bias into both lanes of mm3 */
    asm volatile (
	"movd (%0), %%mm3 \n\t"
	"punpckldq %%mm3, %%mm3 \n\t"
	:: "r" (&bias)
    );
    for (i=0; i< 64; i++) {
	/* merge two loops in one to enable working of 2 decoders */
	__asm__ __volatile__ (
	    "movd 516(%1), %%mm0\n\t"
	    "movd (%1), %%mm1\n\t" /**data_ptr++=-buf[64+i].im**window_ptr+++*delay_ptr++;*/
	    "punpckldq (%2), %%mm0\n\t" /*data_ptr[128]=-buf[i].re*window_ptr[128]+delay_ptr[128];*/
	    "punpckldq 516(%2), %%mm1\n\t"
	    "pfmul (%3), %%mm0\n\t" /**data_ptr++=buf[64-i-1].re**window_ptr+++*delay_ptr++;*/
	    "pfmul 512(%3), %%mm1\n\t"
	    "pxor %%mm6, %%mm0\n\t" /*data_ptr[128]=buf[128-i-1].im*window_ptr[128]+delay_ptr[128];*/
	    "pxor %%mm6, %%mm1\n\t"
	    "pfadd (%4), %%mm0\n\t"
	    "pfadd 512(%4), %%mm1\n\t"
	    "pfadd %%mm3, %%mm0\n\t"
	    "pfadd %%mm3, %%mm1\n\t"
	    "movq %%mm0, (%0)\n\t"
	    "movq %%mm1, 512(%0)"
	    :"=r"(data_ptr)
	    :"r"(&buf[i].real), "r"(&buf[64-i-1].real), "r"(window_ptr), "r"(delay_ptr), "0"(data_ptr)
	    :"memory");
	data_ptr += 2;
	window_ptr += 2;
	delay_ptr += 2;
    }
    window_ptr += 128;
#else
    __asm__ __volatile__ ("femms":::"memory");
    for(i=0; i< 64; i++) {
	*data_ptr++ = -buf[64+i].imag * *window_ptr++ + *delay_ptr++ + bias;
	*data_ptr++ = buf[64-i-1].real * *window_ptr++ + *delay_ptr++ + bias;
    }

    for(i=0; i< 64; i++) {
	*data_ptr++ = -buf[i].real * *window_ptr++ + *delay_ptr++ + bias;
	*data_ptr++ = buf[128-i-1].imag * *window_ptr++ + *delay_ptr++ + bias;
    }
#endif

    /* The trailing edge of the window goes into the delay line */
    delay_ptr = delay;
#if 1
    for(i=0; i< 64; i++) {
	/* merge two loops in one to enable working of 2 decoders */
	window_ptr -=2;
	__asm__ __volatile__(
	    "movd 508(%1), %%mm0\n\t"
	    "movd (%1), %%mm1\n\t"
	    "punpckldq (%2), %%mm0\n\t"
	    "punpckldq 508(%2), %%mm1\n\t"
#ifdef HAVE_3DNOWEX
	    "pswapd (%3), %%mm3\n\t"
	    "pswapd -512(%3), %%mm4\n\t"
#else
	    "movq (%3), %%mm3\n\t"/**delay_ptr++=-buf[64+i].re**--window_ptr;*/
	    "movq -512(%3), %%mm4\n\t"
	    "psrlq $32, %%mm3\n\t"/*delay_ptr[128]=buf[i].im**window_ptr[-512];*/
	    "psrlq $32, %%mm4\n\t"/**delay_ptr++=buf[64-i-1].im**--window_ptr;*/
	    "punpckldq (%3), %%mm3\n\t"/*delay_ptr[128]=-buf[128-i-1].re**window_ptr[-512];*/
	    "punpckldq -512(%3), %%mm4\n\t"
#endif
	    "pfmul %%mm3, %%mm0\n\t"
	    "pfmul %%mm4, %%mm1\n\t"
	    "pxor %%mm6, %%mm0\n\t"
	    "pxor %%mm7, %%mm1\n\t"
	    "movq %%mm0, (%0)\n\t"
	    "movq %%mm1, 512(%0)"
	    :"=r"(delay_ptr)
	    :"r"(&buf[i].imag), "r"(&buf[64-i-1].imag), "r"(window_ptr), "0"(delay_ptr)
	    :"memory");
	delay_ptr += 2;
    }
    /* leave the FPU in a clean state for subsequent x87 code */
    __asm__ __volatile__ ("femms":::"memory");
#else
    __asm__ __volatile__ ("femms":::"memory");
    for(i=0; i< 64; i++) {
	*delay_ptr++ = -buf[64+i].real * *--window_ptr;
	*delay_ptr++ = buf[64-i-1].imag * *--window_ptr;
    }

    for(i=0; i<64; i++) {
	*delay_ptr++ = buf[i].imag * *--window_ptr;
	*delay_ptr++ = -buf[128-i-1].real * *--window_ptr;
    }
#endif
}
636 | |
637 | |
3579 | 638 void |
639 imdct_do_512_sse(sample_t data[],sample_t delay[], sample_t bias) | |
640 { | |
641 int i,k; | |
642 int p,q; | |
643 int m; | |
644 int two_m; | |
645 int two_m_plus_one; | |
646 | |
647 sample_t tmp_a_i; | |
648 sample_t tmp_a_r; | |
649 sample_t tmp_b_i; | |
650 sample_t tmp_b_r; | |
651 | |
652 sample_t *data_ptr; | |
653 sample_t *delay_ptr; | |
654 sample_t *window_ptr; | |
655 | |
656 /* 512 IMDCT with source and dest data in 'data' */ | |
3623 | 657 /* see the c version (dct_do_512()), its allmost identical, just in C */ |
658 | |
3579 | 659 /* Pre IFFT complex multiply plus IFFT cmplx conjugate */ |
660 /* Bit reversed shuffling */ | |
661 asm volatile( | |
662 "xorl %%esi, %%esi \n\t" | |
663 "leal bit_reverse_512, %%eax \n\t" | |
664 "movl $1008, %%edi \n\t" | |
665 "pushl %%ebp \n\t" //use ebp without telling gcc | |
666 ".balign 16 \n\t" | |
667 "1: \n\t" | |
3584 | 668 "movlps (%0, %%esi), %%xmm0 \n\t" // XXXI |
669 "movhps 8(%0, %%edi), %%xmm0 \n\t" // RXXI | |
670 "movlps 8(%0, %%esi), %%xmm1 \n\t" // XXXi | |
671 "movhps (%0, %%edi), %%xmm1 \n\t" // rXXi | |
672 "shufps $0x33, %%xmm1, %%xmm0 \n\t" // irIR | |
673 "movaps sseSinCos1c(%%esi), %%xmm2 \n\t" | |
674 "mulps %%xmm0, %%xmm2 \n\t" | |
675 "shufps $0xB1, %%xmm0, %%xmm0 \n\t" // riRI | |
676 "mulps sseSinCos1d(%%esi), %%xmm0 \n\t" | |
677 "subps %%xmm0, %%xmm2 \n\t" | |
3579 | 678 "movzbl (%%eax), %%edx \n\t" |
679 "movzbl 1(%%eax), %%ebp \n\t" | |
3584 | 680 "movlps %%xmm2, (%1, %%edx,8) \n\t" |
681 "movhps %%xmm2, (%1, %%ebp,8) \n\t" | |
3579 | 682 "addl $16, %%esi \n\t" |
683 "addl $2, %%eax \n\t" // avoid complex addressing for P4 crap | |
684 "subl $16, %%edi \n\t" | |
685 " jnc 1b \n\t" | |
686 "popl %%ebp \n\t"//no we didnt touch ebp *g* | |
687 :: "b" (data), "c" (buf) | |
688 : "%esi", "%edi", "%eax", "%edx" | |
689 ); | |
690 | |
691 | |
692 /* FFT Merge */ | |
693 /* unoptimized variant | |
694 for (m=1; m < 7; m++) { | |
695 if(m) | |
696 two_m = (1 << m); | |
697 else | |
698 two_m = 1; | |
699 | |
700 two_m_plus_one = (1 << (m+1)); | |
701 | |
702 for(i = 0; i < 128; i += two_m_plus_one) { | |
703 for(k = 0; k < two_m; k++) { | |
704 p = k + i; | |
705 q = p + two_m; | |
706 tmp_a_r = buf[p].real; | |
707 tmp_a_i = buf[p].imag; | |
708 tmp_b_r = buf[q].real * w[m][k].real - buf[q].imag * w[m][k].imag; | |
709 tmp_b_i = buf[q].imag * w[m][k].real + buf[q].real * w[m][k].imag; | |
710 buf[p].real = tmp_a_r + tmp_b_r; | |
711 buf[p].imag = tmp_a_i + tmp_b_i; | |
712 buf[q].real = tmp_a_r - tmp_b_r; | |
713 buf[q].imag = tmp_a_i - tmp_b_i; | |
714 } | |
715 } | |
716 } | |
717 */ | |
718 | |
3623 | 719 /* 1. iteration */ |
3549 | 720 // Note w[0][0]={1,0} |
3508 | 721 asm volatile( |
722 "xorps %%xmm1, %%xmm1 \n\t" | |
723 "xorps %%xmm2, %%xmm2 \n\t" | |
724 "movl %0, %%esi \n\t" | |
3529 | 725 ".balign 16 \n\t" |
3508 | 726 "1: \n\t" |
727 "movlps (%%esi), %%xmm0 \n\t" //buf[p] | |
728 "movlps 8(%%esi), %%xmm1\n\t" //buf[q] | |
729 "movhps (%%esi), %%xmm0 \n\t" //buf[p] | |
730 "movhps 8(%%esi), %%xmm2\n\t" //buf[q] | |
731 "addps %%xmm1, %%xmm0 \n\t" | |
732 "subps %%xmm2, %%xmm0 \n\t" | |
733 "movaps %%xmm0, (%%esi) \n\t" | |
734 "addl $16, %%esi \n\t" | |
735 "cmpl %1, %%esi \n\t" | |
736 " jb 1b \n\t" | |
737 :: "g" (buf), "r" (buf + 128) | |
738 : "%esi" | |
739 ); | |
3549 | 740 |
3623 | 741 /* 2. iteration */ |
3512 | 742 // Note w[1]={{1,0}, {0,-1}} |
743 asm volatile( | |
744 "movaps ps111_1, %%xmm7 \n\t" // 1,1,1,-1 | |
745 "movl %0, %%esi \n\t" | |
3529 | 746 ".balign 16 \n\t" |
3512 | 747 "1: \n\t" |
748 "movaps 16(%%esi), %%xmm2 \n\t" //r2,i2,r3,i3 | |
749 "shufps $0xB4, %%xmm2, %%xmm2 \n\t" //r2,i2,i3,r3 | |
750 "mulps %%xmm7, %%xmm2 \n\t" //r2,i2,i3,-r3 | |
751 "movaps (%%esi), %%xmm0 \n\t" //r0,i0,r1,i1 | |
752 "movaps (%%esi), %%xmm1 \n\t" //r0,i0,r1,i1 | |
753 "addps %%xmm2, %%xmm0 \n\t" | |
754 "subps %%xmm2, %%xmm1 \n\t" | |
755 "movaps %%xmm0, (%%esi) \n\t" | |
756 "movaps %%xmm1, 16(%%esi) \n\t" | |
757 "addl $32, %%esi \n\t" | |
758 "cmpl %1, %%esi \n\t" | |
759 " jb 1b \n\t" | |
760 :: "g" (buf), "r" (buf + 128) | |
761 : "%esi" | |
762 ); | |
3549 | 763 |
3623 | 764 /* 3. iteration */ |
3534 | 765 /* |
766 Note sseW2+0={1,1,sqrt(2),sqrt(2)) | |
767 Note sseW2+16={0,0,sqrt(2),-sqrt(2)) | |
768 Note sseW2+32={0,0,-sqrt(2),-sqrt(2)) | |
769 Note sseW2+48={1,-1,sqrt(2),-sqrt(2)) | |
770 */ | |
771 asm volatile( | |
3537 | 772 "movaps 48+sseW2, %%xmm6 \n\t" |
3534 | 773 "movaps 16+sseW2, %%xmm7 \n\t" |
774 "xorps %%xmm5, %%xmm5 \n\t" | |
775 "xorps %%xmm2, %%xmm2 \n\t" | |
776 "movl %0, %%esi \n\t" | |
777 ".balign 16 \n\t" | |
778 "1: \n\t" | |
3537 | 779 "movaps 32(%%esi), %%xmm2 \n\t" //r4,i4,r5,i5 |
3534 | 780 "movaps 48(%%esi), %%xmm3 \n\t" //r6,i6,r7,i7 |
3537 | 781 "movaps sseW2, %%xmm4 \n\t" //r4,i4,r5,i5 |
782 "movaps 32+sseW2, %%xmm5 \n\t" //r6,i6,r7,i7 | |
783 "mulps %%xmm2, %%xmm4 \n\t" | |
784 "mulps %%xmm3, %%xmm5 \n\t" | |
3534 | 785 "shufps $0xB1, %%xmm2, %%xmm2 \n\t" //i4,r4,i5,r5 |
786 "shufps $0xB1, %%xmm3, %%xmm3 \n\t" //i6,r6,i7,r7 | |
3537 | 787 "mulps %%xmm6, %%xmm3 \n\t" |
3534 | 788 "mulps %%xmm7, %%xmm2 \n\t" |
789 "movaps (%%esi), %%xmm0 \n\t" //r0,i0,r1,i1 | |
790 "movaps 16(%%esi), %%xmm1 \n\t" //r2,i2,r3,i3 | |
791 "addps %%xmm4, %%xmm2 \n\t" | |
792 "addps %%xmm5, %%xmm3 \n\t" | |
793 "movaps %%xmm2, %%xmm4 \n\t" | |
794 "movaps %%xmm3, %%xmm5 \n\t" | |
795 "addps %%xmm0, %%xmm2 \n\t" | |
796 "addps %%xmm1, %%xmm3 \n\t" | |
797 "subps %%xmm4, %%xmm0 \n\t" | |
798 "subps %%xmm5, %%xmm1 \n\t" | |
799 "movaps %%xmm2, (%%esi) \n\t" | |
800 "movaps %%xmm3, 16(%%esi) \n\t" | |
801 "movaps %%xmm0, 32(%%esi) \n\t" | |
802 "movaps %%xmm1, 48(%%esi) \n\t" | |
803 "addl $64, %%esi \n\t" | |
804 "cmpl %1, %%esi \n\t" | |
805 " jb 1b \n\t" | |
806 :: "g" (buf), "r" (buf + 128) | |
807 : "%esi" | |
808 ); | |
3508 | 809 |
3623 | 810 /* 4-7. iterations */ |
3546 | 811 for (m=3; m < 7; m++) { |
812 two_m = (1 << m); | |
813 two_m_plus_one = two_m<<1; | |
814 asm volatile( | |
815 "movl %0, %%esi \n\t" | |
816 ".balign 16 \n\t" | |
817 "1: \n\t" | |
818 "xorl %%edi, %%edi \n\t" // k | |
819 "leal (%%esi, %3), %%edx \n\t" | |
820 "2: \n\t" | |
821 "movaps (%%edx, %%edi), %%xmm1 \n\t" | |
822 "movaps (%4, %%edi, 2), %%xmm2 \n\t" | |
823 "mulps %%xmm1, %%xmm2 \n\t" | |
824 "shufps $0xB1, %%xmm1, %%xmm1 \n\t" | |
825 "mulps 16(%4, %%edi, 2), %%xmm1 \n\t" | |
826 "movaps (%%esi, %%edi), %%xmm0 \n\t" | |
827 "addps %%xmm2, %%xmm1 \n\t" | |
828 "movaps %%xmm1, %%xmm2 \n\t" | |
829 "addps %%xmm0, %%xmm1 \n\t" | |
830 "subps %%xmm2, %%xmm0 \n\t" | |
831 "movaps %%xmm1, (%%esi, %%edi) \n\t" | |
832 "movaps %%xmm0, (%%edx, %%edi) \n\t" | |
833 "addl $16, %%edi \n\t" | |
834 "cmpl %3, %%edi \n\t" //FIXME (opt) count against 0 | |
835 " jb 2b \n\t" | |
836 "addl %2, %%esi \n\t" | |
837 "cmpl %1, %%esi \n\t" | |
838 " jb 1b \n\t" | |
839 :: "g" (buf), "m" (buf+128), "m" (two_m_plus_one<<3), "r" (two_m<<3), | |
840 "r" (sseW[m]) | |
841 : "%esi", "%edi", "%edx" | |
842 ); | |
843 } | |
844 | |
3623 | 845 /* Post IFFT complex multiply plus IFFT complex conjugate*/ |
3581 | 846 asm volatile( |
847 "movl $-1024, %%esi \n\t" | |
848 ".balign 16 \n\t" | |
849 "1: \n\t" | |
850 "movaps (%0, %%esi), %%xmm0 \n\t" | |
851 "movaps (%0, %%esi), %%xmm1 \n\t" | |
852 "shufps $0xB1, %%xmm0, %%xmm0 \n\t" | |
853 "mulps 1024+sseSinCos1c(%%esi), %%xmm1 \n\t" | |
854 "mulps 1024+sseSinCos1d(%%esi), %%xmm0 \n\t" | |
855 "addps %%xmm1, %%xmm0 \n\t" | |
856 "movaps %%xmm0, (%0, %%esi) \n\t" | |
857 "addl $16, %%esi \n\t" | |
858 " jnz 1b \n\t" | |
859 :: "r" (buf+128) | |
860 : "%esi" | |
861 ); | |
862 | |
3394 | 863 |
864 data_ptr = data; | |
865 delay_ptr = delay; | |
866 window_ptr = imdct_window; | |
867 | |
868 /* Window and convert to real valued signal */ | |
3552 | 869 asm volatile( |
870 "xorl %%edi, %%edi \n\t" // 0 | |
871 "xorl %%esi, %%esi \n\t" // 0 | |
872 "movss %3, %%xmm2 \n\t" // bias | |
873 "shufps $0x00, %%xmm2, %%xmm2 \n\t" // bias, bias, ... | |
874 ".balign 16 \n\t" | |
875 "1: \n\t" | |
876 "movlps (%0, %%esi), %%xmm0 \n\t" // ? ? A ? | |
877 "movlps 8(%0, %%esi), %%xmm1 \n\t" // ? ? C ? | |
878 "movhps -16(%0, %%edi), %%xmm1 \n\t" // ? D C ? | |
879 "movhps -8(%0, %%edi), %%xmm0 \n\t" // ? B A ? | |
880 "shufps $0x99, %%xmm1, %%xmm0 \n\t" // D C B A | |
881 "mulps sseWindow(%%esi), %%xmm0 \n\t" | |
882 "addps (%2, %%esi), %%xmm0 \n\t" | |
883 "addps %%xmm2, %%xmm0 \n\t" | |
884 "movaps %%xmm0, (%1, %%esi) \n\t" | |
885 "addl $16, %%esi \n\t" | |
886 "subl $16, %%edi \n\t" | |
887 "cmpl $512, %%esi \n\t" | |
888 " jb 1b \n\t" | |
889 :: "r" (buf+64), "r" (data_ptr), "r" (delay_ptr), "m" (bias) | |
890 : "%esi", "%edi" | |
891 ); | |
892 data_ptr+=128; | |
893 delay_ptr+=128; | |
3553 | 894 // window_ptr+=128; |
3579 | 895 |
3552 | 896 asm volatile( |
897 "movl $1024, %%edi \n\t" // 512 | |
898 "xorl %%esi, %%esi \n\t" // 0 | |
899 "movss %3, %%xmm2 \n\t" // bias | |
900 "shufps $0x00, %%xmm2, %%xmm2 \n\t" // bias, bias, ... | |
901 ".balign 16 \n\t" | |
902 "1: \n\t" | |
903 "movlps (%0, %%esi), %%xmm0 \n\t" // ? ? ? A | |
904 "movlps 8(%0, %%esi), %%xmm1 \n\t" // ? ? ? C | |
905 "movhps -16(%0, %%edi), %%xmm1 \n\t" // D ? ? C | |
906 "movhps -8(%0, %%edi), %%xmm0 \n\t" // B ? ? A | |
907 "shufps $0xCC, %%xmm1, %%xmm0 \n\t" // D C B A | |
908 "mulps 512+sseWindow(%%esi), %%xmm0 \n\t" | |
909 "addps (%2, %%esi), %%xmm0 \n\t" | |
910 "addps %%xmm2, %%xmm0 \n\t" | |
911 "movaps %%xmm0, (%1, %%esi) \n\t" | |
912 "addl $16, %%esi \n\t" | |
913 "subl $16, %%edi \n\t" | |
914 "cmpl $512, %%esi \n\t" | |
915 " jb 1b \n\t" | |
916 :: "r" (buf), "r" (data_ptr), "r" (delay_ptr), "m" (bias) | |
917 : "%esi", "%edi" | |
918 ); | |
919 data_ptr+=128; | |
3553 | 920 // window_ptr+=128; |
3394 | 921 |
922 /* The trailing edge of the window goes into the delay line */ | |
923 delay_ptr = delay; | |
924 | |
3553 | 925 asm volatile( |
926 "xorl %%edi, %%edi \n\t" // 0 | |
927 "xorl %%esi, %%esi \n\t" // 0 | |
928 ".balign 16 \n\t" | |
929 "1: \n\t" | |
930 "movlps (%0, %%esi), %%xmm0 \n\t" // ? ? ? A | |
931 "movlps 8(%0, %%esi), %%xmm1 \n\t" // ? ? ? C | |
932 "movhps -16(%0, %%edi), %%xmm1 \n\t" // D ? ? C | |
933 "movhps -8(%0, %%edi), %%xmm0 \n\t" // B ? ? A | |
934 "shufps $0xCC, %%xmm1, %%xmm0 \n\t" // D C B A | |
935 "mulps 1024+sseWindow(%%esi), %%xmm0 \n\t" | |
936 "movaps %%xmm0, (%1, %%esi) \n\t" | |
937 "addl $16, %%esi \n\t" | |
938 "subl $16, %%edi \n\t" | |
939 "cmpl $512, %%esi \n\t" | |
940 " jb 1b \n\t" | |
941 :: "r" (buf+64), "r" (delay_ptr) | |
942 : "%esi", "%edi" | |
943 ); | |
944 delay_ptr+=128; | |
945 // window_ptr-=128; | |
3579 | 946 |
3553 | 947 asm volatile( |
948 "movl $1024, %%edi \n\t" // 1024 | |
949 "xorl %%esi, %%esi \n\t" // 0 | |
950 ".balign 16 \n\t" | |
951 "1: \n\t" | |
952 "movlps (%0, %%esi), %%xmm0 \n\t" // ? ? A ? | |
953 "movlps 8(%0, %%esi), %%xmm1 \n\t" // ? ? C ? | |
954 "movhps -16(%0, %%edi), %%xmm1 \n\t" // ? D C ? | |
955 "movhps -8(%0, %%edi), %%xmm0 \n\t" // ? B A ? | |
956 "shufps $0x99, %%xmm1, %%xmm0 \n\t" // D C B A | |
957 "mulps 1536+sseWindow(%%esi), %%xmm0 \n\t" | |
958 "movaps %%xmm0, (%1, %%esi) \n\t" | |
959 "addl $16, %%esi \n\t" | |
960 "subl $16, %%edi \n\t" | |
961 "cmpl $512, %%esi \n\t" | |
962 " jb 1b \n\t" | |
963 :: "r" (buf), "r" (delay_ptr) | |
964 : "%esi", "%edi" | |
965 ); | |
3394 | 966 } |
3579 | 967 #endif //arch_x86 |
3394 | 968 |
969 void | |
970 imdct_do_256(sample_t data[],sample_t delay[],sample_t bias) | |
971 { | |
972 int i,k; | |
973 int p,q; | |
974 int m; | |
975 int two_m; | |
976 int two_m_plus_one; | |
977 | |
978 sample_t tmp_a_i; | |
979 sample_t tmp_a_r; | |
980 sample_t tmp_b_i; | |
981 sample_t tmp_b_r; | |
982 | |
983 sample_t *data_ptr; | |
984 sample_t *delay_ptr; | |
985 sample_t *window_ptr; | |
986 | |
987 complex_t *buf_1, *buf_2; | |
988 | |
989 buf_1 = &buf[0]; | |
990 buf_2 = &buf[64]; | |
991 | |
992 /* Pre IFFT complex multiply plus IFFT cmplx conjugate */ | |
993 for(k=0; k<64; k++) { | |
994 /* X1[k] = X[2*k] */ | |
995 /* X2[k] = X[2*k+1] */ | |
996 | |
997 p = 2 * (128-2*k-1); | |
998 q = 2 * (2 * k); | |
999 | |
1000 /* Z1[k] = (X1[128-2*k-1] + j * X1[2*k]) * (xcos2[k] + j * xsin2[k]); */ | |
1001 buf_1[k].real = data[p] * xcos2[k] - data[q] * xsin2[k]; | |
1002 buf_1[k].imag = -1.0f * (data[q] * xcos2[k] + data[p] * xsin2[k]); | |
1003 /* Z2[k] = (X2[128-2*k-1] + j * X2[2*k]) * (xcos2[k] + j * xsin2[k]); */ | |
1004 buf_2[k].real = data[p + 1] * xcos2[k] - data[q + 1] * xsin2[k]; | |
1005 buf_2[k].imag = -1.0f * ( data[q + 1] * xcos2[k] + data[p + 1] * xsin2[k]); | |
1006 } | |
1007 | |
1008 /* IFFT Bit reversed shuffling */ | |
1009 for(i=0; i<64; i++) { | |
1010 k = bit_reverse_256[i]; | |
1011 if (k < i) { | |
1012 swap_cmplx(&buf_1[i],&buf_1[k]); | |
1013 swap_cmplx(&buf_2[i],&buf_2[k]); | |
1014 } | |
1015 } | |
1016 | |
1017 /* FFT Merge */ | |
1018 for (m=0; m < 6; m++) { | |
1019 two_m = (1 << m); | |
1020 two_m_plus_one = (1 << (m+1)); | |
1021 | |
1022 /* FIXME */ | |
1023 if(m) | |
1024 two_m = (1 << m); | |
1025 else | |
1026 two_m = 1; | |
1027 | |
1028 for(k = 0; k < two_m; k++) { | |
1029 for(i = 0; i < 64; i += two_m_plus_one) { | |
1030 p = k + i; | |
1031 q = p + two_m; | |
1032 /* Do block 1 */ | |
1033 tmp_a_r = buf_1[p].real; | |
1034 tmp_a_i = buf_1[p].imag; | |
1035 tmp_b_r = buf_1[q].real * w[m][k].real - buf_1[q].imag * w[m][k].imag; | |
1036 tmp_b_i = buf_1[q].imag * w[m][k].real + buf_1[q].real * w[m][k].imag; | |
1037 buf_1[p].real = tmp_a_r + tmp_b_r; | |
1038 buf_1[p].imag = tmp_a_i + tmp_b_i; | |
1039 buf_1[q].real = tmp_a_r - tmp_b_r; | |
1040 buf_1[q].imag = tmp_a_i - tmp_b_i; | |
1041 | |
1042 /* Do block 2 */ | |
1043 tmp_a_r = buf_2[p].real; | |
1044 tmp_a_i = buf_2[p].imag; | |
1045 tmp_b_r = buf_2[q].real * w[m][k].real - buf_2[q].imag * w[m][k].imag; | |
1046 tmp_b_i = buf_2[q].imag * w[m][k].real + buf_2[q].real * w[m][k].imag; | |
1047 buf_2[p].real = tmp_a_r + tmp_b_r; | |
1048 buf_2[p].imag = tmp_a_i + tmp_b_i; | |
1049 buf_2[q].real = tmp_a_r - tmp_b_r; | |
1050 buf_2[q].imag = tmp_a_i - tmp_b_i; | |
1051 } | |
1052 } | |
1053 } | |
1054 | |
1055 /* Post IFFT complex multiply */ | |
1056 for( i=0; i < 64; i++) { | |
1057 /* y1[n] = z1[n] * (xcos2[n] + j * xs in2[n]) ; */ | |
1058 tmp_a_r = buf_1[i].real; | |
1059 tmp_a_i = -buf_1[i].imag; | |
1060 buf_1[i].real =(tmp_a_r * xcos2[i]) - (tmp_a_i * xsin2[i]); | |
1061 buf_1[i].imag =(tmp_a_r * xsin2[i]) + (tmp_a_i * xcos2[i]); | |
1062 /* y2[n] = z2[n] * (xcos2[n] + j * xsin2[n]) ; */ | |
1063 tmp_a_r = buf_2[i].real; | |
1064 tmp_a_i = -buf_2[i].imag; | |
1065 buf_2[i].real =(tmp_a_r * xcos2[i]) - (tmp_a_i * xsin2[i]); | |
1066 buf_2[i].imag =(tmp_a_r * xsin2[i]) + (tmp_a_i * xcos2[i]); | |
1067 } | |
1068 | |
1069 data_ptr = data; | |
1070 delay_ptr = delay; | |
1071 window_ptr = imdct_window; | |
1072 | |
1073 /* Window and convert to real valued signal */ | |
1074 for(i=0; i< 64; i++) { | |
1075 *data_ptr++ = -buf_1[i].imag * *window_ptr++ + *delay_ptr++ + bias; | |
1076 *data_ptr++ = buf_1[64-i-1].real * *window_ptr++ + *delay_ptr++ + bias; | |
1077 } | |
1078 | |
1079 for(i=0; i< 64; i++) { | |
1080 *data_ptr++ = -buf_1[i].real * *window_ptr++ + *delay_ptr++ + bias; | |
1081 *data_ptr++ = buf_1[64-i-1].imag * *window_ptr++ + *delay_ptr++ + bias; | |
1082 } | |
1083 | |
1084 delay_ptr = delay; | |
1085 | |
1086 for(i=0; i< 64; i++) { | |
1087 *delay_ptr++ = -buf_2[i].real * *--window_ptr; | |
1088 *delay_ptr++ = buf_2[64-i-1].imag * *--window_ptr; | |
1089 } | |
1090 | |
1091 for(i=0; i< 64; i++) { | |
1092 *delay_ptr++ = buf_2[i].imag * *--window_ptr; | |
1093 *delay_ptr++ = -buf_2[64-i-1].real * *--window_ptr; | |
1094 } | |
1095 } | |
1096 | |
1097 void imdct_init (uint32_t mm_accel) | |
1098 { | |
1099 #ifdef LIBA52_MLIB | |
1100 if (mm_accel & MM_ACCEL_MLIB) { | |
1101 fprintf (stderr, "Using mlib for IMDCT transform\n"); | |
1102 imdct_512 = imdct_do_512_mlib; | |
1103 imdct_256 = imdct_do_256_mlib; | |
1104 } else | |
1105 #endif | |
1106 { | |
1107 int i, j, k; | |
1108 | |
3908 | 1109 if(mm_accel & MM_ACCEL_X86_SSE) fprintf (stderr, "Using SSE optimized IMDCT transform\n"); |
1110 else if(mm_accel & MM_ACCEL_X86_3DNOW) fprintf (stderr, "Using 3DNow optimized IMDCT transform\n"); | |
1111 else fprintf (stderr, "No accelerated IMDCT transform found\n"); | |
3394 | 1112 |
1113 /* Twiddle factors to turn IFFT into IMDCT */ | |
1114 for (i = 0; i < 128; i++) { | |
1115 xcos1[i] = -cos ((M_PI / 2048) * (8 * i + 1)); | |
1116 xsin1[i] = -sin ((M_PI / 2048) * (8 * i + 1)); | |
1117 } | |
3579 | 1118 #ifdef ARCH_X86 |
3527 | 1119 for (i = 0; i < 128; i++) { |
3581 | 1120 sseSinCos1c[2*i+0]= xcos1[i]; |
1121 sseSinCos1c[2*i+1]= -xcos1[i]; | |
1122 sseSinCos1d[2*i+0]= xsin1[i]; | |
1123 sseSinCos1d[2*i+1]= xsin1[i]; | |
3527 | 1124 } |
1125 #endif | |
3394 | 1126 |
1127 /* More twiddle factors to turn IFFT into IMDCT */ | |
1128 for (i = 0; i < 64; i++) { | |
1129 xcos2[i] = -cos ((M_PI / 1024) * (8 * i + 1)); | |
1130 xsin2[i] = -sin ((M_PI / 1024) * (8 * i + 1)); | |
1131 } | |
1132 | |
1133 for (i = 0; i < 7; i++) { | |
1134 j = 1 << i; | |
1135 for (k = 0; k < j; k++) { | |
1136 w[i][k].real = cos (-M_PI * k / j); | |
1137 w[i][k].imag = sin (-M_PI * k / j); | |
1138 } | |
1139 } | |
3579 | 1140 #ifdef ARCH_X86 |
3534 | 1141 for (i = 1; i < 7; i++) { |
1142 j = 1 << i; | |
1143 for (k = 0; k < j; k+=2) { | |
1144 | |
1145 sseW[i][4*k + 0] = w[i][k+0].real; | |
1146 sseW[i][4*k + 1] = w[i][k+0].real; | |
1147 sseW[i][4*k + 2] = w[i][k+1].real; | |
1148 sseW[i][4*k + 3] = w[i][k+1].real; | |
1149 | |
1150 sseW[i][4*k + 4] = -w[i][k+0].imag; | |
1151 sseW[i][4*k + 5] = w[i][k+0].imag; | |
1152 sseW[i][4*k + 6] = -w[i][k+1].imag; | |
1153 sseW[i][4*k + 7] = w[i][k+1].imag; | |
1154 | |
1155 //we multiply more or less uninitalized numbers so we need to use exactly 0.0 | |
1156 if(k==0) | |
1157 { | |
1158 // sseW[i][4*k + 0]= sseW[i][4*k + 1]= 1.0; | |
1159 sseW[i][4*k + 4]= sseW[i][4*k + 5]= 0.0; | |
1160 } | |
1161 | |
1162 if(2*k == j) | |
1163 { | |
1164 sseW[i][4*k + 0]= sseW[i][4*k + 1]= 0.0; | |
1165 // sseW[i][4*k + 4]= -(sseW[i][4*k + 5]= -1.0); | |
1166 } | |
1167 } | |
1168 } | |
3552 | 1169 |
1170 for(i=0; i<128; i++) | |
1171 { | |
1172 sseWindow[2*i+0]= -imdct_window[2*i+0]; | |
3553 | 1173 sseWindow[2*i+1]= imdct_window[2*i+1]; |
3552 | 1174 } |
3553 | 1175 |
1176 for(i=0; i<64; i++) | |
1177 { | |
1178 sseWindow[256 + 2*i+0]= -imdct_window[254 - 2*i+1]; | |
1179 sseWindow[256 + 2*i+1]= imdct_window[254 - 2*i+0]; | |
1180 sseWindow[384 + 2*i+0]= imdct_window[126 - 2*i+1]; | |
1181 sseWindow[384 + 2*i+1]= -imdct_window[126 - 2*i+0]; | |
1182 } | |
3579 | 1183 #endif // arch_x86 |
1184 | |
3720
120ac80f13c2
Fixed #ifdef discrepancy that was breaking compilation on PPC platform
melanson
parents:
3623
diff
changeset
|
1185 imdct_512 = imdct_do_512; |
120ac80f13c2
Fixed #ifdef discrepancy that was breaking compilation on PPC platform
melanson
parents:
3623
diff
changeset
|
1186 #ifdef ARCH_X86 |
3908 | 1187 if(mm_accel & MM_ACCEL_X86_SSE) imdct_512 = imdct_do_512_sse; |
1188 else if(mm_accel & MM_ACCEL_X86_3DNOW) imdct_512 = imdct_do_512_3dnow; | |
3720
120ac80f13c2
Fixed #ifdef discrepancy that was breaking compilation on PPC platform
melanson
parents:
3623
diff
changeset
|
1189 #endif // arch_x86 |
3394 | 1190 imdct_256 = imdct_do_256; |
1191 } | |
1192 } | |
3884 | 1193 |
1194 // Stuff below this line is borrowed from libac3 | |
1195 #include "srfftp.h" | |
1196 | |
1197 #ifdef ARCH_X86 | |
1198 | |
/* 4-point complex FFT kernel, 3DNow! version.
 * x[k] = sum_{i=0..3} x[i] * w^{i*k}, w = e^{-2*pi/4}; delta_p = 1.
 * Transforms x[0..3] in place; each complex_t is one 64-bit MMX quad
 * (re|im). Clobbers mm0-mm7; caller is responsible for femms/emms. */
static void fft_4_3dnow(complex_t *x)
{
    /* delta_p = 1 here */
    /* x[k] = sum_{i=0..3} x[i] * w^{i*k}, w=e^{-2*pi/4}
     */
    __asm__ __volatile__(
	"movq	24(%1), %%mm3\n\t"
	"movq	8(%1), %%mm1\n\t"
	"pxor	%2, %%mm3\n\t"		/* mm3.re | -mm3.im */
	"pxor	%3, %%mm1\n\t"		/* -mm1.re | mm1.im */
	"pfadd	%%mm1, %%mm3\n\t"	/* vi.im = x[3].re - x[1].re; */
	"movq	%%mm3, %%mm4\n\t"	/* vi.re =-x[3].im + x[1].im; mm4 = vi */
#ifdef HAVE_3DNOWEX
	"pswapd	%%mm4, %%mm4\n\t"
#else
	/* emulate pswapd (swap the two dwords) on plain 3DNow! */
	"movq	%%mm4, %%mm5\n\t"
	"psrlq	$32, %%mm4\n\t"
	"punpckldq %%mm5, %%mm4\n\t"
#endif
	"movq	(%1), %%mm5\n\t"	/* yb.re = x[0].re - x[2].re; */
	"movq	(%1), %%mm6\n\t"	/* yt.re = x[0].re + x[2].re; */
	"movq	24(%1), %%mm7\n\t"	/* u.re = x[3].re + x[1].re; */
	"pfsub	16(%1), %%mm5\n\t"	/* yb.im = x[0].im - x[2].im; mm5 = yb */
	"pfadd	16(%1), %%mm6\n\t"	/* yt.im = x[0].im + x[2].im; mm6 = yt */
	"pfadd	8(%1), %%mm7\n\t"	/* u.im = x[3].im + x[1].im; mm7 = u */

	"movq	%%mm6, %%mm0\n\t"	/* x[0].re = yt.re + u.re; */
	"movq	%%mm5, %%mm1\n\t"	/* x[1].re = yb.re + vi.re; */
	"pfadd	%%mm7, %%mm0\n\t"	/* x[0].im = yt.im + u.im; */
	"pfadd	%%mm4, %%mm1\n\t"	/* x[1].im = yb.im + vi.im; */
	"movq	%%mm0, (%0)\n\t"
	"movq	%%mm1, 8(%0)\n\t"

	"pfsub	%%mm7, %%mm6\n\t"	/* x[2].re = yt.re - u.re; */
	"pfsub	%%mm4, %%mm5\n\t"	/* x[3].re = yb.re - vi.re; */
	"movq	%%mm6, 16(%0)\n\t"	/* x[2].im = yt.im - u.im; */
	"movq	%%mm5, 24(%0)"		/* x[3].im = yb.im - vi.im; */
	:"=r"(x)
	:"0"(x),
	 "m"(x_plus_minus_3dnow),
	 "m"(x_minus_plus_3dnow)
	:"memory");
}
1242 | |
/* 8-point complex FFT kernel, 3DNow! version (counterpart of fft_8).
 * x[k] = sum_{i=0..7} x[i] * w^{i*k}, w = e^{-2*pi/8};
 * delta_p = diag{1, sqrt(i)}.
 * Saves x[1]/x[3] into wT1/wB1, reshuffles x[1..3] <- x[2],x[4],x[6],
 * runs fft_4_3dnow on the even half, then combines with the saved odd
 * values in four asm sections (one per output pair). Clobbers mm0-mm7. */
static void fft_8_3dnow(complex_t *x)
{
    /* delta_p = diag{1, sqrt(i)} here */
    /* x[k] = sum_{i=0..7} x[i] * w^{i*k}, w=e^{-2*pi/8}
     */
    complex_t wT1, wB1, wB2;

    /* stash the odd inputs before they are overwritten below */
    __asm__ __volatile__(
	"movq	8(%2), %%mm0\n\t"
	"movq	24(%2), %%mm1\n\t"
	"movq	%%mm0, %0\n\t"	/* wT1 = x[1]; */
	"movq	%%mm1, %1\n\t"	/* wB1 = x[3]; */
	:"=m"(wT1), "=m"(wB1)
	:"r"(x)
	:"memory");

    /* pack the even inputs into x[0..3] for the 4-point sub-FFT */
    __asm__ __volatile__(
	"movq	16(%0), %%mm2\n\t"
	"movq	32(%0), %%mm3\n\t"
	"movq	%%mm2, 8(%0)\n\t"	/* x[1] = x[2]; */
	"movq	48(%0), %%mm4\n\t"
	"movq	%%mm3, 16(%0)\n\t"	/* x[2] = x[4]; */
	"movq	%%mm4, 24(%0)\n\t"	/* x[3] = x[6]; */
	:"=r"(x)
	:"0"(x)
	:"memory");

    fft_4_3dnow(&x[0]);

    /* x[0] x[4] x[2] x[6] */

    __asm__ __volatile__(
	"movq	40(%1), %%mm0\n\t"
	"movq	%%mm0, %%mm3\n\t"
	"movq	56(%1), %%mm1\n\t"
	"pfadd	%%mm1, %%mm0\n\t"
	"pfsub	%%mm1, %%mm3\n\t"
	"movq	(%2), %%mm2\n\t"
	"pfadd	%%mm2, %%mm0\n\t"
	"pfadd	%%mm2, %%mm3\n\t"
	"movq	(%3), %%mm1\n\t"
	"pfadd	%%mm1, %%mm0\n\t"
	"pfsub	%%mm1, %%mm3\n\t"
	"movq	(%1), %%mm1\n\t"
	"movq	16(%1), %%mm4\n\t"
	"movq	%%mm1, %%mm2\n\t"
#ifdef HAVE_3DNOWEX
	"pswapd	%%mm3, %%mm3\n\t"
#else
	/* pswapd emulation for plain 3DNow! */
	"movq	%%mm3, %%mm6\n\t"
	"psrlq	$32, %%mm3\n\t"
	"punpckldq %%mm6, %%mm3\n\t"
#endif
	"pfadd	%%mm0, %%mm1\n\t"
	"movq	%%mm4, %%mm5\n\t"
	"pfsub	%%mm0, %%mm2\n\t"
	"pfadd	%%mm3, %%mm4\n\t"
	"movq	%%mm1, (%0)\n\t"
	"pfsub	%%mm3, %%mm5\n\t"
	"movq	%%mm2, 32(%0)\n\t"
	"movd	%%mm4, 16(%0)\n\t"
	"movd	%%mm5, 48(%0)\n\t"
	"psrlq	$32, %%mm4\n\t"
	"psrlq	$32, %%mm5\n\t"
	"movd	%%mm4, 52(%0)\n\t"
	"movd	%%mm5, 20(%0)"
	:"=r"(x)
	:"0"(x), "r"(&wT1), "r"(&wB1)
	:"memory");

    /* x[1] x[5] */
    __asm__ __volatile__ (
	"movq	%6, %%mm6\n\t"		/* mm6/mm7 = sign-flip masks */
	"movq	%5, %%mm7\n\t"
	"movq	%1, %%mm0\n\t"
	"movq	%2, %%mm1\n\t"
	"movq	56(%3), %%mm3\n\t"
	"pfsub	40(%3), %%mm0\n\t"
#ifdef HAVE_3DNOWEX
	"pswapd	%%mm1, %%mm1\n\t"
#else
	"movq	%%mm1, %%mm2\n\t"
	"psrlq	$32, %%mm1\n\t"
	"punpckldq %%mm2,%%mm1\n\t"
#endif
	"pxor	%%mm7, %%mm1\n\t"
	"pfadd	%%mm1, %%mm0\n\t"
#ifdef HAVE_3DNOWEX
	"pswapd	%%mm3, %%mm3\n\t"
#else
	"movq	%%mm3, %%mm2\n\t"
	"psrlq	$32, %%mm3\n\t"
	"punpckldq %%mm2,%%mm3\n\t"
#endif
	"pxor	%%mm6, %%mm3\n\t"
	"pfadd	%%mm3, %%mm0\n\t"
	"movq	%%mm0, %%mm1\n\t"
	"pxor	%%mm6, %%mm1\n\t"
	"pfacc	%%mm1, %%mm0\n\t"
	"pfmul	%4, %%mm0\n\t"		/* scale by 0.5*sqrt(2) */

	"movq	40(%3), %%mm5\n\t"
#ifdef HAVE_3DNOWEX
	"pswapd	%%mm5, %%mm5\n\t"
#else
	"movq	%%mm5, %%mm1\n\t"
	"psrlq	$32, %%mm5\n\t"
	"punpckldq %%mm1,%%mm5\n\t"
#endif
	"movq	%%mm5, %0\n\t"		/* wB2 = swapped x[5], used below */

	"movq	8(%3), %%mm1\n\t"
	"movq	%%mm1, %%mm2\n\t"
	"pfsub	%%mm0, %%mm1\n\t"
	"pfadd	%%mm0, %%mm2\n\t"
	"movq	%%mm1, 40(%3)\n\t"
	"movq	%%mm2, 8(%3)\n\t"
	:"=m"(wB2)
	:"m"(wT1), "m"(wB1), "r"(x), "m"(HSQRT2_3DNOW),
	 "m"(x_plus_minus_3dnow), "m"(x_minus_plus_3dnow)
	:"memory");


    /* x[3] x[7] */
    /* NOTE: relies on mm6/mm7 still holding the masks loaded above */
    __asm__ __volatile__(
	"movq	%1, %%mm0\n\t"
#ifdef HAVE_3DNOWEX
	"pswapd	%3, %%mm1\n\t"
#else
	"movq	%3, %%mm1\n\t"
	"psrlq	$32, %%mm1\n\t"
	"punpckldq %3, %%mm1\n\t"
#endif
	"pxor	%%mm6, %%mm1\n\t"
	"pfadd	%%mm1, %%mm0\n\t"
	"movq	%2, %%mm2\n\t"
	"movq	56(%4), %%mm3\n\t"
	"pxor	%%mm7, %%mm3\n\t"
	"pfadd	%%mm3, %%mm2\n\t"
#ifdef HAVE_3DNOWEX
	"pswapd	%%mm2, %%mm2\n\t"
#else
	"movq	%%mm2, %%mm5\n\t"
	"psrlq	$32, %%mm2\n\t"
	"punpckldq %%mm5,%%mm2\n\t"
#endif
	"movq	24(%4), %%mm3\n\t"
	"pfsub	%%mm2, %%mm0\n\t"
	"movq	%%mm3, %%mm4\n\t"
	"movq	%%mm0, %%mm1\n\t"
	"pxor	%%mm6, %%mm0\n\t"
	"pfacc	%%mm1, %%mm0\n\t"
	"pfmul	%5, %%mm0\n\t"		/* scale by 0.5*sqrt(2) */
	"movq	%%mm0, %%mm1\n\t"
	"pxor	%%mm6, %%mm1\n\t"
	"pxor	%%mm7, %%mm0\n\t"
	"pfadd	%%mm1, %%mm3\n\t"
	"pfadd	%%mm0, %%mm4\n\t"
	"movq	%%mm4, 24(%0)\n\t"
	"movq	%%mm3, 56(%0)\n\t"
	:"=r"(x)
	:"m"(wT1), "m"(wB2), "m"(wB1), "0"(x), "m"(HSQRT2_3DNOW)
	:"memory");
}
1407 | |
/* Radix-4 FFT combine pass, 3DNow! version: merges four sub-transforms
 * of length 2*k (at x, x2k, x3k, x4k) into one of length 8*k, using the
 * precomputed wTB values and twiddle tables d / d_3.
 * TRANS_FILL_MM6_MM7_3DNOW loads the sign masks into mm6/mm7 which the
 * TRANS*_3DNOW macros (srfftp.h) rely on throughout. */
static void fft_asmb_3dnow(int k, complex_t *x, complex_t *wTB,
			   const complex_t *d, const complex_t *d_3)
{
    register complex_t *x2k, *x3k, *x4k, *wB;

    TRANS_FILL_MM6_MM7_3DNOW();
    x2k = x + 2 * k;
    x3k = x2k + 2 * k;
    x4k = x3k + 2 * k;
    wB = wTB + 2 * k;

    /* index 0 and 1 use trivial/known twiddles */
    TRANSZERO_3DNOW(x[0],x2k[0],x3k[0],x4k[0]);
    TRANS_3DNOW(x[1],x2k[1],x3k[1],x4k[1],wTB[1],wB[1],d[1],d_3[1]);

    /* remaining indices, two butterflies per iteration; pointers are
     * advanced by 2 so the [2]/[3] offsets walk the arrays */
    --k;
    for(;;) {
	TRANS_3DNOW(x[2],x2k[2],x3k[2],x4k[2],wTB[2],wB[2],d[2],d_3[2]);
	TRANS_3DNOW(x[3],x2k[3],x3k[3],x4k[3],wTB[3],wB[3],d[3],d_3[3]);
	if (!--k) break;
	x += 2;
	x2k += 2;
	x3k += 2;
	x4k += 2;
	d += 2;
	d_3 += 2;
	wTB += 2;
	wB += 2;
    }

}
1438 | |
/* 16-point combine step, 3DNow! version: merges the 8-point and two
 * 4-point sub-transforms produced by fft_8_3dnow/fft_4_3dnow into a
 * 16-point result, with the fixed delta16 twiddle tables.
 * TRANS_FILL_MM6_MM7_3DNOW loads the mm6/mm7 sign masks required by
 * the TRANS*_3DNOW macros (srfftp.h). */
void fft_asmb16_3dnow(complex_t *x, complex_t *wTB)
{
    /* NOTE(review): k appears unused in the visible body; it may be
     * referenced by a macro expansion — confirm against srfftp.h
     * before removing. */
    int k = 2;

    TRANS_FILL_MM6_MM7_3DNOW();
    /* transform x[0], x[8], x[4], x[12] */
    TRANSZERO_3DNOW(x[0],x[4],x[8],x[12]);

    /* transform x[1], x[9], x[5], x[13] */
    TRANS_3DNOW(x[1],x[5],x[9],x[13],wTB[1],wTB[5],delta16[1],delta16_3[1]);

    /* transform x[2], x[10], x[6], x[14] */
    TRANSHALF_16_3DNOW(x[2],x[6],x[10],x[14]);

    /* transform x[3], x[11], x[7], x[15] */
    TRANS_3DNOW(x[3],x[7],x[11],x[15],wTB[3],wTB[7],delta16[3],delta16_3[3]);

}
1457 | |
1458 static void fft_128p_3dnow(complex_t *a) | |
1459 { | |
1460 fft_8_3dnow(&a[0]); fft_4_3dnow(&a[8]); fft_4_3dnow(&a[12]); | |
1461 fft_asmb16_3dnow(&a[0], &a[8]); | |
1462 | |
1463 fft_8_3dnow(&a[16]), fft_8_3dnow(&a[24]); | |
1464 fft_asmb_3dnow(4, &a[0], &a[16],&delta32[0], &delta32_3[0]); | |
1465 | |
1466 fft_8_3dnow(&a[32]); fft_4_3dnow(&a[40]); fft_4_3dnow(&a[44]); | |
1467 fft_asmb16_3dnow(&a[32], &a[40]); | |
1468 | |
1469 fft_8_3dnow(&a[48]); fft_4_3dnow(&a[56]); fft_4_3dnow(&a[60]); | |
1470 fft_asmb16_3dnow(&a[48], &a[56]); | |
1471 | |
1472 fft_asmb_3dnow(8, &a[0], &a[32],&delta64[0], &delta64_3[0]); | |
1473 | |
1474 fft_8_3dnow(&a[64]); fft_4_3dnow(&a[72]); fft_4_3dnow(&a[76]); | |
1475 /* fft_16(&a[64]); */ | |
1476 fft_asmb16_3dnow(&a[64], &a[72]); | |
1477 | |
1478 fft_8_3dnow(&a[80]); fft_8_3dnow(&a[88]); | |
1479 | |
1480 /* fft_32(&a[64]); */ | |
1481 fft_asmb_3dnow(4, &a[64], &a[80],&delta32[0], &delta32_3[0]); | |
1482 | |
1483 fft_8_3dnow(&a[96]); fft_4_3dnow(&a[104]), fft_4_3dnow(&a[108]); | |
1484 /* fft_16(&a[96]); */ | |
1485 fft_asmb16_3dnow(&a[96], &a[104]); | |
1486 | |
1487 fft_8_3dnow(&a[112]), fft_8_3dnow(&a[120]); | |
1488 /* fft_32(&a[96]); */ | |
1489 fft_asmb_3dnow(4, &a[96], &a[112], &delta32[0], &delta32_3[0]); | |
1490 | |
1491 /* fft_128(&a[0]); */ | |
1492 fft_asmb_3dnow(16, &a[0], &a[64], &delta128[0], &delta128_3[0]); | |
1493 } | |
1494 #endif //ARCH_X86 | |
1495 | |
1496 static void fft_asmb(int k, complex_t *x, complex_t *wTB, | |
1497 const complex_t *d, const complex_t *d_3) | |
1498 { | |
1499 register complex_t *x2k, *x3k, *x4k, *wB; | |
1500 register float a_r, a_i, a1_r, a1_i, u_r, u_i, v_r, v_i; | |
1501 | |
1502 x2k = x + 2 * k; | |
1503 x3k = x2k + 2 * k; | |
1504 x4k = x3k + 2 * k; | |
1505 wB = wTB + 2 * k; | |
1506 | |
1507 TRANSZERO(x[0],x2k[0],x3k[0],x4k[0]); | |
1508 TRANS(x[1],x2k[1],x3k[1],x4k[1],wTB[1],wB[1],d[1],d_3[1]); | |
1509 | |
1510 --k; | |
1511 for(;;) { | |
1512 TRANS(x[2],x2k[2],x3k[2],x4k[2],wTB[2],wB[2],d[2],d_3[2]); | |
1513 TRANS(x[3],x2k[3],x3k[3],x4k[3],wTB[3],wB[3],d[3],d_3[3]); | |
1514 if (!--k) break; | |
1515 x += 2; | |
1516 x2k += 2; | |
1517 x3k += 2; | |
1518 x4k += 2; | |
1519 d += 2; | |
1520 d_3 += 2; | |
1521 wTB += 2; | |
1522 wB += 2; | |
1523 } | |
1524 | |
1525 } | |
1526 | |
/* 16-point combine step (C version): merges the 8-point and two
 * 4-point sub-transforms produced by fft_8/fft_4 into a 16-point
 * result, with the fixed delta16 twiddle tables.
 * The a_r..v_i scratch names are used by the TRANS* macros from
 * srfftp.h. */
static void fft_asmb16(complex_t *x, complex_t *wTB)
{
    register float a_r, a_i, a1_r, a1_i, u_r, u_i, v_r, v_i;
    /* NOTE(review): k appears unused in the visible body; it may be
     * referenced by a macro expansion — confirm against srfftp.h
     * before removing. */
    int k = 2;

    /* transform x[0], x[8], x[4], x[12] */
    TRANSZERO(x[0],x[4],x[8],x[12]);

    /* transform x[1], x[9], x[5], x[13] */
    TRANS(x[1],x[5],x[9],x[13],wTB[1],wTB[5],delta16[1],delta16_3[1]);

    /* transform x[2], x[10], x[6], x[14] */
    TRANSHALF_16(x[2],x[6],x[10],x[14]);

    /* transform x[3], x[11], x[7], x[15] */
    TRANS(x[3],x[7],x[11],x[15],wTB[3],wTB[7],delta16[3],delta16_3[3]);

}
1545 | |
1546 static void fft_4(complex_t *x) | |
1547 { | |
1548 /* delta_p = 1 here */ | |
1549 /* x[k] = sum_{i=0..3} x[i] * w^{i*k}, w=e^{-2*pi/4} | |
1550 */ | |
1551 | |
1552 register float yt_r, yt_i, yb_r, yb_i, u_r, u_i, vi_r, vi_i; | |
1553 | |
1554 yt_r = x[0].real; | |
1555 yb_r = yt_r - x[2].real; | |
1556 yt_r += x[2].real; | |
1557 | |
1558 u_r = x[1].real; | |
1559 vi_i = x[3].real - u_r; | |
1560 u_r += x[3].real; | |
1561 | |
1562 u_i = x[1].imag; | |
1563 vi_r = u_i - x[3].imag; | |
1564 u_i += x[3].imag; | |
1565 | |
1566 yt_i = yt_r; | |
1567 yt_i += u_r; | |
1568 x[0].real = yt_i; | |
1569 yt_r -= u_r; | |
1570 x[2].real = yt_r; | |
1571 yt_i = yb_r; | |
1572 yt_i += vi_r; | |
1573 x[1].real = yt_i; | |
1574 yb_r -= vi_r; | |
1575 x[3].real = yb_r; | |
1576 | |
1577 yt_i = x[0].imag; | |
1578 yb_i = yt_i - x[2].imag; | |
1579 yt_i += x[2].imag; | |
1580 | |
1581 yt_r = yt_i; | |
1582 yt_r += u_i; | |
1583 x[0].imag = yt_r; | |
1584 yt_i -= u_i; | |
1585 x[2].imag = yt_i; | |
1586 yt_r = yb_i; | |
1587 yt_r += vi_i; | |
1588 x[1].imag = yt_r; | |
1589 yb_i -= vi_i; | |
1590 x[3].imag = yb_i; | |
1591 } | |
1592 | |
1593 | |
/* 8-point complex FFT kernel, in place.
 * x[k] = sum_{i=0..7} x[i] * w^{i*k}, w = e^{-2*pi/8};
 * delta_p = diag{1, sqrt(i)}.
 * The odd inputs x[1]/x[3] are saved in wT1/wB1, the even inputs are
 * packed into x[0..3] and transformed with fft_4, then the saved odd
 * values and x[5]/x[7] are folded in one output pair at a time.
 * Statement order is significant: several variables (e.g. wB2_i,
 * wB2_r) capture the pre-overwrite values of x[5]. */
static void fft_8(complex_t *x)
{
    /* delta_p = diag{1, sqrt(i)} here */
    /* x[k] = sum_{i=0..7} x[i] * w^{i*k}, w=e^{-2*pi/8}
     */
    register float wT1_r, wT1_i, wB1_r, wB1_i, wT2_r, wT2_i, wB2_r, wB2_i;

    /* save odd inputs before the reshuffle below clobbers them */
    wT1_r = x[1].real;
    wT1_i = x[1].imag;
    wB1_r = x[3].real;
    wB1_i = x[3].imag;

    /* pack even inputs into x[0..3] and run the 4-point sub-FFT */
    x[1] = x[2];
    x[2] = x[4];
    x[3] = x[6];
    fft_4(&x[0]);


    /* x[0] x[4] */
    wT2_r = x[5].real;
    wT2_r += x[7].real;
    wT2_r += wT1_r;
    wT2_r += wB1_r;
    wT2_i = wT2_r;
    wT2_r += x[0].real;
    wT2_i = x[0].real - wT2_i;
    x[0].real = wT2_r;
    x[4].real = wT2_i;

    wT2_i = x[5].imag;
    wT2_i += x[7].imag;
    wT2_i += wT1_i;
    wT2_i += wB1_i;
    wT2_r = wT2_i;
    wT2_r += x[0].imag;
    wT2_i = x[0].imag - wT2_i;
    x[0].imag = wT2_r;
    x[4].imag = wT2_i;

    /* x[2] x[6] */
    wT2_r = x[5].imag;
    wT2_r -= x[7].imag;
    wT2_r += wT1_i;
    wT2_r -= wB1_i;
    wT2_i = wT2_r;
    wT2_r += x[2].real;
    wT2_i = x[2].real - wT2_i;
    x[2].real = wT2_r;
    x[6].real = wT2_i;

    wT2_i = x[5].real;
    wT2_i -= x[7].real;
    wT2_i += wT1_r;
    wT2_i -= wB1_r;
    wT2_r = wT2_i;
    wT2_r += x[2].imag;
    wT2_i = x[2].imag - wT2_i;
    /* note: imag assignments are crossed relative to the real part */
    x[2].imag = wT2_i;
    x[6].imag = wT2_r;


    /* x[1] x[5] */
    wT2_r = wT1_r;
    wT2_r += wB1_i;
    wT2_r -= x[5].real;
    wT2_r -= x[7].imag;
    wT2_i = wT1_i;
    wT2_i -= wB1_r;
    wT2_i -= x[5].imag;
    wT2_i += x[7].real;

    wB2_r = wT2_r;
    wB2_r += wT2_i;
    wT2_i -= wT2_r;
    wB2_r *= HSQRT2;
    wT2_i *= HSQRT2;
    wT2_r = wB2_r;
    wB2_r += x[1].real;
    wT2_r = x[1].real - wT2_r;

    wB2_i = x[5].real;	/* keep old x[5].real for the x[3]/x[7] stage */
    x[1].real = wB2_r;
    x[5].real = wT2_r;

    wT2_r = wT2_i;
    wT2_r += x[1].imag;
    wT2_i = x[1].imag - wT2_i;
    wB2_r = x[5].imag;	/* keep old x[5].imag for the x[3]/x[7] stage */
    x[1].imag = wT2_r;
    x[5].imag = wT2_i;

    /* x[3] x[7] */
    wT1_r -= wB1_i;
    wT1_i += wB1_r;
    wB1_r = wB2_i - x[7].imag;
    wB1_i = wB2_r + x[7].real;
    wT1_r -= wB1_r;
    wT1_i -= wB1_i;
    wB1_r = wT1_r + wT1_i;
    wB1_r *= HSQRT2;
    wT1_i -= wT1_r;
    wT1_i *= HSQRT2;
    wB2_r = x[3].real;
    wB2_i = wB2_r + wT1_i;
    wB2_r -= wT1_i;
    x[3].real = wB2_i;
    x[7].real = wB2_r;
    wB2_i = x[3].imag;
    wB2_r = wB2_i + wB1_r;
    wB2_i -= wB1_r;
    x[3].imag = wB2_i;
    x[7].imag = wB2_r;
}
1707 | |
1708 | |
1709 static void fft_128p(complex_t *a) | |
1710 { | |
1711 fft_8(&a[0]); fft_4(&a[8]); fft_4(&a[12]); | |
1712 fft_asmb16(&a[0], &a[8]); | |
1713 | |
1714 fft_8(&a[16]), fft_8(&a[24]); | |
1715 fft_asmb(4, &a[0], &a[16],&delta32[0], &delta32_3[0]); | |
1716 | |
1717 fft_8(&a[32]); fft_4(&a[40]); fft_4(&a[44]); | |
1718 fft_asmb16(&a[32], &a[40]); | |
1719 | |
1720 fft_8(&a[48]); fft_4(&a[56]); fft_4(&a[60]); | |
1721 fft_asmb16(&a[48], &a[56]); | |
1722 | |
1723 fft_asmb(8, &a[0], &a[32],&delta64[0], &delta64_3[0]); | |
1724 | |
1725 fft_8(&a[64]); fft_4(&a[72]); fft_4(&a[76]); | |
1726 /* fft_16(&a[64]); */ | |
1727 fft_asmb16(&a[64], &a[72]); | |
1728 | |
1729 fft_8(&a[80]); fft_8(&a[88]); | |
1730 | |
1731 /* fft_32(&a[64]); */ | |
1732 fft_asmb(4, &a[64], &a[80],&delta32[0], &delta32_3[0]); | |
1733 | |
1734 fft_8(&a[96]); fft_4(&a[104]), fft_4(&a[108]); | |
1735 /* fft_16(&a[96]); */ | |
1736 fft_asmb16(&a[96], &a[104]); | |
1737 | |
1738 fft_8(&a[112]), fft_8(&a[120]); | |
1739 /* fft_32(&a[96]); */ | |
1740 fft_asmb(4, &a[96], &a[112], &delta32[0], &delta32_3[0]); | |
1741 | |
1742 /* fft_128(&a[0]); */ | |
1743 fft_asmb(16, &a[0], &a[64], &delta128[0], &delta128_3[0]); | |
1744 } | |
1745 | |
1746 | |
1747 |