comparison Plugins/Input/aac/libfaad2/sbr_qmf.c @ 1010:29feaace84d0 trunk

[svn] - synchronize audacious-faad with FAAD2 CVS.
author nenolod
date Mon, 08 May 2006 06:56:47 -0700
parents 0a2ad94e8607
children 1e6c0a3f2d15
1009:1008da26c12d 1010:29feaace84d0
1 /* 1 /*
2 ** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding 2 ** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding
3 ** Copyright (C) 2003-2004 M. Bakker, Ahead Software AG, http://www.nero.com 3 ** Copyright (C) 2003-2004 M. Bakker, Ahead Software AG, http://www.nero.com
4 ** 4 **
5 ** This program is free software; you can redistribute it and/or modify 5 ** This program is free software; you can redistribute it and/or modify
6 ** it under the terms of the GNU General Public License as published by 6 ** it under the terms of the GNU General Public License as published by
7 ** the Free Software Foundation; either version 2 of the License, or 7 ** the Free Software Foundation; either version 2 of the License, or
8 ** (at your option) any later version. 8 ** (at your option) any later version.
9 ** 9 **
10 ** This program is distributed in the hope that it will be useful, 10 ** This program is distributed in the hope that it will be useful,
11 ** but WITHOUT ANY WARRANTY; without even the implied warranty of 11 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ** GNU General Public License for more details. 13 ** GNU General Public License for more details.
14 ** 14 **
15 ** You should have received a copy of the GNU General Public License 15 ** You should have received a copy of the GNU General Public License
16 ** along with this program; if not, write to the Free Software 16 ** along with this program; if not, write to the Free Software
17 ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 ** 18 **
19 ** Any non-GPL usage of this software or parts of this software is strictly 19 ** Any non-GPL usage of this software or parts of this software is strictly
20 ** forbidden. 20 ** forbidden.
21 ** 21 **
22 ** Commercial non-GPL licensing of this software is possible. 22 ** Commercial non-GPL licensing of this software is possible.
23 ** For more info contact Ahead Software through Mpeg4AAClicense@nero.com. 23 ** For more info contact Ahead Software through Mpeg4AAClicense@nero.com.
24 ** 24 **
25 ** $Id: sbr_qmf.c,v 1.27 2004/09/04 14:56:28 menno Exp $ 25 ** $Id: sbr_qmf.c,v 1.19 2004/01/05 14:05:12 menno Exp $
26 **/ 26 **/
27 27
28 #include "common.h" 28 #include "common.h"
29 #include "structs.h" 29 #include "structs.h"
30 30
36 #include "sbr_dct.h" 36 #include "sbr_dct.h"
37 #include "sbr_qmf.h" 37 #include "sbr_qmf.h"
38 #include "sbr_qmf_c.h" 38 #include "sbr_qmf_c.h"
39 #include "sbr_syntax.h" 39 #include "sbr_syntax.h"
40 40
41
41 qmfa_info *qmfa_init(uint8_t channels) 42 qmfa_info *qmfa_init(uint8_t channels)
42 { 43 {
43 qmfa_info *qmfa = (qmfa_info*)faad_malloc(sizeof(qmfa_info)); 44 qmfa_info *qmfa = (qmfa_info*)faad_malloc(sizeof(qmfa_info));
44 45 qmfa->x = (real_t*)faad_malloc(channels * 10 * sizeof(real_t));
45 /* x is implemented as double ringbuffer */ 46 memset(qmfa->x, 0, channels * 10 * sizeof(real_t));
46 qmfa->x = (real_t*)faad_malloc(2 * channels * 10 * sizeof(real_t));
47 memset(qmfa->x, 0, 2 * channels * 10 * sizeof(real_t));
48
49 /* ringbuffer index */
50 qmfa->x_index = 0;
51 47
52 qmfa->channels = channels; 48 qmfa->channels = channels;
53 49
54 return qmfa; 50 return qmfa;
55 } 51 }
62 faad_free(qmfa); 58 faad_free(qmfa);
63 } 59 }
64 } 60 }
65 61
66 void sbr_qmf_analysis_32(sbr_info *sbr, qmfa_info *qmfa, const real_t *input, 62 void sbr_qmf_analysis_32(sbr_info *sbr, qmfa_info *qmfa, const real_t *input,
67 qmf_t X[MAX_NTSRHFG][64], uint8_t offset, uint8_t kx) 63 qmf_t X[MAX_NTSRHFG][32], uint8_t offset, uint8_t kx)
68 { 64 {
69 ALIGN real_t u[64]; 65 ALIGN real_t u[64];
70 #ifndef SBR_LOW_POWER 66 #ifndef SBR_LOW_POWER
71 ALIGN real_t in_real[32], in_imag[32], out_real[32], out_imag[32]; 67 ALIGN real_t x[64], y[64];
72 #else 68 #else
73 ALIGN real_t y[32]; 69 ALIGN real_t y[32];
74 #endif 70 #endif
75 uint16_t in = 0; 71 uint16_t in = 0;
76 uint8_t l; 72 uint8_t l;
79 for (l = 0; l < sbr->numTimeSlotsRate; l++) 75 for (l = 0; l < sbr->numTimeSlotsRate; l++)
80 { 76 {
81 int16_t n; 77 int16_t n;
82 78
83 /* shift input buffer x */ 79 /* shift input buffer x */
84 /* input buffer is not shifted anymore, x is implemented as double ringbuffer */ 80 memmove(qmfa->x + 32, qmfa->x, (320-32)*sizeof(real_t));
85 //memmove(qmfa->x + 32, qmfa->x, (320-32)*sizeof(real_t));
86 81
87 /* add new samples to input buffer x */ 82 /* add new samples to input buffer x */
88 for (n = 32 - 1; n >= 0; n--) 83 for (n = 32 - 1; n >= 0; n--)
89 { 84 {
90 #ifdef FIXED_POINT 85 #ifdef FIXED_POINT
91 qmfa->x[qmfa->x_index + n] = qmfa->x[qmfa->x_index + n + 320] = (input[in++]) >> 4; 86 qmfa->x[n] = (input[in++]) >> 5;
92 #else 87 #else
93 qmfa->x[qmfa->x_index + n] = qmfa->x[qmfa->x_index + n + 320] = input[in++]; 88 qmfa->x[n] = input[in++];
94 #endif 89 #endif
95 } 90 }
96 91
97 /* window and summation to create array u */ 92 /* window and summation to create array u */
98 for (n = 0; n < 64; n++) 93 for (n = 0; n < 64; n++)
99 { 94 {
100 u[n] = MUL_F(qmfa->x[qmfa->x_index + n], qmf_c[2*n]) + 95 u[n] = MUL_F(qmfa->x[n], qmf_c[2*n]) +
101 MUL_F(qmfa->x[qmfa->x_index + n + 64], qmf_c[2*(n + 64)]) + 96 MUL_F(qmfa->x[n + 64], qmf_c[2*(n + 64)]) +
102 MUL_F(qmfa->x[qmfa->x_index + n + 128], qmf_c[2*(n + 128)]) + 97 MUL_F(qmfa->x[n + 128], qmf_c[2*(n + 128)]) +
103 MUL_F(qmfa->x[qmfa->x_index + n + 192], qmf_c[2*(n + 192)]) + 98 MUL_F(qmfa->x[n + 192], qmf_c[2*(n + 192)]) +
104 MUL_F(qmfa->x[qmfa->x_index + n + 256], qmf_c[2*(n + 256)]); 99 MUL_F(qmfa->x[n + 256], qmf_c[2*(n + 256)]);
105 } 100 }
106
107 /* update ringbuffer index */
108 qmfa->x_index -= 32;
109 if (qmfa->x_index < 0)
110 qmfa->x_index = (320-32);
111 101
112 /* calculate 32 subband samples by introducing X */ 102 /* calculate 32 subband samples by introducing X */
113 #ifdef SBR_LOW_POWER 103 #ifdef SBR_LOW_POWER
114 y[0] = u[48]; 104 y[0] = u[48];
115 for (n = 1; n < 16; n++) 105 for (n = 1; n < 16; n++)
122 for (n = 0; n < 32; n++) 112 for (n = 0; n < 32; n++)
123 { 113 {
124 if (n < kx) 114 if (n < kx)
125 { 115 {
126 #ifdef FIXED_POINT 116 #ifdef FIXED_POINT
127 QMF_RE(X[l + offset][n]) = u[n] /*<< 1*/; 117 QMF_RE(X[l + offset][n]) = u[n] << 1;
128 #else 118 #else
129 QMF_RE(X[l + offset][n]) = 2. * u[n]; 119 QMF_RE(X[l + offset][n]) = 2. * u[n];
130 #endif 120 #endif
131 } else { 121 } else {
132 QMF_RE(X[l + offset][n]) = 0; 122 QMF_RE(X[l + offset][n]) = 0;
133 } 123 }
134 } 124 }
135 #else 125 #else
136 126 x[0] = u[0];
137 // Reordering of data moved from DCT_IV to here 127 for (n = 0; n < 31; n++)
138 in_imag[31] = u[1]; 128 {
139 in_real[0] = u[0]; 129 x[2*n+1] = u[n+1] + u[63-n];
140 for (n = 1; n < 31; n++) 130 x[2*n+2] = u[n+1] - u[63-n];
141 { 131 }
142 in_imag[31 - n] = u[n+1]; 132 x[63] = u[32];
143 in_real[n] = -u[64-n]; 133
144 } 134 DCT4_64_kernel(y, x);
145 in_imag[0] = u[32]; 135
146 in_real[31] = -u[33]; 136 for (n = 0; n < 32; n++)
147 137 {
148 // dct4_kernel is DCT_IV without reordering which is done before and after FFT 138 if (n < kx)
149 dct4_kernel(in_real, in_imag, out_real, out_imag); 139 {
150
151 // Reordering of data moved from DCT_IV to here
152 for (n = 0; n < 16; n++) {
153 if (2*n+1 < kx) {
154 #ifdef FIXED_POINT 140 #ifdef FIXED_POINT
155 QMF_RE(X[l + offset][2*n]) = out_real[n]; 141 QMF_RE(X[l + offset][n]) = y[n] << 1;
156 QMF_IM(X[l + offset][2*n]) = out_imag[n]; 142 QMF_IM(X[l + offset][n]) = -y[63-n] << 1;
157 QMF_RE(X[l + offset][2*n+1]) = -out_imag[31-n]; 143 #else
158 QMF_IM(X[l + offset][2*n+1]) = -out_real[31-n]; 144 QMF_RE(X[l + offset][n]) = 2. * y[n];
159 #else 145 QMF_IM(X[l + offset][n]) = -2. * y[63-n];
160 QMF_RE(X[l + offset][2*n]) = 2. * out_real[n];
161 QMF_IM(X[l + offset][2*n]) = 2. * out_imag[n];
162 QMF_RE(X[l + offset][2*n+1]) = -2. * out_imag[31-n];
163 QMF_IM(X[l + offset][2*n+1]) = -2. * out_real[31-n];
164 #endif 146 #endif
165 } else { 147 } else {
166 if (2*n < kx) { 148 QMF_RE(X[l + offset][n]) = 0;
167 #ifdef FIXED_POINT 149 QMF_IM(X[l + offset][n]) = 0;
168 QMF_RE(X[l + offset][2*n]) = out_real[n];
169 QMF_IM(X[l + offset][2*n]) = out_imag[n];
170 #else
171 QMF_RE(X[l + offset][2*n]) = 2. * out_real[n];
172 QMF_IM(X[l + offset][2*n]) = 2. * out_imag[n];
173 #endif
174 }
175 else {
176 QMF_RE(X[l + offset][2*n]) = 0;
177 QMF_IM(X[l + offset][2*n]) = 0;
178 }
179 QMF_RE(X[l + offset][2*n+1]) = 0;
180 QMF_IM(X[l + offset][2*n+1]) = 0;
181 } 150 }
182 } 151 }
183 #endif 152 #endif
184 } 153 }
185 } 154 }
186 155
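The ring-buffer variant in the comparison above (the one with qmfa->x_index) replaces the per-slot memmove of qmfa->x with a double-length ring buffer: each 32-sample hop is written into both halves of a 2*320-entry array, so the five 64-sample window taps can always be read as one contiguous run while x_index steps backwards and wraps. A minimal sketch of that idea follows; it is not part of the changeset, the names are illustrative, it uses plain float instead of real_t, and it omits the time-reversal FAAD2 applies when storing each block.

#include <stdint.h>

#define WINDOW_LEN 320            /* analysis window length per channel */
#define HOP         32            /* new samples per QMF time slot      */

typedef struct {
    float   x[2 * WINDOW_LEN];    /* two copies of the window, back to back   */
    int16_t x_index;              /* start of the current window, initially 0 */
} qmf_ring;

/* Feed one hop of input; the returned pointer is a contiguous
 * WINDOW_LEN-sample view of the window, valid until the next call. */
static const float *qmf_ring_push(qmf_ring *r, const float *in)
{
    const float *window;
    int16_t n;

    /* mirror the new samples into both halves so a window starting
     * anywhere in the first half never has to wrap around */
    for (n = HOP - 1; n >= 0; n--)
        r->x[r->x_index + n] = r->x[r->x_index + n + WINDOW_LEN] = in[n];

    window = r->x + r->x_index;

    /* step the window start backwards for the next hop */
    r->x_index -= HOP;
    if (r->x_index < 0)
        r->x_index = WINDOW_LEN - HOP;

    return window;
}

Compared with shifting 288 samples on every time slot, this trades a second copy of the window for a pair of index updates; the synthesis buffer v in the same variant uses the same trick, with a 2*640 region stepped by 64 in the 32-band case and a 2*1280 region stepped by 128 in the 64-band case.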
187 static const complex_t qmf32_pre_twiddle[] =
188 {
189 { FRAC_CONST(0.999924701839145), FRAC_CONST(-0.012271538285720) },
190 { FRAC_CONST(0.999322384588350), FRAC_CONST(-0.036807222941359) },
191 { FRAC_CONST(0.998118112900149), FRAC_CONST(-0.061320736302209) },
192 { FRAC_CONST(0.996312612182778), FRAC_CONST(-0.085797312344440) },
193 { FRAC_CONST(0.993906970002356), FRAC_CONST(-0.110222207293883) },
194 { FRAC_CONST(0.990902635427780), FRAC_CONST(-0.134580708507126) },
195 { FRAC_CONST(0.987301418157858), FRAC_CONST(-0.158858143333861) },
196 { FRAC_CONST(0.983105487431216), FRAC_CONST(-0.183039887955141) },
197 { FRAC_CONST(0.978317370719628), FRAC_CONST(-0.207111376192219) },
198 { FRAC_CONST(0.972939952205560), FRAC_CONST(-0.231058108280671) },
199 { FRAC_CONST(0.966976471044852), FRAC_CONST(-0.254865659604515) },
200 { FRAC_CONST(0.960430519415566), FRAC_CONST(-0.278519689385053) },
201 { FRAC_CONST(0.953306040354194), FRAC_CONST(-0.302005949319228) },
202 { FRAC_CONST(0.945607325380521), FRAC_CONST(-0.325310292162263) },
203 { FRAC_CONST(0.937339011912575), FRAC_CONST(-0.348418680249435) },
204 { FRAC_CONST(0.928506080473216), FRAC_CONST(-0.371317193951838) },
205 { FRAC_CONST(0.919113851690058), FRAC_CONST(-0.393992040061048) },
206 { FRAC_CONST(0.909167983090522), FRAC_CONST(-0.416429560097637) },
207 { FRAC_CONST(0.898674465693954), FRAC_CONST(-0.438616238538528) },
208 { FRAC_CONST(0.887639620402854), FRAC_CONST(-0.460538710958240) },
209 { FRAC_CONST(0.876070094195407), FRAC_CONST(-0.482183772079123) },
210 { FRAC_CONST(0.863972856121587), FRAC_CONST(-0.503538383725718) },
211 { FRAC_CONST(0.851355193105265), FRAC_CONST(-0.524589682678469) },
212 { FRAC_CONST(0.838224705554838), FRAC_CONST(-0.545324988422046) },
213 { FRAC_CONST(0.824589302785025), FRAC_CONST(-0.565731810783613) },
214 { FRAC_CONST(0.810457198252595), FRAC_CONST(-0.585797857456439) },
215 { FRAC_CONST(0.795836904608884), FRAC_CONST(-0.605511041404326) },
216 { FRAC_CONST(0.780737228572094), FRAC_CONST(-0.624859488142386) },
217 { FRAC_CONST(0.765167265622459), FRAC_CONST(-0.643831542889791) },
218 { FRAC_CONST(0.749136394523459), FRAC_CONST(-0.662415777590172) },
219 { FRAC_CONST(0.732654271672413), FRAC_CONST(-0.680600997795453) },
220 { FRAC_CONST(0.715730825283819), FRAC_CONST(-0.698376249408973) }
221 };
222
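The qmf32_pre_twiddle[] table in the ring-buffer variant holds the complex factors e^{-j*pi*(2k+1)/256}, i.e. cos(a) and -sin(a) with a = pi*(2k+1)/256 for k = 0..31; they pre-twiddle the 32 subband samples ahead of the DCT4_32/DST4_32 pair in the complex 32-band synthesis further down. As an aside, not FAAD2 code, a small generator like the following reproduces the constants:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void)
{
    int k;

    /* print cos/-sin of pi*(2k+1)/256 in the FRAC_CONST form used above */
    for (k = 0; k < 32; k++)
    {
        double a = M_PI * (2 * k + 1) / 256.0;
        printf("    { FRAC_CONST(%.15f), FRAC_CONST(%.15f) },\n",
               cos(a), -sin(a));
    }
    return 0;
}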
223 qmfs_info *qmfs_init(uint8_t channels) 156 qmfs_info *qmfs_init(uint8_t channels)
224 { 157 {
225 qmfs_info *qmfs = (qmfs_info*)faad_malloc(sizeof(qmfs_info)); 158 qmfs_info *qmfs = (qmfs_info*)faad_malloc(sizeof(qmfs_info));
226 159
227 /* v is a double ringbuffer */ 160 #ifndef SBR_LOW_POWER
228 qmfs->v = (real_t*)faad_malloc(2 * channels * 20 * sizeof(real_t)); 161 qmfs->v[0] = (real_t*)faad_malloc(channels * 10 * sizeof(real_t));
229 memset(qmfs->v, 0, 2 * channels * 20 * sizeof(real_t)); 162 memset(qmfs->v[0], 0, channels * 10 * sizeof(real_t));
163 qmfs->v[1] = (real_t*)faad_malloc(channels * 10 * sizeof(real_t));
164 memset(qmfs->v[1], 0, channels * 10 * sizeof(real_t));
165 #else
166 qmfs->v[0] = (real_t*)faad_malloc(channels * 20 * sizeof(real_t));
167 memset(qmfs->v[0], 0, channels * 20 * sizeof(real_t));
168 qmfs->v[1] = NULL;
169 #endif
230 170
231 qmfs->v_index = 0; 171 qmfs->v_index = 0;
232 172
233 qmfs->channels = channels; 173 qmfs->channels = channels;
234 174
175 #ifdef USE_SSE
176 if (cpu_has_sse())
177 {
178 qmfs->qmf_func = sbr_qmf_synthesis_64_sse;
179 } else {
180 qmfs->qmf_func = sbr_qmf_synthesis_64;
181 }
182 #endif
183
235 return qmfs; 184 return qmfs;
236 } 185 }
237 186
238 void qmfs_end(qmfs_info *qmfs) 187 void qmfs_end(qmfs_info *qmfs)
239 { 188 {
240 if (qmfs) 189 if (qmfs)
241 { 190 {
242 if (qmfs->v) faad_free(qmfs->v); 191 if (qmfs->v[0]) faad_free(qmfs->v[0]);
192 #ifndef SBR_LOW_POWER
193 if (qmfs->v[1]) faad_free(qmfs->v[1]);
194 #endif
243 faad_free(qmfs); 195 faad_free(qmfs);
244 } 196 }
245 } 197 }
246 198
247 #ifdef SBR_LOW_POWER 199 #ifdef SBR_LOW_POWER
248
249 void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
250 real_t *output)
251 {
252 ALIGN real_t x[16];
253 ALIGN real_t y[16];
254 int16_t n, k, out = 0;
255 uint8_t l;
256
257 /* qmf subsample l */
258 for (l = 0; l < sbr->numTimeSlotsRate; l++)
259 {
260 /* shift buffers */
261 /* we are not shifting v, it is a double ringbuffer */
262 //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));
263
264 /* calculate 64 samples */
265 for (k = 0; k < 16; k++)
266 {
267 #ifdef FIXED_POINT
268 y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][31 - k]));
269 x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][31 - k]));
270 #else
271 y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][31 - k])) / 32.0;
272 x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][31 - k])) / 32.0;
273 #endif
274 }
275
276 /* even n samples */
277 DCT2_16_unscaled(x, x);
278 /* odd n samples */
279 DCT4_16(y, y);
280
281 for (n = 8; n < 24; n++)
282 {
283 qmfs->v[qmfs->v_index + n*2] = qmfs->v[qmfs->v_index + 640 + n*2] = x[n-8];
284 qmfs->v[qmfs->v_index + n*2+1] = qmfs->v[qmfs->v_index + 640 + n*2+1] = y[n-8];
285 }
286 for (n = 0; n < 16; n++)
287 {
288 qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 640 + n] = qmfs->v[qmfs->v_index + 32-n];
289 }
290 qmfs->v[qmfs->v_index + 48] = qmfs->v[qmfs->v_index + 640 + 48] = 0;
291 for (n = 1; n < 16; n++)
292 {
293 qmfs->v[qmfs->v_index + 48+n] = qmfs->v[qmfs->v_index + 640 + 48+n] = -qmfs->v[qmfs->v_index + 48-n];
294 }
295
296 /* calculate 32 output samples and window */
297 for (k = 0; k < 32; k++)
298 {
299 output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[2*k]) +
300 MUL_F(qmfs->v[qmfs->v_index + 96 + k], qmf_c[64 + 2*k]) +
301 MUL_F(qmfs->v[qmfs->v_index + 128 + k], qmf_c[128 + 2*k]) +
302 MUL_F(qmfs->v[qmfs->v_index + 224 + k], qmf_c[192 + 2*k]) +
303 MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[256 + 2*k]) +
304 MUL_F(qmfs->v[qmfs->v_index + 352 + k], qmf_c[320 + 2*k]) +
305 MUL_F(qmfs->v[qmfs->v_index + 384 + k], qmf_c[384 + 2*k]) +
306 MUL_F(qmfs->v[qmfs->v_index + 480 + k], qmf_c[448 + 2*k]) +
307 MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[512 + 2*k]) +
308 MUL_F(qmfs->v[qmfs->v_index + 608 + k], qmf_c[576 + 2*k]);
309 }
310
311 /* update the ringbuffer index */
312 qmfs->v_index -= 64;
313 if (qmfs->v_index < 0)
314 qmfs->v_index = (640-64);
315 }
316 }
317
318 void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64], 200 void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
319 real_t *output) 201 real_t *output)
320 { 202 {
321 ALIGN real_t x[64]; 203 ALIGN real_t x[64];
322 ALIGN real_t y[64]; 204 ALIGN real_t y[64];
325 207
326 208
327 /* qmf subsample l */ 209 /* qmf subsample l */
328 for (l = 0; l < sbr->numTimeSlotsRate; l++) 210 for (l = 0; l < sbr->numTimeSlotsRate; l++)
329 { 211 {
212 //real_t *v0, *v1;
213
330 /* shift buffers */ 214 /* shift buffers */
331 /* we are not shifting v, it is a double ringbuffer */ 215 //memmove(qmfs->v[0] + 64, qmfs->v[0], (640-64)*sizeof(real_t));
332 //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t)); 216 //memmove(qmfs->v[1] + 64, qmfs->v[1], (640-64)*sizeof(real_t));
217 memmove(qmfs->v[0] + 128, qmfs->v[0], (1280-128)*sizeof(real_t));
218
219 //v0 = qmfs->v[qmfs->v_index];
220 //v1 = qmfs->v[(qmfs->v_index + 1) & 0x1];
221 //qmfs->v_index = (qmfs->v_index + 1) & 0x1;
333 222
334 /* calculate 128 samples */ 223 /* calculate 128 samples */
335 for (k = 0; k < 32; k++) 224 for (k = 0; k < 64; k++)
336 { 225 {
337 #ifdef FIXED_POINT 226 #ifdef FIXED_POINT
338 y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][63 - k])); 227 x[k] = QMF_RE(X[l][k]);
339 x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][63 - k])); 228 #else
340 #else 229 x[k] = QMF_RE(X[l][k]) / 32.;
341 y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][63 - k])) / 32.0; 230 #endif
342 x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][63 - k])) / 32.0; 231 }
343 #endif 232
344 } 233 for (n = 0; n < 32; n++)
345 234 {
346 /* even n samples */ 235 y[2*n] = -x[2*n];
347 DCT2_32_unscaled(x, x); 236 y[2*n+1] = x[2*n+1];
348 /* odd n samples */ 237 }
349 DCT4_32(y, y); 238
350 239 DCT2_64_unscaled(x, x);
351 for (n = 16; n < 48; n++) 240
352 { 241 for (n = 0; n < 64; n++)
353 qmfs->v[qmfs->v_index + n*2] = qmfs->v[qmfs->v_index + 1280 + n*2] = x[n-16]; 242 {
354 qmfs->v[qmfs->v_index + n*2+1] = qmfs->v[qmfs->v_index + 1280 + n*2+1] = y[n-16]; 243 qmfs->v[0][n+32] = x[n];
355 } 244 }
356 for (n = 0; n < 32; n++) 245 for (n = 0; n < 32; n++)
357 { 246 {
358 qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 1280 + n] = qmfs->v[qmfs->v_index + 64-n]; 247 qmfs->v[0][31 - n] = x[n + 1];
359 } 248 }
360 qmfs->v[qmfs->v_index + 96] = qmfs->v[qmfs->v_index + 1280 + 96] = 0; 249 DST2_64_unscaled(x, y);
250 qmfs->v[0][96] = 0;
361 for (n = 1; n < 32; n++) 251 for (n = 1; n < 32; n++)
362 { 252 {
363 qmfs->v[qmfs->v_index + 96+n] = qmfs->v[qmfs->v_index + 1280 + 96+n] = -qmfs->v[qmfs->v_index + 96-n]; 253 qmfs->v[0][n + 96] = x[n-1];
364 } 254 }
365 255
366 /* calculate 64 output samples and window */ 256 /* calculate 64 output samples and window */
367 for (k = 0; k < 64; k++) 257 for (k = 0; k < 64; k++)
368 { 258 {
369 output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[k]) + 259 #if 1
370 MUL_F(qmfs->v[qmfs->v_index + 192 + k], qmf_c[64 + k]) + 260 output[out++] = MUL_F(qmfs->v[0][k], qmf_c[k]) +
371 MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[128 + k]) + 261 MUL_F(qmfs->v[0][192 + k], qmf_c[64 + k]) +
372 MUL_F(qmfs->v[qmfs->v_index + 256 + 192 + k], qmf_c[128 + 64 + k]) + 262 MUL_F(qmfs->v[0][256 + k], qmf_c[128 + k]) +
373 MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[256 + k]) + 263 MUL_F(qmfs->v[0][256 + 192 + k], qmf_c[128 + 64 + k]) +
374 MUL_F(qmfs->v[qmfs->v_index + 512 + 192 + k], qmf_c[256 + 64 + k]) + 264 MUL_F(qmfs->v[0][512 + k], qmf_c[256 + k]) +
375 MUL_F(qmfs->v[qmfs->v_index + 768 + k], qmf_c[384 + k]) + 265 MUL_F(qmfs->v[0][512 + 192 + k], qmf_c[256 + 64 + k]) +
376 MUL_F(qmfs->v[qmfs->v_index + 768 + 192 + k], qmf_c[384 + 64 + k]) + 266 MUL_F(qmfs->v[0][768 + k], qmf_c[384 + k]) +
377 MUL_F(qmfs->v[qmfs->v_index + 1024 + k], qmf_c[512 + k]) + 267 MUL_F(qmfs->v[0][768 + 192 + k], qmf_c[384 + 64 + k]) +
378 MUL_F(qmfs->v[qmfs->v_index + 1024 + 192 + k], qmf_c[512 + 64 + k]); 268 MUL_F(qmfs->v[0][1024 + k], qmf_c[512 + k]) +
379 } 269 MUL_F(qmfs->v[0][1024 + 192 + k], qmf_c[512 + 64 + k]);
380 270 #else
381 /* update the ringbuffer index */ 271 output[out++] = MUL_F(v0[k], qmf_c[k]) +
382 qmfs->v_index -= 128; 272 MUL_F(v0[64 + k], qmf_c[64 + k]) +
383 if (qmfs->v_index < 0) 273 MUL_F(v0[128 + k], qmf_c[128 + k]) +
384 qmfs->v_index = (1280-128); 274 MUL_F(v0[192 + k], qmf_c[192 + k]) +
385 } 275 MUL_F(v0[256 + k], qmf_c[256 + k]) +
386 } 276 MUL_F(v0[320 + k], qmf_c[320 + k]) +
387 #else 277 MUL_F(v0[384 + k], qmf_c[384 + k]) +
388 void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64], 278 MUL_F(v0[448 + k], qmf_c[448 + k]) +
389 real_t *output) 279 MUL_F(v0[512 + k], qmf_c[512 + k]) +
390 { 280 MUL_F(v0[576 + k], qmf_c[576 + k]);
391 ALIGN real_t x1[32], x2[32]; 281 #endif
392 #ifndef FIXED_POINT 282 }
393 real_t scale = 1.f/64.f; 283 }
394 #endif 284 }
285
286 void sbr_qmf_synthesis_64_sse(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
287 real_t *output)
288 {
289 ALIGN real_t x[64];
290 ALIGN real_t y[64];
291 ALIGN real_t y2[64];
395 int16_t n, k, out = 0; 292 int16_t n, k, out = 0;
396 uint8_t l; 293 uint8_t l;
397 294
398
399 /* qmf subsample l */ 295 /* qmf subsample l */
400 for (l = 0; l < sbr->numTimeSlotsRate; l++) 296 for (l = 0; l < sbr->numTimeSlotsRate; l++)
401 { 297 {
402 /* shift buffer v */ 298 //real_t *v0, *v1;
403 /* buffer is not shifted, we are using a ringbuffer */ 299
404 //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t)); 300 /* shift buffers */
405 301 //memmove(qmfs->v[0] + 64, qmfs->v[0], (640-64)*sizeof(real_t));
406 /* calculate 64 samples */ 302 //memmove(qmfs->v[1] + 64, qmfs->v[1], (640-64)*sizeof(real_t));
407 /* complex pre-twiddle */ 303 memmove(qmfs->v[0] + 128, qmfs->v[0], (1280-128)*sizeof(real_t));
408 for (k = 0; k < 32; k++) 304
409 { 305 //v0 = qmfs->v[qmfs->v_index];
410 x1[k] = MUL_F(QMF_RE(X[l][k]), RE(qmf32_pre_twiddle[k])) - MUL_F(QMF_IM(X[l][k]), IM(qmf32_pre_twiddle[k])); 306 //v1 = qmfs->v[(qmfs->v_index + 1) & 0x1];
411 x2[k] = MUL_F(QMF_IM(X[l][k]), RE(qmf32_pre_twiddle[k])) + MUL_F(QMF_RE(X[l][k]), IM(qmf32_pre_twiddle[k])); 307 //qmfs->v_index = (qmfs->v_index + 1) & 0x1;
412 308
413 #ifndef FIXED_POINT 309 /* calculate 128 samples */
414 x1[k] *= scale; 310 for (k = 0; k < 64; k++)
415 x2[k] *= scale; 311 {
416 #else 312 #ifdef FIXED_POINT
417 x1[k] >>= 1; 313 x[k] = QMF_RE(X[l][k]);
418 x2[k] >>= 1; 314 #else
419 #endif 315 x[k] = QMF_RE(X[l][k]) / 32.;
420 } 316 #endif
421 317 }
422 /* transform */ 318
423 DCT4_32(x1, x1); 319 for (n = 0; n < 32; n++)
424 DST4_32(x2, x2); 320 {
425 321 y[2*n] = -x[2*n];
426 for (n = 0; n < 32; n++) 322 y[2*n+1] = x[2*n+1];
427 { 323 }
428 qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 640 + n] = -x1[n] + x2[n]; 324
429 qmfs->v[qmfs->v_index + 63 - n] = qmfs->v[qmfs->v_index + 640 + 63 - n] = x1[n] + x2[n]; 325 DCT2_64_unscaled(x, x);
430 } 326
431 327 for (n = 0; n < 64; n++)
432 /* calculate 32 output samples and window */ 328 {
433 for (k = 0; k < 32; k++) 329 qmfs->v[0][n+32] = x[n];
434 { 330 }
435 output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[2*k]) + 331 for (n = 0; n < 32; n++)
436 MUL_F(qmfs->v[qmfs->v_index + 96 + k], qmf_c[64 + 2*k]) + 332 {
437 MUL_F(qmfs->v[qmfs->v_index + 128 + k], qmf_c[128 + 2*k]) + 333 qmfs->v[0][31 - n] = x[n + 1];
438 MUL_F(qmfs->v[qmfs->v_index + 224 + k], qmf_c[192 + 2*k]) + 334 }
439 MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[256 + 2*k]) + 335
440 MUL_F(qmfs->v[qmfs->v_index + 352 + k], qmf_c[320 + 2*k]) + 336 DST2_64_unscaled(x, y);
441 MUL_F(qmfs->v[qmfs->v_index + 384 + k], qmf_c[384 + 2*k]) + 337 qmfs->v[0][96] = 0;
442 MUL_F(qmfs->v[qmfs->v_index + 480 + k], qmf_c[448 + 2*k]) + 338 for (n = 1; n < 32; n++)
443 MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[512 + 2*k]) + 339 {
444 MUL_F(qmfs->v[qmfs->v_index + 608 + k], qmf_c[576 + 2*k]); 340 qmfs->v[0][n + 96] = x[n-1];
445 } 341 }
446 342
447 /* update ringbuffer index */ 343 /* calculate 64 output samples and window */
448 qmfs->v_index -= 64; 344 for (k = 0; k < 64; k++)
449 if (qmfs->v_index < 0) 345 {
450 qmfs->v_index = (640 - 64); 346 #if 1
451 } 347 output[out++] = MUL_F(qmfs->v[0][k], qmf_c[k]) +
452 } 348 MUL_F(qmfs->v[0][192 + k], qmf_c[64 + k]) +
453 349 MUL_F(qmfs->v[0][256 + k], qmf_c[128 + k]) +
350 MUL_F(qmfs->v[0][256 + 192 + k], qmf_c[128 + 64 + k]) +
351 MUL_F(qmfs->v[0][512 + k], qmf_c[256 + k]) +
352 MUL_F(qmfs->v[0][512 + 192 + k], qmf_c[256 + 64 + k]) +
353 MUL_F(qmfs->v[0][768 + k], qmf_c[384 + k]) +
354 MUL_F(qmfs->v[0][768 + 192 + k], qmf_c[384 + 64 + k]) +
355 MUL_F(qmfs->v[0][1024 + k], qmf_c[512 + k]) +
356 MUL_F(qmfs->v[0][1024 + 192 + k], qmf_c[512 + 64 + k]);
357 #else
358 output[out++] = MUL_F(v0[k], qmf_c[k]) +
359 MUL_F(v0[64 + k], qmf_c[64 + k]) +
360 MUL_F(v0[128 + k], qmf_c[128 + k]) +
361 MUL_F(v0[192 + k], qmf_c[192 + k]) +
362 MUL_F(v0[256 + k], qmf_c[256 + k]) +
363 MUL_F(v0[320 + k], qmf_c[320 + k]) +
364 MUL_F(v0[384 + k], qmf_c[384 + k]) +
365 MUL_F(v0[448 + k], qmf_c[448 + k]) +
366 MUL_F(v0[512 + k], qmf_c[512 + k]) +
367 MUL_F(v0[576 + k], qmf_c[576 + k]);
368 #endif
369 }
370 }
371 }
372 #else
454 void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64], 373 void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
455 real_t *output) 374 real_t *output)
456 { 375 {
457 // ALIGN real_t x1[64], x2[64]; 376 ALIGN real_t x1[64], x2[64];
458 #ifndef SBR_LOW_POWER
459 ALIGN real_t in_real1[32], in_imag1[32], out_real1[32], out_imag1[32];
460 ALIGN real_t in_real2[32], in_imag2[32], out_real2[32], out_imag2[32];
461 #endif
462 qmf_t * pX;
463 real_t * pring_buffer_1, * pring_buffer_3;
464 // real_t * ptemp_1, * ptemp_2;
465 #ifdef PREFER_POINTERS
466 // These pointers are used if target platform has autoinc address generators
467 real_t * pring_buffer_2, * pring_buffer_4;
468 real_t * pring_buffer_5, * pring_buffer_6;
469 real_t * pring_buffer_7, * pring_buffer_8;
470 real_t * pring_buffer_9, * pring_buffer_10;
471 const real_t * pqmf_c_1, * pqmf_c_2, * pqmf_c_3, * pqmf_c_4;
472 const real_t * pqmf_c_5, * pqmf_c_6, * pqmf_c_7, * pqmf_c_8;
473 const real_t * pqmf_c_9, * pqmf_c_10;
474 #endif // #ifdef PREFER_POINTERS
475 #ifndef FIXED_POINT
476 real_t scale = 1.f/64.f; 377 real_t scale = 1.f/64.f;
477 #endif
478 int16_t n, k, out = 0; 378 int16_t n, k, out = 0;
479 uint8_t l; 379 uint8_t l;
480 380
481 381
482 /* qmf subsample l */ 382 /* qmf subsample l */
483 for (l = 0; l < sbr->numTimeSlotsRate; l++) 383 for (l = 0; l < sbr->numTimeSlotsRate; l++)
484 { 384 {
485 /* shift buffer v */ 385 real_t *v0, *v1;
486 /* buffer is not shifted, we use double ringbuffer */ 386
487 //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t)); 387 /* shift buffers */
388 memmove(qmfs->v[0] + 64, qmfs->v[0], (640-64)*sizeof(real_t));
389 memmove(qmfs->v[1] + 64, qmfs->v[1], (640-64)*sizeof(real_t));
390
391 v0 = qmfs->v[qmfs->v_index];
392 v1 = qmfs->v[(qmfs->v_index + 1) & 0x1];
393 qmfs->v_index = (qmfs->v_index + 1) & 0x1;
488 394
489 /* calculate 128 samples */ 395 /* calculate 128 samples */
490 #ifndef FIXED_POINT 396 x1[0] = scale*QMF_RE(X[l][0]);
491 397 x2[63] = scale*QMF_IM(X[l][0]);
492 pX = X[l]; 398 for (k = 0; k < 31; k++)
493 399 {
494 in_imag1[31] = scale*QMF_RE(pX[1]); 400 x1[2*k+1] = scale*(QMF_RE(X[l][2*k+1]) - QMF_RE(X[l][2*k+2]));
495 in_real1[0] = scale*QMF_RE(pX[0]); 401 x1[2*k+2] = scale*(QMF_RE(X[l][2*k+1]) + QMF_RE(X[l][2*k+2]));
496 in_imag2[31] = scale*QMF_IM(pX[63-1]); 402
497 in_real2[0] = scale*QMF_IM(pX[63-0]); 403 x2[61 - 2*k] = scale*(QMF_IM(X[l][2*k+2]) - QMF_IM(X[l][2*k+1]));
498 for (k = 1; k < 31; k++) 404 x2[62 - 2*k] = scale*(QMF_IM(X[l][2*k+2]) + QMF_IM(X[l][2*k+1]));
499 { 405 }
500 in_imag1[31 - k] = scale*QMF_RE(pX[2*k + 1]); 406 x1[63] = scale*QMF_RE(X[l][63]);
501 in_real1[ k] = scale*QMF_RE(pX[2*k ]); 407 x2[0] = scale*QMF_IM(X[l][63]);
502 in_imag2[31 - k] = scale*QMF_IM(pX[63 - (2*k + 1)]); 408
503 in_real2[ k] = scale*QMF_IM(pX[63 - (2*k )]); 409 DCT4_64_kernel(x1, x1);
504 } 410 DCT4_64_kernel(x2, x2);
505 in_imag1[0] = scale*QMF_RE(pX[63]); 411
506 in_real1[31] = scale*QMF_RE(pX[62]); 412 for (n = 0; n < 32; n++)
507 in_imag2[0] = scale*QMF_IM(pX[63-63]); 413 {
508 in_real2[31] = scale*QMF_IM(pX[63-62]); 414 v0[ 2*n] = x2[2*n] - x1[2*n];
509 415 v1[63-2*n] = x2[2*n] + x1[2*n];
510 #else 416 v0[ 2*n+1] = -x2[2*n+1] - x1[2*n+1];
511 417 v1[62-2*n] = -x2[2*n+1] + x1[2*n+1];
512 pX = X[l]; 418 }
513
514 in_imag1[31] = QMF_RE(pX[1]) >> 1;
515 in_real1[0] = QMF_RE(pX[0]) >> 1;
516 in_imag2[31] = QMF_IM(pX[62]) >> 1;
517 in_real2[0] = QMF_IM(pX[63]) >> 1;
518 for (k = 1; k < 31; k++)
519 {
520 in_imag1[31 - k] = QMF_RE(pX[2*k + 1]) >> 1;
521 in_real1[ k] = QMF_RE(pX[2*k ]) >> 1;
522 in_imag2[31 - k] = QMF_IM(pX[63 - (2*k + 1)]) >> 1;
523 in_real2[ k] = QMF_IM(pX[63 - (2*k )]) >> 1;
524 }
525 in_imag1[0] = QMF_RE(pX[63]) >> 1;
526 in_real1[31] = QMF_RE(pX[62]) >> 1;
527 in_imag2[0] = QMF_IM(pX[0]) >> 1;
528 in_real2[31] = QMF_IM(pX[1]) >> 1;
529
530 #endif
531
532
533 // dct4_kernel is DCT_IV without reordering which is done before and after FFT
534 dct4_kernel(in_real1, in_imag1, out_real1, out_imag1);
535 dct4_kernel(in_real2, in_imag2, out_real2, out_imag2);
536
537
538 pring_buffer_1 = qmfs->v + qmfs->v_index;
539 pring_buffer_3 = pring_buffer_1 + 1280;
540 #ifdef PREFER_POINTERS
541 pring_buffer_2 = pring_buffer_1 + 127;
542 pring_buffer_4 = pring_buffer_1 + (1280 + 127);
543 #endif // #ifdef PREFER_POINTERS
544 // ptemp_1 = x1;
545 // ptemp_2 = x2;
546 #ifdef PREFER_POINTERS
547 for (n = 0; n < 32; n ++)
548 {
549 //real_t x1 = *ptemp_1++;
550 //real_t x2 = *ptemp_2++;
551 // pring_buffer_3 and pring_buffer_4 are needed only for double ring buffer
552 *pring_buffer_1++ = *pring_buffer_3++ = out_real2[n] - out_real1[n];
553 *pring_buffer_2-- = *pring_buffer_4-- = out_real2[n] + out_real1[n];
554 //x1 = *ptemp_1++;
555 //x2 = *ptemp_2++;
556 *pring_buffer_1++ = *pring_buffer_3++ = out_imag2[31-n] + out_imag1[31-n];
557 *pring_buffer_2-- = *pring_buffer_4-- = out_imag2[31-n] - out_imag1[31-n];
558 }
559 #else // #ifdef PREFER_POINTERS
560
561 for (n = 0; n < 32; n++)
562 {
563 // pring_buffer_3 and pring_buffer_4 are needed only for double ring buffer
564 pring_buffer_1[2*n] = pring_buffer_3[2*n] = out_real2[n] - out_real1[n];
565 pring_buffer_1[127-2*n] = pring_buffer_3[127-2*n] = out_real2[n] + out_real1[n];
566 pring_buffer_1[2*n+1] = pring_buffer_3[2*n+1] = out_imag2[31-n] + out_imag1[31-n];
567 pring_buffer_1[127-(2*n+1)] = pring_buffer_3[127-(2*n+1)] = out_imag2[31-n] - out_imag1[31-n];
568 }
569
570 #endif // #ifdef PREFER_POINTERS
571
572 pring_buffer_1 = qmfs->v + qmfs->v_index;
573 #ifdef PREFER_POINTERS
574 pring_buffer_2 = pring_buffer_1 + 192;
575 pring_buffer_3 = pring_buffer_1 + 256;
576 pring_buffer_4 = pring_buffer_1 + (256 + 192);
577 pring_buffer_5 = pring_buffer_1 + 512;
578 pring_buffer_6 = pring_buffer_1 + (512 + 192);
579 pring_buffer_7 = pring_buffer_1 + 768;
580 pring_buffer_8 = pring_buffer_1 + (768 + 192);
581 pring_buffer_9 = pring_buffer_1 + 1024;
582 pring_buffer_10 = pring_buffer_1 + (1024 + 192);
583 pqmf_c_1 = qmf_c;
584 pqmf_c_2 = qmf_c + 64;
585 pqmf_c_3 = qmf_c + 128;
586 pqmf_c_4 = qmf_c + 192;
587 pqmf_c_5 = qmf_c + 256;
588 pqmf_c_6 = qmf_c + 320;
589 pqmf_c_7 = qmf_c + 384;
590 pqmf_c_8 = qmf_c + 448;
591 pqmf_c_9 = qmf_c + 512;
592 pqmf_c_10 = qmf_c + 576;
593 #endif // #ifdef PREFER_POINTERS
594 419
595 /* calculate 64 output samples and window */ 420 /* calculate 64 output samples and window */
596 for (k = 0; k < 64; k++) 421 for (k = 0; k < 64; k++)
597 { 422 {
598 #ifdef PREFER_POINTERS 423 output[out++] = MUL_F(v0[k], qmf_c[k]) +
599 output[out++] = 424 MUL_F(v0[64 + k], qmf_c[64 + k]) +
600 MUL_F(*pring_buffer_1++, *pqmf_c_1++) + 425 MUL_F(v0[128 + k], qmf_c[128 + k]) +
601 MUL_F(*pring_buffer_2++, *pqmf_c_2++) + 426 MUL_F(v0[192 + k], qmf_c[192 + k]) +
602 MUL_F(*pring_buffer_3++, *pqmf_c_3++) + 427 MUL_F(v0[256 + k], qmf_c[256 + k]) +
603 MUL_F(*pring_buffer_4++, *pqmf_c_4++) + 428 MUL_F(v0[320 + k], qmf_c[320 + k]) +
604 MUL_F(*pring_buffer_5++, *pqmf_c_5++) + 429 MUL_F(v0[384 + k], qmf_c[384 + k]) +
605 MUL_F(*pring_buffer_6++, *pqmf_c_6++) + 430 MUL_F(v0[448 + k], qmf_c[448 + k]) +
606 MUL_F(*pring_buffer_7++, *pqmf_c_7++) + 431 MUL_F(v0[512 + k], qmf_c[512 + k]) +
607 MUL_F(*pring_buffer_8++, *pqmf_c_8++) + 432 MUL_F(v0[576 + k], qmf_c[576 + k]);
608 MUL_F(*pring_buffer_9++, *pqmf_c_9++) + 433 }
609 MUL_F(*pring_buffer_10++, *pqmf_c_10++); 434 }
610 #else // #ifdef PREFER_POINTERS 435 }
611 output[out++] = 436
612 MUL_F(pring_buffer_1[k+0], qmf_c[k+0]) + 437 #ifdef USE_SSE
613 MUL_F(pring_buffer_1[k+192], qmf_c[k+64]) + 438 void memmove_sse_576(real_t *out, const real_t *in)
614 MUL_F(pring_buffer_1[k+256], qmf_c[k+128]) + 439 {
615 MUL_F(pring_buffer_1[k+(256+192)], qmf_c[k+192]) + 440 __m128 m[144];
616 MUL_F(pring_buffer_1[k+512], qmf_c[k+256]) + 441 uint16_t i;
617 MUL_F(pring_buffer_1[k+(512+192)], qmf_c[k+320]) + 442
618 MUL_F(pring_buffer_1[k+768], qmf_c[k+384]) + 443 for (i = 0; i < 144; i++)
619 MUL_F(pring_buffer_1[k+(768+192)], qmf_c[k+448]) + 444 {
620 MUL_F(pring_buffer_1[k+1024], qmf_c[k+512]) + 445 m[i] = _mm_load_ps(&in[i*4]);
621 MUL_F(pring_buffer_1[k+(1024+192)], qmf_c[k+576]); 446 }
622 #endif // #ifdef PREFER_POINTERS 447 for (i = 0; i < 144; i++)
623 } 448 {
624 449 _mm_store_ps(&out[i*4], m[i]);
625 /* update ringbuffer index */ 450 }
626 qmfs->v_index -= 128; 451 }
627 if (qmfs->v_index < 0) 452
628 qmfs->v_index = (1280 - 128); 453 void sbr_qmf_synthesis_64_sse(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
629 } 454 real_t *output)
630 } 455 {
631 #endif 456 ALIGN real_t x1[64], x2[64];
632 457 real_t scale = 1.f/64.f;
633 #endif 458 int16_t n, k, out = 0;
459 uint8_t l;
460
461
462 /* qmf subsample l */
463 for (l = 0; l < sbr->numTimeSlotsRate; l++)
464 {
465 real_t *v0, *v1;
466
467 /* shift buffers */
468 memmove_sse_576(qmfs->v[0] + 64, qmfs->v[0]);
469 memmove_sse_576(qmfs->v[1] + 64, qmfs->v[1]);
470
471 v0 = qmfs->v[qmfs->v_index];
472 v1 = qmfs->v[(qmfs->v_index + 1) & 0x1];
473 qmfs->v_index = (qmfs->v_index + 1) & 0x1;
474
475 /* calculate 128 samples */
476 x1[0] = scale*QMF_RE(X[l][0]);
477 x2[63] = scale*QMF_IM(X[l][0]);
478 for (k = 0; k < 31; k++)
479 {
480 x1[2*k+1] = scale*(QMF_RE(X[l][2*k+1]) - QMF_RE(X[l][2*k+2]));
481 x1[2*k+2] = scale*(QMF_RE(X[l][2*k+1]) + QMF_RE(X[l][2*k+2]));
482
483 x2[61 - 2*k] = scale*(QMF_IM(X[l][2*k+2]) - QMF_IM(X[l][2*k+1]));
484 x2[62 - 2*k] = scale*(QMF_IM(X[l][2*k+2]) + QMF_IM(X[l][2*k+1]));
485 }
486 x1[63] = scale*QMF_RE(X[l][63]);
487 x2[0] = scale*QMF_IM(X[l][63]);
488
489 DCT4_64_kernel(x1, x1);
490 DCT4_64_kernel(x2, x2);
491
492 for (n = 0; n < 32; n++)
493 {
494 v0[ 2*n ] = x2[2*n] - x1[2*n];
495 v1[63- 2*n ] = x2[2*n] + x1[2*n];
496 v0[ 2*n+1 ] = -x2[2*n+1] - x1[2*n+1];
497 v1[63-(2*n+1)] = -x2[2*n+1] + x1[2*n+1];
498 }
499
500 /* calculate 64 output samples and window */
501 for (k = 0; k < 64; k+=4)
502 {
503 __m128 m0, m1, m2, m3, m4, m5, m6, m7, m8, m9;
504 __m128 c0, c1, c2, c3, c4, c5, c6, c7, c8, c9;
505 __m128 s1, s2, s3, s4, s5, s6, s7, s8, s9;
506
507 m0 = _mm_load_ps(&v0[k]);
508 m1 = _mm_load_ps(&v0[k + 64]);
509 m2 = _mm_load_ps(&v0[k + 128]);
510 m3 = _mm_load_ps(&v0[k + 192]);
511 m4 = _mm_load_ps(&v0[k + 256]);
512 c0 = _mm_load_ps(&qmf_c[k]);
513 c1 = _mm_load_ps(&qmf_c[k + 64]);
514 c2 = _mm_load_ps(&qmf_c[k + 128]);
515 c3 = _mm_load_ps(&qmf_c[k + 192]);
516 c4 = _mm_load_ps(&qmf_c[k + 256]);
517
518 m0 = _mm_mul_ps(m0, c0);
519 m1 = _mm_mul_ps(m1, c1);
520 m2 = _mm_mul_ps(m2, c2);
521 m3 = _mm_mul_ps(m3, c3);
522 m4 = _mm_mul_ps(m4, c4);
523
524 s1 = _mm_add_ps(m0, m1);
525 s2 = _mm_add_ps(m2, m3);
526 s6 = _mm_add_ps(s1, s2);
527
528 m5 = _mm_load_ps(&v0[k + 320]);
529 m6 = _mm_load_ps(&v0[k + 384]);
530 m7 = _mm_load_ps(&v0[k + 448]);
531 m8 = _mm_load_ps(&v0[k + 512]);
532 m9 = _mm_load_ps(&v0[k + 576]);
533 c5 = _mm_load_ps(&qmf_c[k + 320]);
534 c6 = _mm_load_ps(&qmf_c[k + 384]);
535 c7 = _mm_load_ps(&qmf_c[k + 448]);
536 c8 = _mm_load_ps(&qmf_c[k + 512]);
537 c9 = _mm_load_ps(&qmf_c[k + 576]);
538
539 m5 = _mm_mul_ps(m5, c5);
540 m6 = _mm_mul_ps(m6, c6);
541 m7 = _mm_mul_ps(m7, c7);
542 m8 = _mm_mul_ps(m8, c8);
543 m9 = _mm_mul_ps(m9, c9);
544
545 s3 = _mm_add_ps(m4, m5);
546 s4 = _mm_add_ps(m6, m7);
547 s5 = _mm_add_ps(m8, m9);
548 s7 = _mm_add_ps(s3, s4);
549 s8 = _mm_add_ps(s5, s6);
550 s9 = _mm_add_ps(s7, s8);
551
552 _mm_store_ps(&output[out], s9);
553 out += 4;
554 }
555 }
556 }
557 #endif
558 #endif
559
560 #endif
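Both variants ultimately rest on DCT-IV kernels (dct4_kernel and DCT4_32 on one side, DCT4_64_kernel on the other), with the input/output reordering either folded into the caller or into the kernel, as the comments above note. Purely as an illustrative reference, not FAAD2 code, the transform these kernels compute, up to scaling and that reordering, is the textbook DCT-IV; a direct O(N^2) version such as this is handy for checking the fast paths:

#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Unscaled DCT-IV: X[k] = sum_n x[n] * cos(pi/N * (n + 0.5) * (k + 0.5)).
 * x and X must not alias; N would be 32 or 64 for the filterbanks above. */
static void dct4_reference(const double *x, double *X, int N)
{
    int n, k;

    for (k = 0; k < N; k++)
    {
        double acc = 0.0;
        for (n = 0; n < N; n++)
            acc += x[n] * cos(M_PI / (double)N * (n + 0.5) * (k + 0.5));
        X[k] = acc;
    }
}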