Mercurial > mplayer.hg
comparison libfaad2/sbr_hfgen.c @ 10725:e989150f8216
libfaad2 v2.0rc1 imported
author | arpi |
---|---|
date | Sat, 30 Aug 2003 22:30:28 +0000 |
parents | |
children | 3185f64f6350 |
comparison
equal
deleted
inserted
replaced
10724:adf5697b9d83 | 10725:e989150f8216 |
---|---|
1 /* | |
2 ** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding | |
3 ** Copyright (C) 2003 M. Bakker, Ahead Software AG, http://www.nero.com | |
4 ** | |
5 ** This program is free software; you can redistribute it and/or modify | |
6 ** it under the terms of the GNU General Public License as published by | |
7 ** the Free Software Foundation; either version 2 of the License, or | |
8 ** (at your option) any later version. | |
9 ** | |
10 ** This program is distributed in the hope that it will be useful, | |
11 ** but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 ** GNU General Public License for more details. | |
14 ** | |
15 ** You should have received a copy of the GNU General Public License | |
16 ** along with this program; if not, write to the Free Software | |
17 ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
18 ** | |
19 ** Any non-GPL usage of this software or parts of this software is strictly | |
20 ** forbidden. | |
21 ** | |
22 ** Commercial non-GPL licensing of this software is possible. | |
23 ** For more info contact Ahead Software through Mpeg4AAClicense@nero.com. | |
24 ** | |
25 ** $Id: sbr_hfgen.c,v 1.1 2003/07/29 08:20:13 menno Exp $ | |
26 **/ | |
27 | |
28 /* High Frequency generation */ | |
29 | |
30 #include "common.h" | |
31 #include "structs.h" | |
32 | |
33 #ifdef SBR_DEC | |
34 | |
35 #include "sbr_syntax.h" | |
36 #include "sbr_hfgen.h" | |
37 #include "sbr_fbt.h" | |
38 | |
/* High Frequency generation (SBR): fills the high QMF subbands Xhigh by
 * patching the low subbands Xlow upwards in frequency, optionally applying
 * a second-order inverse filter whose strength is set by the chirp
 * factors in sbr->bwArray.
 *   Xlow  - low-band QMF matrix, 32 bins per time slot (indexing uses <<5)
 *   Xhigh - high-band QMF matrix, 64 bins per time slot (indexing uses <<6)
 *   deg   - (SBR_LOW_POWER only) per-band aliasing degree, filled here
 *   ch    - channel index
 */
void hf_generation(sbr_info *sbr, qmf_t *Xlow,
                   qmf_t *Xhigh
#ifdef SBR_LOW_POWER
                   ,real_t *deg
#endif
                   ,uint8_t ch)
{
    uint8_t l, i, x;
    complex_t alpha_0[64], alpha_1[64];
#ifdef SBR_LOW_POWER
    real_t rxx[64];
#endif

    /* update the chirp factors (bwArray) for this channel */
    calc_chirp_factors(sbr, ch);

    /* the patch layout depends only on header data, so it is rebuilt
       once per reset (on the first channel) */
    if ((ch == 0) && (sbr->Reset))
        patch_construction(sbr);

    /* calculate the prediction coefficients */
    calc_prediction_coef(sbr, Xlow, alpha_0, alpha_1
#ifdef SBR_LOW_POWER
        , rxx
#endif
        );

#ifdef SBR_LOW_POWER
    calc_aliasing_degree(sbr, rxx, deg);
#endif

    /* actual HF generation */
    for (i = 0; i < sbr->noPatches; i++)
    {
        for (x = 0; x < sbr->patchNoSubbands[i]; x++)
        {
            complex_t a0, a1;
            real_t bw, bw2;
            uint8_t q, p, k, g;

            /* find the low and high band for patching:
               k is the target (high) band, p the source (low) band */
            k = sbr->kx + x;
            for (q = 0; q < i; q++)
            {
                k += sbr->patchNoSubbands[q];
            }
            p = sbr->patchStartSubband[i] + x;

#ifdef SBR_LOW_POWER
            /* propagate the aliasing degree of the source band; the first
               band of each patch gets 0 */
            if (x != 0 /*x < sbr->patchNoSubbands[i]-1*/)
                deg[k] = deg[p];
            else
                deg[k] = 0;
#endif

            /* map the target band to its noise-floor band g */
            g = sbr->table_map_k_to_g[k];

            bw = sbr->bwArray[ch][g];
            bw2 = MUL_C_C(bw, bw);

            /* do the patching */
            /* with or without filtering */
            if (bw2 > 0)
            {
                /* chirp the prediction coefficients: a0 scaled by bw,
                   a1 by bw^2 */
                RE(a0) = MUL_R_C(RE(alpha_0[p]), bw);
                RE(a1) = MUL_R_C(RE(alpha_1[p]), bw2);
#ifndef SBR_LOW_POWER
                IM(a0) = MUL_R_C(IM(alpha_0[p]), bw);
                IM(a1) = MUL_R_C(IM(alpha_1[p]), bw2);
#endif

                for (l = sbr->t_E[ch][0]; l < sbr->t_E[ch][sbr->L_E[ch]]; l++)
                {
                    /* copy, then add the filtered contribution of the two
                       previous time slots: Xhigh += a0*Xlow[l-1] + a1*Xlow[l-2] */
                    QMF_RE(Xhigh[((l + tHFAdj)<<6) + k]) = QMF_RE(Xlow[((l + tHFAdj)<<5) + p]);
#ifndef SBR_LOW_POWER
                    QMF_IM(Xhigh[((l + tHFAdj)<<6) + k]) = QMF_IM(Xlow[((l + tHFAdj)<<5) + p]);
#endif

#ifdef SBR_LOW_POWER
                    QMF_RE(Xhigh[((l + tHFAdj)<<6) + k]) += (
                        MUL(RE(a0), QMF_RE(Xlow[((l - 1 + tHFAdj)<<5) + p])) +
                        MUL(RE(a1), QMF_RE(Xlow[((l - 2 + tHFAdj)<<5) + p])));
#else
                    /* full complex multiply-accumulate */
                    QMF_RE(Xhigh[((l + tHFAdj)<<6) + k]) += (
                        RE(a0) * QMF_RE(Xlow[((l - 1 + tHFAdj)<<5) + p]) -
                        IM(a0) * QMF_IM(Xlow[((l - 1 + tHFAdj)<<5) + p]) +
                        RE(a1) * QMF_RE(Xlow[((l - 2 + tHFAdj)<<5) + p]) -
                        IM(a1) * QMF_IM(Xlow[((l - 2 + tHFAdj)<<5) + p]));
                    QMF_IM(Xhigh[((l + tHFAdj)<<6) + k]) += (
                        IM(a0) * QMF_RE(Xlow[((l - 1 + tHFAdj)<<5) + p]) +
                        RE(a0) * QMF_IM(Xlow[((l - 1 + tHFAdj)<<5) + p]) +
                        IM(a1) * QMF_RE(Xlow[((l - 2 + tHFAdj)<<5) + p]) +
                        RE(a1) * QMF_IM(Xlow[((l - 2 + tHFAdj)<<5) + p]));
#endif
                }
            } else {
                /* bw == 0: plain copy, no inverse filtering */
                for (l = sbr->t_E[ch][0]; l < sbr->t_E[ch][sbr->L_E[ch]]; l++)
                {
                    QMF_RE(Xhigh[((l + tHFAdj)<<6) + k]) = QMF_RE(Xlow[((l + tHFAdj)<<5) + p]);
#ifndef SBR_LOW_POWER
                    QMF_IM(Xhigh[((l + tHFAdj)<<6) + k]) = QMF_IM(Xlow[((l + tHFAdj)<<5) + p]);
#endif
                }
            }
        }
    }

#if 0
    /* debug dump of the aliasing degree for one specific frame */
    if (sbr->frame == 179)
    {
        for (l = 0; l < 64; l++)
        {
            printf("%d %.3f\n", l, deg[l]);
        }
    }
#endif

    if (sbr->Reset)
    {
        limiter_frequency_table(sbr);
    }
}
160 | |
/* Auto-correlation terms of one QMF band over a frame, produced by
 * auto_correlation() and consumed by calc_prediction_coef().
 * r01, r02, r11, r12, r22 are the correlations between the signal at
 * time lags (0,1), (0,2), (1,1), (1,2) and (2,2); det is the
 * (regularised) determinant of the 2x2 covariance system that is solved
 * for the second-order prediction coefficients.
 */
typedef struct
{
    complex_t r01;
    complex_t r02;
    complex_t r11;
    complex_t r12;
    complex_t r22;
    real_t det;
} acorr_coef;
170 | |
/* Absolute value. The whole expansion is parenthesised so the macro can
 * safely be embedded in larger expressions (e.g. inside max()); without
 * the outer parentheses `x * SBR_ABS(y)` would parse as
 * `(x * ((y) < 0)) ? -(y) : (y)`. */
#define SBR_ABS(A) (((A) < 0) ? -(A) : (A))
172 | |
173 static void auto_correlation(acorr_coef *ac, qmf_t *buffer, | |
174 uint8_t bd, uint8_t len) | |
175 { | |
176 int8_t j, jminus1, jminus2; | |
177 const real_t rel = COEF_CONST(0.9999999999999); // 1 / (1 + 1e-6f); | |
178 | |
179 #ifdef FIXED_POINT | |
180 /* | |
181 * For computing the covariance matrix and the filter coefficients | |
182 * in fixed point, all values are normalised so that the fixed point | |
183 * values don't overflow. | |
184 */ | |
185 uint32_t max = 0; | |
186 uint32_t pow2, exp; | |
187 | |
188 for (j = tHFAdj-2; j < len + tHFAdj; j++) | |
189 { | |
190 max = max(SBR_ABS(QMF_RE(buffer[j*32 + bd])>>REAL_BITS), max); | |
191 } | |
192 | |
193 /* find the first power of 2 bigger than max to avoid division */ | |
194 pow2 = 1; | |
195 exp = 0; | |
196 while (max > pow2) | |
197 { | |
198 pow2 <<= 1; | |
199 exp++; | |
200 } | |
201 | |
202 /* give some more space */ | |
203 // if (exp > 3) | |
204 // exp -= 3; | |
205 #endif | |
206 | |
207 memset(ac, 0, sizeof(acorr_coef)); | |
208 | |
209 for (j = tHFAdj; j < len + tHFAdj; j++) | |
210 { | |
211 jminus1 = j - 1; | |
212 jminus2 = jminus1 - 1; | |
213 | |
214 #ifdef SBR_LOW_POWER | |
215 #ifdef FIXED_POINT | |
216 /* normalisation with rounding */ | |
217 RE(ac->r01) += MUL(((QMF_RE(buffer[j*32 + bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[jminus1*32 + bd])+(1<<(exp-1)))>>exp)); | |
218 RE(ac->r02) += MUL(((QMF_RE(buffer[j*32 + bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[jminus2*32 + bd])+(1<<(exp-1)))>>exp)); | |
219 RE(ac->r11) += MUL(((QMF_RE(buffer[jminus1*32 + bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[jminus1*32 + bd])+(1<<(exp-1)))>>exp)); | |
220 RE(ac->r12) += MUL(((QMF_RE(buffer[jminus1*32 + bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[jminus2*32 + bd])+(1<<(exp-1)))>>exp)); | |
221 RE(ac->r22) += MUL(((QMF_RE(buffer[jminus2*32 + bd])+(1<<(exp-1)))>>exp), ((QMF_RE(buffer[jminus2*32 + bd])+(1<<(exp-1)))>>exp)); | |
222 #else | |
223 RE(ac->r01) += QMF_RE(buffer[j*32 + bd]) * QMF_RE(buffer[jminus1*32 + bd]); | |
224 RE(ac->r02) += QMF_RE(buffer[j*32 + bd]) * QMF_RE(buffer[jminus2*32 + bd]); | |
225 RE(ac->r11) += QMF_RE(buffer[jminus1*32 + bd]) * QMF_RE(buffer[jminus1*32 + bd]); | |
226 RE(ac->r12) += QMF_RE(buffer[jminus1*32 + bd]) * QMF_RE(buffer[jminus2*32 + bd]); | |
227 RE(ac->r22) += QMF_RE(buffer[jminus2*32 + bd]) * QMF_RE(buffer[jminus2*32 + bd]); | |
228 #endif | |
229 #else | |
230 RE(ac->r01) += QMF_RE(buffer[j*32 + bd]) * QMF_RE(buffer[jminus1*32 + bd]) + | |
231 QMF_IM(buffer[j*32 + bd]) * QMF_IM(buffer[jminus1*32 + bd]); | |
232 | |
233 IM(ac->r01) += QMF_IM(buffer[j*32 + bd]) * QMF_RE(buffer[jminus1*32 + bd]) - | |
234 QMF_RE(buffer[j*32 + bd]) * QMF_IM(buffer[jminus1*32 + bd]); | |
235 | |
236 RE(ac->r02) += QMF_RE(buffer[j*32 + bd]) * QMF_RE(buffer[jminus2*32 + bd]) + | |
237 QMF_IM(buffer[j*32 + bd]) * QMF_IM(buffer[jminus2*32 + bd]); | |
238 | |
239 IM(ac->r02) += QMF_IM(buffer[j*32 + bd]) * QMF_RE(buffer[jminus2*32 + bd]) - | |
240 QMF_RE(buffer[j*32 + bd]) * QMF_IM(buffer[jminus2*32 + bd]); | |
241 | |
242 RE(ac->r11) += QMF_RE(buffer[jminus1*32 + bd]) * QMF_RE(buffer[jminus1*32 + bd]) + | |
243 QMF_IM(buffer[jminus1*32 + bd]) * QMF_IM(buffer[jminus1*32 + bd]); | |
244 | |
245 RE(ac->r12) += QMF_RE(buffer[jminus1*32 + bd]) * QMF_RE(buffer[jminus2*32 + bd]) + | |
246 QMF_IM(buffer[jminus1*32 + bd]) * QMF_IM(buffer[jminus2*32 + bd]); | |
247 | |
248 IM(ac->r12) += QMF_IM(buffer[jminus1*32 + bd]) * QMF_RE(buffer[jminus2*32 + bd]) - | |
249 QMF_RE(buffer[jminus1*32 + bd]) * QMF_IM(buffer[jminus2*32 + bd]); | |
250 | |
251 RE(ac->r22) += QMF_RE(buffer[jminus2*32 + bd]) * QMF_RE(buffer[jminus2*32 + bd]) + | |
252 QMF_IM(buffer[jminus2*32 + bd]) * QMF_IM(buffer[jminus2*32 + bd]); | |
253 #endif | |
254 } | |
255 | |
256 #ifdef SBR_LOW_POWER | |
257 ac->det = MUL(RE(ac->r11), RE(ac->r22)) - MUL_R_C(MUL(RE(ac->r12), RE(ac->r12)), rel); | |
258 #else | |
259 ac->det = RE(ac->r11) * RE(ac->r22) - rel * (RE(ac->r12) * RE(ac->r12) + IM(ac->r12) * IM(ac->r12)); | |
260 #endif | |
261 | |
262 #if 0 | |
263 if (ac->det != 0) | |
264 printf("%f %f\n", ac->det, max); | |
265 #endif | |
266 } | |
267 | |
/* Solves, per low band k in [1, kx), the 2x2 covariance system for the
 * second-order linear prediction coefficients alpha_0[k] and alpha_1[k]
 * used by hf_generation() for inverse filtering.  If the system is
 * singular (det == 0 or r11 == 0) the coefficients are zeroed; they are
 * also reset when their magnitude indicates an unstable filter.
 * In SBR_LOW_POWER builds the reflection coefficient
 * rxx[k] = -r01/r11 (clamped to [-1, 1]) is additionally returned for
 * the aliasing-degree estimation.
 * NOTE(review): index 0 of alpha_0/alpha_1/rxx is not written here -
 * presumably band 0 is never used as a patch source; confirm against
 * patch_construction().
 */
static void calc_prediction_coef(sbr_info *sbr, qmf_t *Xlow,
                                 complex_t *alpha_0, complex_t *alpha_1
#ifdef SBR_LOW_POWER
                                 , real_t *rxx
#endif
                                 )
{
    uint8_t k;
    real_t tmp;
    acorr_coef ac;

    for (k = 1; k < sbr->kx; k++)
    {
        /* correlate this band over 38 time slots */
        auto_correlation(&ac, Xlow, k, 38);

#ifdef SBR_LOW_POWER
        /* real-valued solution */
        if (ac.det == 0)
        {
            RE(alpha_1[k]) = 0;
        } else {
            tmp = MUL(RE(ac.r01), RE(ac.r12)) - MUL(RE(ac.r02), RE(ac.r11));
            RE(alpha_1[k]) = SBR_DIV(tmp, ac.det);
        }

        if (RE(ac.r11) == 0)
        {
            RE(alpha_0[k]) = 0;
        } else {
            tmp = RE(ac.r01) + MUL(RE(alpha_1[k]), RE(ac.r12));
            RE(alpha_0[k]) = -SBR_DIV(tmp, RE(ac.r11));
        }

        /* reject coefficients that would make the filter blow up */
        if ((RE(alpha_0[k]) >= REAL_CONST(4)) || (RE(alpha_1[k]) >= REAL_CONST(4)))
        {
            RE(alpha_0[k]) = REAL_CONST(0);
            RE(alpha_1[k]) = REAL_CONST(0);
        }

        /* reflection coefficient */
        if (RE(ac.r11) == REAL_CONST(0.0))
        {
            rxx[k] = REAL_CONST(0.0);
        } else {
            rxx[k] = -SBR_DIV(RE(ac.r01), RE(ac.r11));
            if (rxx[k] > REAL_CONST(1.0)) rxx[k] = REAL_CONST(1.0);
            if (rxx[k] < REAL_CONST(-1.0)) rxx[k] = REAL_CONST(-1.0);
        }
#else
        /* complex-valued solution */
        if (ac.det == 0)
        {
            RE(alpha_1[k]) = 0;
            IM(alpha_1[k]) = 0;
        } else {
            tmp = 1.0 / ac.det;
            RE(alpha_1[k]) = (RE(ac.r01) * RE(ac.r12) - IM(ac.r01) * IM(ac.r12) - RE(ac.r02) * RE(ac.r11)) * tmp;
            IM(alpha_1[k]) = (IM(ac.r01) * RE(ac.r12) + RE(ac.r01) * IM(ac.r12) - IM(ac.r02) * RE(ac.r11)) * tmp;
        }

        if (RE(ac.r11) == 0)
        {
            RE(alpha_0[k]) = 0;
            IM(alpha_0[k]) = 0;
        } else {
            tmp = 1.0f / RE(ac.r11);
            RE(alpha_0[k]) = -(RE(ac.r01) + RE(alpha_1[k]) * RE(ac.r12) + IM(alpha_1[k]) * IM(ac.r12)) * tmp;
            IM(alpha_0[k]) = -(IM(ac.r01) + IM(alpha_1[k]) * RE(ac.r12) - RE(alpha_1[k]) * IM(ac.r12)) * tmp;
        }

        /* reset both coefficients when either magnitude reaches 4
           (|alpha|^2 >= 16): filter considered unstable */
        if ((RE(alpha_0[k])*RE(alpha_0[k]) + IM(alpha_0[k])*IM(alpha_0[k]) >= 16) ||
            (RE(alpha_1[k])*RE(alpha_1[k]) + IM(alpha_1[k])*IM(alpha_1[k]) >= 16))
        {
            RE(alpha_0[k]) = 0;
            IM(alpha_0[k]) = 0;
            RE(alpha_1[k]) = 0;
            IM(alpha_1[k]) = 0;
        }
#endif
    }
}
347 | |
#ifdef SBR_LOW_POWER
/* Estimates the aliasing degree deg[k] for every band below k0, using a
 * heuristic on the signs of adjacent reflection coefficients rxx[] from
 * calc_prediction_coef().  deg values are later used to attenuate
 * aliasing in the low-power decoding path.
 * Consistency fix: all zero/one constants now go through REAL_CONST()
 * as the rest of the file does, instead of mixing in bare float
 * literals (0.0) - numerically identical, but safe for FIXED_POINT.
 * NOTE(review): deg[0] is never written here - presumably callers only
 * read deg[] from index 1 upwards; confirm at the call sites.
 */
static void calc_aliasing_degree(sbr_info *sbr, real_t *rxx, real_t *deg)
{
    uint8_t k;

    rxx[0] = REAL_CONST(0.0);
    deg[1] = REAL_CONST(0.0);

    for (k = 2; k < sbr->k0; k++)
    {
        deg[k] = REAL_CONST(0.0);

        /* even band with a negative reflection coefficient */
        if ((k % 2 == 0) && (rxx[k] < REAL_CONST(0.0)))
        {
            if (rxx[k-1] < REAL_CONST(0.0))
            {
                deg[k] = REAL_CONST(1.0);

                if (rxx[k-2] > REAL_CONST(0.0))
                {
                    deg[k-1] = REAL_CONST(1.0) - MUL(rxx[k-1], rxx[k-1]);
                }
            } else if (rxx[k-2] > REAL_CONST(0.0)) {
                deg[k] = REAL_CONST(1.0) - MUL(rxx[k-1], rxx[k-1]);
            }
        }

        /* odd band with a positive reflection coefficient */
        if ((k % 2 == 1) && (rxx[k] > REAL_CONST(0.0)))
        {
            if (rxx[k-1] > REAL_CONST(0.0))
            {
                deg[k] = REAL_CONST(1.0);

                if (rxx[k-2] < REAL_CONST(0.0))
                {
                    deg[k-1] = REAL_CONST(1.0) - MUL(rxx[k-1], rxx[k-1]);
                }
            } else if (rxx[k-2] < REAL_CONST(0.0)) {
                deg[k] = REAL_CONST(1.0) - MUL(rxx[k-1], rxx[k-1]);
            }
        }
    }
}
#endif
392 | |
393 static real_t mapNewBw(uint8_t invf_mode, uint8_t invf_mode_prev) | |
394 { | |
395 switch (invf_mode) | |
396 { | |
397 case 1: /* LOW */ | |
398 if (invf_mode_prev == 0) /* NONE */ | |
399 return COEF_CONST(0.6); | |
400 else | |
401 return COEF_CONST(0.75); | |
402 | |
403 case 2: /* MID */ | |
404 return COEF_CONST(0.9); | |
405 | |
406 case 3: /* HIGH */ | |
407 return COEF_CONST(0.98); | |
408 | |
409 default: /* NONE */ | |
410 if (invf_mode_prev == 1) /* LOW */ | |
411 return COEF_CONST(0.6); | |
412 else | |
413 return COEF_CONST(0.0); | |
414 } | |
415 } | |
416 | |
417 static void calc_chirp_factors(sbr_info *sbr, uint8_t ch) | |
418 { | |
419 uint8_t i; | |
420 | |
421 for (i = 0; i < sbr->N_Q; i++) | |
422 { | |
423 sbr->bwArray[ch][i] = mapNewBw(sbr->bs_invf_mode[ch][i], sbr->bs_invf_mode_prev[ch][i]); | |
424 | |
425 if (sbr->bwArray[ch][i] < sbr->bwArray_prev[ch][i]) | |
426 sbr->bwArray[ch][i] = MUL_C_C(COEF_CONST(0.75), sbr->bwArray[ch][i]) + MUL_C_C(COEF_CONST(0.25), sbr->bwArray_prev[ch][i]); | |
427 else | |
428 sbr->bwArray[ch][i] = MUL_C_C(COEF_CONST(0.90625), sbr->bwArray[ch][i]) + MUL_C_C(COEF_CONST(0.09375), sbr->bwArray_prev[ch][i]); | |
429 | |
430 if (sbr->bwArray[ch][i] < COEF_CONST(0.015625)) | |
431 sbr->bwArray[ch][i] = COEF_CONST(0.0); | |
432 | |
433 if (sbr->bwArray[ch][i] >= COEF_CONST(0.99609375)) | |
434 sbr->bwArray[ch][i] = COEF_CONST(0.99609375); | |
435 | |
436 sbr->bwArray_prev[ch][i] = sbr->bwArray[ch][i]; | |
437 sbr->bs_invf_mode_prev[ch][i] = sbr->bs_invf_mode[ch][i]; | |
438 } | |
439 } | |
440 | |
/* Builds the patch table (sbr->noPatches, patchStartSubband[],
 * patchNoSubbands[]) that maps ranges of low subbands onto the high
 * subbands to be generated, following the patch-construction flowchart
 * of the MPEG-4 SBR specification.
 */
static void patch_construction(sbr_info *sbr)
{
    uint8_t i, k;
    uint8_t odd, sb;
    uint8_t msb = sbr->k0;   /* highest source subband usable so far */
    uint8_t usb = sbr->kx;   /* current upper border of generated bands */
    /* target patch border in subbands: round(2.048e6 / fs) */
    uint32_t goalSb = (uint32_t)(2.048e6/sbr->sample_rate + 0.5);

    sbr->noPatches = 0;

    if (goalSb < (sbr->kx + sbr->M))
    {
        /* k = index of the first master-table border not below goalSb */
        for (i = 0, k = 0; sbr->f_master[i] < goalSb; i++)
            k = i+1;
    } else {
        k = sbr->N_master;
    }

    do
    {
        uint8_t j = k + 1;

        /* search downwards in the master table for the largest border sb
           that the available source range can still supply */
        do
        {
            j--;

            sb = sbr->f_master[j];
            /* parity correction so the patch source stays aligned */
            odd = (sb - 2 + sbr->k0) % 2;
        } while (sb > (sbr->k0 - 1 + msb - odd));

        /* number of target bands in this patch (0 if nothing fits) */
        sbr->patchNoSubbands[sbr->noPatches] = max(sb - usb, 0);
        sbr->patchStartSubband[sbr->noPatches] = sbr->k0 - odd -
            sbr->patchNoSubbands[sbr->noPatches];

        if (sbr->patchNoSubbands[sbr->noPatches] > 0)
        {
            usb = sb;
            msb = sb;
            sbr->noPatches++;
        } else {
            /* empty patch: restart the source search from kx */
            msb = sbr->kx;
        }

        /* if the search stalled at border k, continue from the table top */
        if (sb == sbr->f_master[k])
            k = sbr->N_master;
    } while (sb != (sbr->kx + sbr->M));

    /* drop a trailing patch of fewer than 3 bands (keep at least one) */
    if ((sbr->patchNoSubbands[sbr->noPatches-1] < 3) &&
        (sbr->noPatches > 1))
    {
        sbr->noPatches--;
    }

    /* at most 5 patches are allowed */
    sbr->noPatches = min(sbr->noPatches, 5);
}
496 | |
497 #endif |