10073
|
1 /*
|
|
2 * Wmapro compatible decoder
|
|
3 * Copyright (c) 2007 Baptiste Coudurier, Benjamin Larsson, Ulion
|
|
4 * Copyright (c) 2008 - 2009 Sascha Sommer, Benjamin Larsson
|
|
5 *
|
|
6 * This file is part of FFmpeg.
|
|
7 *
|
|
8 * FFmpeg is free software; you can redistribute it and/or
|
|
9 * modify it under the terms of the GNU Lesser General Public
|
|
10 * License as published by the Free Software Foundation; either
|
|
11 * version 2.1 of the License, or (at your option) any later version.
|
|
12 *
|
|
13 * FFmpeg is distributed in the hope that it will be useful,
|
|
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
16 * Lesser General Public License for more details.
|
|
17 *
|
|
18 * You should have received a copy of the GNU Lesser General Public
|
|
19 * License along with FFmpeg; if not, write to the Free Software
|
|
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
21 */
|
|
22
|
|
23 /**
|
|
24 * @file libavcodec/wmaprodec.c
|
|
25 * @brief wmapro decoder implementation
|
|
26 * Wmapro is an MDCT based codec comparable to wma standard or AAC.
|
|
27 * The decoding therefore consists of the following steps:
|
|
28 * - bitstream decoding
|
|
29 * - reconstruction of per-channel data
|
|
30 * - rescaling and inverse quantization
|
|
31 * - IMDCT
|
|
32 * - windowing and overlap-add
|
|
33 *
|
|
34 * The compressed wmapro bitstream is split into individual packets.
|
|
35 * Every such packet contains one or more wma frames.
|
|
36 * The compressed frames may have a variable length and frames may
|
|
37 * cross packet boundaries.
|
|
38 * Common to all wmapro frames is the number of samples that are stored in
|
|
39 * a frame.
|
|
40 * The number of samples and a few other decode flags are stored
|
|
41 * as extradata that has to be passed to the decoder.
|
|
42 *
|
|
43 * The wmapro frames themselves are again split into a variable number of
|
|
44 * subframes. Every subframe contains the data for 2^N time domain samples
|
|
45 * where N varies between 7 and 12.
|
|
46 *
|
|
47 * Example wmapro bitstream (in samples):
|
|
48 *
|
|
49 * || packet 0 || packet 1 || packet 2 packets
|
|
50 * ---------------------------------------------------
|
|
51 * || frame 0 || frame 1 || frame 2 || frames
|
|
52 * ---------------------------------------------------
|
|
53 * || | | || | | | || || subframes of channel 0
|
|
54 * ---------------------------------------------------
|
|
55 * || | | || | | | || || subframes of channel 1
|
|
56 * ---------------------------------------------------
|
|
57 *
|
|
58 * The frame layouts for the individual channels of a wma frame do not need
|
|
59 * to be the same.
|
|
60 *
|
|
61 * However, if the offsets and lengths of several subframes of a frame are the
|
|
62 * same, the subframes of the channels can be grouped.
|
|
63 * Every group may then use special coding techniques like M/S stereo coding
|
|
64 * to improve the compression ratio. These channel transformations do not
|
|
65 * need to be applied to a whole subframe. Instead, they can also work on
|
|
66 * individual scale factor bands (see below).
|
|
67 * The coefficients that carry the audio signal in the frequency domain
|
|
68 * are transmitted as huffman-coded vectors with 4, 2 and 1 elements.
|
|
69 * In addition to that, the encoder can switch to a runlevel coding scheme
|
|
70 * by transmitting subframe_length / 128 zero coefficients.
|
|
71 *
|
|
72 * Before the audio signal can be converted to the time domain, the
|
|
73 * coefficients have to be rescaled and inverse quantized.
|
|
74 * A subframe is therefore split into several scale factor bands that get
|
|
75 * scaled individually.
|
|
76 * Scale factors are submitted for every frame but they might be shared
|
|
77 * between the subframes of a channel. Scale factors are initially DPCM-coded.
|
|
78 * Once scale factors are shared, the differences are transmitted as runlevel
|
|
79 * codes.
|
|
80 * Every subframe length and offset combination in the frame layout shares a
|
|
81 * common quantization factor that can be adjusted for every channel by a
|
|
82 * modifier.
|
|
83 * After the inverse quantization, the coefficients get processed by an IMDCT.
|
|
84 * The resulting values are then windowed with a sine window and the first half
|
|
85 * of the values are added to the second half of the output from the previous
|
|
86 * subframe in order to reconstruct the output samples.
|
|
87 */
|
|
88
|
10005
|
89 /**
|
|
90 *@brief Uninitialize the decoder and free all resources.
|
|
91 *@param avctx codec context
|
|
92 *@return 0 on success, < 0 otherwise
|
|
93 */
|
|
94 static av_cold int decode_end(AVCodecContext *avctx)
|
|
95 {
|
|
96 WMA3DecodeContext *s = avctx->priv_data;
|
|
97 int i;
|
|
98
|
10006
|
99 for (i = 0 ; i < WMAPRO_BLOCK_SIZES ; i++)
|
10005
|
100 ff_mdct_end(&s->mdct_ctx[i]);
|
|
101
|
|
102 return 0;
|
|
103 }
|
|
104
|
|
105 /**
|
|
106 *@brief Calculate a decorrelation matrix from the bitstream parameters.
|
|
107 *@param s codec context
|
|
108 *@param chgroup channel group for which the matrix needs to be calculated
|
|
109 */
|
10006
|
110 static void decode_decorrelation_matrix(WMA3DecodeContext *s,
|
|
111 WMA3ChannelGroup *chgroup)
|
10005
|
112 {
|
|
113 int i;
|
|
114 int offset = 0;
|
|
115 int8_t rotation_offset[WMAPRO_MAX_CHANNELS * WMAPRO_MAX_CHANNELS];
|
|
116 memset(chgroup->decorrelation_matrix,0,
|
|
117 sizeof(float) *s->num_channels * s->num_channels);
|
|
118
|
10006
|
119 for (i = 0; i < chgroup->num_channels * (chgroup->num_channels - 1) >> 1; i++)
|
10005
|
120 rotation_offset[i] = get_bits(&s->gb,6);
|
|
121
|
10006
|
122 for (i = 0; i < chgroup->num_channels; i++)
|
10005
|
123 chgroup->decorrelation_matrix[chgroup->num_channels * i + i] =
|
|
124 get_bits1(&s->gb) ? 1.0 : -1.0;
|
|
125
|
10006
|
126 for (i = 1; i < chgroup->num_channels; i++) {
|
10005
|
127 int x;
|
10006
|
128 for (x = 0; x < i; x++) {
|
10005
|
129 int y;
|
10006
|
130 for (y = 0; y < i + 1 ; y++) {
|
10005
|
131 float v1 = chgroup->decorrelation_matrix[x * chgroup->num_channels + y];
|
|
132 float v2 = chgroup->decorrelation_matrix[i * chgroup->num_channels + y];
|
|
133 int n = rotation_offset[offset + x];
|
|
134 float sinv;
|
|
135 float cosv;
|
|
136
|
10006
|
137 if (n < 32) {
|
10005
|
138 sinv = sin64[n];
|
|
139 cosv = sin64[32-n];
|
|
140 } else {
|
|
141 sinv = sin64[64-n];
|
|
142 cosv = -sin64[n-32];
|
|
143 }
|
|
144
|
|
145 chgroup->decorrelation_matrix[y + x * chgroup->num_channels] =
|
|
146 (v1 * sinv) - (v2 * cosv);
|
|
147 chgroup->decorrelation_matrix[y + i * chgroup->num_channels] =
|
|
148 (v1 * cosv) + (v2 * sinv);
|
|
149 }
|
|
150 }
|
|
151 offset += i;
|
|
152 }
|
|
153 }
|
|
154
|
|
155 /**
|
|
156 *@brief Reconstruct the individual channel data.
|
|
157 *@param s codec context
|
|
158 */
|
|
159 static void inverse_channel_transform(WMA3DecodeContext *s)
|
|
160 {
|
|
161 int i;
|
|
162
|
10006
|
163 for (i = 0; i < s->num_chgroups; i++) {
|
10005
|
164
|
|
165 if (s->chgroup[i].transform == 1) {
|
|
166 /** M/S stereo decoding */
|
|
167 int16_t* sfb_offsets = s->cur_sfb_offsets;
|
|
168 float* ch0 = *sfb_offsets + s->channel[0].coeffs;
|
|
169 float* ch1 = *sfb_offsets++ + s->channel[1].coeffs;
|
|
170 const char* tb = s->chgroup[i].transform_band;
|
|
171 const char* tb_end = tb + s->num_bands;
|
|
172
|
|
173 while (tb < tb_end) {
|
|
174 const float* ch0_end = s->channel[0].coeffs +
|
|
175 FFMIN(*sfb_offsets,s->subframe_len);
|
|
176 if (*tb++ == 1) {
|
|
177 while (ch0 < ch0_end) {
|
|
178 const float v1 = *ch0;
|
|
179 const float v2 = *ch1;
|
|
180 *ch0++ = v1 - v2;
|
|
181 *ch1++ = v1 + v2;
|
|
182 }
|
|
183 } else {
|
|
184 while (ch0 < ch0_end) {
|
|
185 *ch0++ *= 181.0 / 128;
|
|
186 *ch1++ *= 181.0 / 128;
|
|
187 }
|
|
188 }
|
|
189 ++sfb_offsets;
|
|
190 }
|
|
191 } else if (s->chgroup[i].transform) {
|
|
192 float data[WMAPRO_MAX_CHANNELS];
|
|
193 const int num_channels = s->chgroup[i].num_channels;
|
|
194 float** ch_data = s->chgroup[i].channel_data;
|
|
195 float** ch_end = ch_data + num_channels;
|
|
196 const int8_t* tb = s->chgroup[i].transform_band;
|
|
197 int16_t* sfb;
|
|
198
|
|
199 /** multichannel decorrelation */
|
|
200 for (sfb = s->cur_sfb_offsets ;
|
|
201 sfb < s->cur_sfb_offsets + s->num_bands;sfb++) {
|
|
202 if (*tb++ == 1) {
|
|
203 int y;
|
|
204 /** multiply values with the decorrelation_matrix */
|
10006
|
205 for (y = sfb[0]; y < FFMIN(sfb[1], s->subframe_len); y++) {
|
10005
|
206 const float* mat = s->chgroup[i].decorrelation_matrix;
|
10006
|
207 const float* data_end = data + num_channels;
|
|
208 float* data_ptr = data;
|
10005
|
209 float** ch;
|
|
210
|
|
211 for (ch = ch_data;ch < ch_end; ch++)
|
|
212 *data_ptr++ = (*ch)[y];
|
|
213
|
|
214 for (ch = ch_data; ch < ch_end; ch++) {
|
|
215 float sum = 0;
|
|
216 data_ptr = data;
|
|
217 while (data_ptr < data_end)
|
|
218 sum += *data_ptr++ * *mat++;
|
|
219
|
|
220 (*ch)[y] = sum;
|
|
221 }
|
|
222 }
|
|
223 }
|
|
224 }
|
|
225 }
|
|
226 }
|
|
227 }
|
|
228
|