Mercurial > libavcodec.hg
annotate rv34.c @ 6096:89140b93ae09 libavcodec
Direct blocks should use motion vectors from the second reference frame
author | kostya |
---|---|
date | Mon, 31 Dec 2007 07:12:50 +0000 |
parents | ce3b68242317 |
children | a2b438bcb1d2 |
rev | line source |
---|---|
6026 | 1 /* |
2 * RV30/40 decoder common data | |
3 * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov | |
4 * | |
5 * This file is part of FFmpeg. | |
6 * | |
7 * FFmpeg is free software; you can redistribute it and/or | |
8 * modify it under the terms of the GNU Lesser General Public | |
9 * License as published by the Free Software Foundation; either | |
10 * version 2.1 of the License, or (at your option) any later version. | |
11 * | |
12 * FFmpeg is distributed in the hope that it will be useful, | |
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 * Lesser General Public License for more details. | |
16 * | |
17 * You should have received a copy of the GNU Lesser General Public | |
18 * License along with FFmpeg; if not, write to the Free Software | |
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
20 */ | |
21 | |
22 /** | |
23 * @file rv34.c | |
24 * RV30/40 decoder common data | |
25 */ | |
26 | |
27 #include "avcodec.h" | |
28 #include "dsputil.h" | |
29 #include "mpegvideo.h" | |
30 #include "golomb.h" | |
31 #include "rectangle.h" | |
32 | |
33 #include "rv34vlc.h" | |
34 #include "rv34data.h" | |
35 #include "rv34.h" | |
36 | |
37 //#define DEBUG | |
38 | |
/** translation of RV30/40 macroblock types to lavc ones
 *  (indexed by the RV34_MB_* type decoded from the bitstream;
 *   TODO confirm ordering against the RV34_MB_* enum in rv34.h) */
static const int rv34_mb_type_to_lavc[12] = {
    MB_TYPE_INTRA,
    MB_TYPE_INTRA16x16,
    MB_TYPE_16x16   | MB_TYPE_L0,
    MB_TYPE_8x8     | MB_TYPE_L0,
    MB_TYPE_16x16   | MB_TYPE_L0,
    MB_TYPE_16x16   | MB_TYPE_L1,
    MB_TYPE_SKIP,
    MB_TYPE_DIRECT2 | MB_TYPE_16x16,
    MB_TYPE_16x8    | MB_TYPE_L0,
    MB_TYPE_8x16    | MB_TYPE_L0,
    MB_TYPE_16x16   | MB_TYPE_L0L1,
    MB_TYPE_16x16   | MB_TYPE_L0
};
54 | |
55 | |
/** VLC table sets for intra and inter blocks, filled once by rv34_init_tables() */
static RV34VLC intra_vlcs[NUM_INTRA_TABLES], inter_vlcs[NUM_INTER_TABLES];
57 | |
58 /** | |
59 * @defgroup vlc RV30/40 VLC generating functions | |
60 * @{ | |
61 */ | |
62 | |
63 /** | |
64 * Generate VLC from codeword lengths. | |
65 * @param bits codeword lengths (zeroes are accepted) | |
66 * @param size length of input data | |
67 * @param insyms symbols for input codes (NULL for default ones) | |
68 */ | |
69 static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *insyms) | |
70 { | |
71 int i; | |
72 int counts[17] = {0}, codes[17]; | |
73 uint16_t cw[size], syms[size]; | |
74 uint8_t bits2[size]; | |
75 int maxbits = 0, realsize = 0; | |
76 | |
77 for(i = 0; i < size; i++){ | |
78 if(bits[i]){ | |
79 bits2[realsize] = bits[i]; | |
80 syms[realsize] = insyms ? insyms[i] : i; | |
81 realsize++; | |
82 maxbits = FFMAX(maxbits, bits[i]); | |
83 counts[bits[i]]++; | |
84 } | |
85 } | |
86 | |
87 codes[0] = 0; | |
88 for(i = 0; i < 16; i++) | |
89 codes[i+1] = (codes[i] + counts[i]) << 1; | |
90 for(i = 0; i < realsize; i++) | |
91 cw[i] = codes[bits2[i]]++; | |
92 | |
93 init_vlc_sparse(vlc, FFMIN(maxbits, 9), realsize, | |
94 bits2, 1, 1, | |
95 cw, 2, 2, | |
96 syms, 2, 2, INIT_VLC_USE_STATIC); | |
97 } | |
98 | |
99 /** | |
100 * Initialize all tables. | |
101 */ | |
102 static void rv34_init_tables() | |
103 { | |
104 int i, j, k; | |
105 | |
106 for(i = 0; i < NUM_INTRA_TABLES; i++){ | |
107 for(j = 0; j < 2; j++){ | |
108 rv34_gen_vlc(rv34_table_intra_cbppat [i][j], CBPPAT_VLC_SIZE, &intra_vlcs[i].cbppattern[j], NULL); | |
109 rv34_gen_vlc(rv34_table_intra_secondpat[i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].second_pattern[j], NULL); | |
110 rv34_gen_vlc(rv34_table_intra_thirdpat [i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].third_pattern[j], NULL); | |
111 for(k = 0; k < 4; k++) | |
112 rv34_gen_vlc(rv34_table_intra_cbp[i][j+k*2], CBP_VLC_SIZE, &intra_vlcs[i].cbp[j][k], rv34_cbp_code); | |
113 } | |
114 for(j = 0; j < 4; j++) | |
115 rv34_gen_vlc(rv34_table_intra_firstpat[i][j], FIRSTBLK_VLC_SIZE, &intra_vlcs[i].first_pattern[j], NULL); | |
116 rv34_gen_vlc(rv34_intra_coeff[i], COEFF_VLC_SIZE, &intra_vlcs[i].coefficient, NULL); | |
117 } | |
118 | |
119 for(i = 0; i < NUM_INTER_TABLES; i++){ | |
120 rv34_gen_vlc(rv34_inter_cbppat[i], CBPPAT_VLC_SIZE, &inter_vlcs[i].cbppattern[0], NULL); | |
121 for(j = 0; j < 4; j++) | |
122 rv34_gen_vlc(rv34_inter_cbp[i][j], CBP_VLC_SIZE, &inter_vlcs[i].cbp[0][j], rv34_cbp_code); | |
123 for(j = 0; j < 2; j++){ | |
124 rv34_gen_vlc(rv34_table_inter_firstpat [i][j], FIRSTBLK_VLC_SIZE, &inter_vlcs[i].first_pattern[j], NULL); | |
125 rv34_gen_vlc(rv34_table_inter_secondpat[i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].second_pattern[j], NULL); | |
126 rv34_gen_vlc(rv34_table_inter_thirdpat [i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].third_pattern[j], NULL); | |
127 } | |
128 rv34_gen_vlc(rv34_inter_coeff[i], COEFF_VLC_SIZE, &inter_vlcs[i].coefficient, NULL); | |
129 } | |
130 } | |
131 | |
132 /** @} */ // vlc group | |
133 | |
134 | |
135 /** | |
136 * @defgroup transform RV30/40 inverse transform functions | |
137 * @{ | |
138 */ | |
139 | |
140 static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block) | |
141 { | |
142 int i; | |
143 | |
144 for(i=0; i<4; i++){ | |
145 const int z0= 13*(block[i+8*0] + block[i+8*2]); | |
146 const int z1= 13*(block[i+8*0] - block[i+8*2]); | |
147 const int z2= 7* block[i+8*1] - 17*block[i+8*3]; | |
148 const int z3= 17* block[i+8*1] + 7*block[i+8*3]; | |
149 | |
150 temp[4*i+0]= z0+z3; | |
151 temp[4*i+1]= z1+z2; | |
152 temp[4*i+2]= z1-z2; | |
153 temp[4*i+3]= z0-z3; | |
154 } | |
155 } | |
156 | |
157 /** | |
158 * Real Video 3.0/4.0 inverse transform | |
159 * Code is almost the same as in SVQ3, only scaling is different. | |
160 */ | |
161 static void rv34_inv_transform(DCTELEM *block){ | |
162 int temp[16]; | |
163 int i; | |
164 | |
165 rv34_row_transform(temp, block); | |
166 | |
167 for(i=0; i<4; i++){ | |
168 const int z0= 13*(temp[4*0+i] + temp[4*2+i]) + 0x200; | |
169 const int z1= 13*(temp[4*0+i] - temp[4*2+i]) + 0x200; | |
170 const int z2= 7* temp[4*1+i] - 17*temp[4*3+i]; | |
171 const int z3= 17* temp[4*1+i] + 7*temp[4*3+i]; | |
172 | |
173 block[i*8+0]= (z0 + z3)>>10; | |
174 block[i*8+1]= (z1 + z2)>>10; | |
175 block[i*8+2]= (z1 - z2)>>10; | |
176 block[i*8+3]= (z0 - z3)>>10; | |
177 } | |
178 | |
179 } | |
180 | |
181 /** | |
182 * RealVideo 3.0/4.0 inverse transform for DC block | |
183 * | |
184 * Code is almost the same as rv34_inv_transform() | |
185 * but final coefficients are multiplied by 1.5 and have no rounding. | |
186 */ | |
187 static void rv34_inv_transform_noround(DCTELEM *block){ | |
188 int temp[16]; | |
189 int i; | |
190 | |
191 rv34_row_transform(temp, block); | |
192 | |
193 for(i=0; i<4; i++){ | |
194 const int z0= 13*(temp[4*0+i] + temp[4*2+i]); | |
195 const int z1= 13*(temp[4*0+i] - temp[4*2+i]); | |
196 const int z2= 7* temp[4*1+i] - 17*temp[4*3+i]; | |
197 const int z3= 17* temp[4*1+i] + 7*temp[4*3+i]; | |
198 | |
199 block[i*8+0]= ((z0 + z3)*3)>>11; | |
200 block[i*8+1]= ((z1 + z2)*3)>>11; | |
201 block[i*8+2]= ((z1 - z2)*3)>>11; | |
202 block[i*8+3]= ((z0 - z3)*3)>>11; | |
203 } | |
204 | |
205 } | |
206 | |
207 /** @} */ // transform | |
208 | |
209 | |
210 /** | |
211 * @defgroup block RV30/40 4x4 block decoding functions | |
212 * @{ | |
213 */ | |
214 | |
215 /** | |
216 * Decode coded block pattern. | |
217 */ | |
218 static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table) | |
219 { | |
220 int pattern, code, cbp=0; | |
221 int ones; | |
222 static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000}; | |
223 static const int shifts[4] = { 0, 2, 8, 10 }; | |
224 int *curshift = shifts; | |
225 int i, t, mask; | |
226 | |
227 code = get_vlc2(gb, vlc->cbppattern[table].table, 9, 2); | |
228 pattern = code & 0xF; | |
229 code >>= 4; | |
230 | |
231 ones = rv34_count_ones[pattern]; | |
232 | |
233 for(mask = 8; mask; mask >>= 1, curshift++){ | |
234 if(pattern & mask) | |
235 cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0]; | |
236 } | |
237 | |
238 for(i = 0; i < 4; i++){ | |
239 t = modulo_three_table[code][i]; | |
240 if(t == 1) | |
241 cbp |= cbp_masks[get_bits1(gb)] << i; | |
242 if(t == 2) | |
243 cbp |= cbp_masks[2] << i; | |
244 } | |
245 return cbp; | |
246 } | |
247 | |
248 /** | |
249 * Get one coefficient value from the bistream and store it. | |
250 */ | |
251 static inline void decode_coeff(DCTELEM *dst, int coef, int esc, GetBitContext *gb, VLC* vlc) | |
252 { | |
253 if(coef){ | |
254 if(coef == esc){ | |
255 coef = get_vlc2(gb, vlc->table, 9, 2); | |
256 if(coef > 23){ | |
257 coef -= 23; | |
258 coef = 22 + ((1 << coef) | get_bits(gb, coef)); | |
259 } | |
260 coef += esc; | |
261 } | |
262 if(get_bits1(gb)) | |
263 coef = -coef; | |
264 *dst = coef; | |
265 } | |
266 } | |
267 | |
268 /** | |
269 * Decode 2x2 subblock of coefficients. | |
270 */ | |
271 static inline void decode_subblock(DCTELEM *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc) | |
272 { | |
273 int coeffs[4]; | |
274 | |
275 coeffs[0] = modulo_three_table[code][0]; | |
276 coeffs[1] = modulo_three_table[code][1]; | |
277 coeffs[2] = modulo_three_table[code][2]; | |
278 coeffs[3] = modulo_three_table[code][3]; | |
279 decode_coeff(dst , coeffs[0], 3, gb, vlc); | |
280 if(is_block2){ | |
281 decode_coeff(dst+8, coeffs[1], 2, gb, vlc); | |
282 decode_coeff(dst+1, coeffs[2], 2, gb, vlc); | |
283 }else{ | |
284 decode_coeff(dst+1, coeffs[1], 2, gb, vlc); | |
285 decode_coeff(dst+8, coeffs[2], 2, gb, vlc); | |
286 } | |
287 decode_coeff(dst+9, coeffs[3], 2, gb, vlc); | |
288 } | |
289 | |
290 /** | |
291 * Decode coefficients for 4x4 block. | |
292 * | |
293 * This is done by filling 2x2 subblocks with decoded coefficients | |
294 * in this order (the same for subblocks and subblock coefficients): | |
295 * o--o | |
296 * / | |
297 * / | |
298 * o--o | |
299 */ | |
300 | |
301 static inline void rv34_decode_block(DCTELEM *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc) | |
302 { | |
303 int code, pattern; | |
304 | |
305 code = get_vlc2(gb, rvlc->first_pattern[fc].table, 9, 2); | |
306 | |
307 pattern = code & 0x7; | |
308 | |
309 code >>= 3; | |
310 decode_subblock(dst, code, 0, gb, &rvlc->coefficient); | |
311 | |
312 if(pattern & 4){ | |
313 code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2); | |
314 decode_subblock(dst + 2, code, 0, gb, &rvlc->coefficient); | |
315 } | |
316 if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block | |
317 code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2); | |
318 decode_subblock(dst + 8*2, code, 1, gb, &rvlc->coefficient); | |
319 } | |
320 if(pattern & 1){ | |
321 code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2); | |
322 decode_subblock(dst + 8*2+2, code, 0, gb, &rvlc->coefficient); | |
323 } | |
324 | |
325 } | |
326 | |
327 /** | |
328 * Dequantize ordinary 4x4 block. | |
329 * @todo optimize | |
330 */ | |
331 static inline void rv34_dequant4x4(DCTELEM *block, int Qdc, int Q) | |
332 { | |
333 int i, j; | |
334 | |
335 block[0] = (block[0] * Qdc + 8) >> 4; | |
336 for(i = 0; i < 4; i++) | |
337 for(j = !i; j < 4; j++) | |
338 block[j + i*8] = (block[j + i*8] * Q + 8) >> 4; | |
339 } | |
340 | |
341 /** | |
342 * Dequantize 4x4 block of DC values for 16x16 macroblock. | |
343 * @todo optimize | |
344 */ | |
345 static inline void rv34_dequant4x4_16x16(DCTELEM *block, int Qdc, int Q) | |
346 { | |
347 int i; | |
348 | |
349 for(i = 0; i < 3; i++) | |
350 block[rv34_dezigzag[i]] = (block[rv34_dezigzag[i]] * Qdc + 8) >> 4; | |
351 for(; i < 16; i++) | |
352 block[rv34_dezigzag[i]] = (block[rv34_dezigzag[i]] * Q + 8) >> 4; | |
353 } | |
354 /** @} */ //block functions | |
355 | |
356 | |
357 /** | |
358 * @defgroup bitstream RV30/40 bitstream parsing | |
359 * @{ | |
360 */ | |
361 | |
362 static inline int decode210(GetBitContext *gb){ | |
363 if (get_bits1(gb)) | |
364 return 0; | |
365 else | |
366 return 2 - get_bits1(gb); | |
367 } | |
368 | |
/**
 * Decode starting slice position.
 *
 * Picks the number of bits needed for a macroblock offset based on the
 * total number of macroblocks.
 * NOTE(review): gb is not read in the visible body — presumably the caller
 * (or a later revision) consumes the bits; confirm against call sites.
 * @todo Maybe replace with ff_h263_decode_mba() ?
 */
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
{
    int i;
    for(i = 0; i < 5; i++)
        if(rv34_mb_max_sizes[i] > mb_size)
            break;
    return rv34_mb_bits_sizes[i];
}
381 | |
382 /** | |
383 * Select VLC set for decoding from current quantizer, modifier and frame type. | |
384 */ | |
385 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type) | |
386 { | |
387 if(mod == 2 && quant < 19) quant += 10; | |
388 else if(mod && quant < 26) quant += 5; | |
389 return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][av_clip(quant, 0, 30)]] | |
390 : &intra_vlcs[rv34_quant_to_vlc_set[0][av_clip(quant, 0, 30)]]; | |
391 } | |
392 | |
393 /** | |
394 * Decode quantizer difference and return modified quantizer. | |
395 */ | |
396 static inline int rv34_decode_dquant(GetBitContext *gb, int quant) | |
397 { | |
398 if(get_bits1(gb)) | |
399 return rv34_dquant_tab[get_bits1(gb)][quant]; | |
400 else | |
401 return get_bits(gb, 5); | |
402 } | |
403 | |
404 /** @} */ //bitstream functions | |
405 | |
406 /** | |
407 * @defgroup mv motion vector related code (prediction, reconstruction, motion compensation) | |
408 * @{ | |
409 */ | |
410 | |
/** macroblock partition width in 8x8 blocks (indexed by RV34 MB type) */
static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };

/** macroblock partition height in 8x8 blocks (indexed by RV34 MB type) */
static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };

/** availability index for subblocks — positions of the four 8x8 subblocks
 *  inside the 4-wide avail_cache used by the MV prediction code */
static const uint8_t avail_indexes[4] = { 5, 6, 9, 10 };
419 | |
/**
 * motion vector prediction
 *
 * Motion prediction performed for the block by using median prediction of
 * motion vectors from the left, top and right top blocks but in corner cases
 * some other vectors may be used instead.
 */
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
{
    MpegEncContext *s = &r->s;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2] = {0}, B[2], C[2];  /* left, top and top-right neighbour MVs; A defaults to 0 */
    int i, j;
    int mx, my;
    int avail_index = avail_indexes[subblock_no];
    int c_off = part_sizes_w[block_type];  /* horizontal offset to the top-right neighbour */

    mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
    if(subblock_no == 3)
        c_off = -1;  /* bottom-right subblock: use top-left neighbour instead of top-right */

    if(r->avail_cache[avail_index - 1]){
        A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
        A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
    }
    if(r->avail_cache[avail_index - 4]){
        B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
        B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
    }else{
        /* no top neighbour: reuse the left one */
        B[0] = A[0];
        B[1] = A[1];
    }
    if(!r->avail_cache[avail_index - 4 + c_off]){
        /* top-right unavailable: fall back to top-left (RV30 always may) or to A */
        if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1] || r->rv30)){
            C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
            C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
        }else{
            C[0] = A[0];
            C[1] = A[1];
        }
    }else{
        C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
        C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
    }
    /* median prediction plus the decoded MV difference */
    mx = mid_pred(A[0], B[0], C[0]);
    my = mid_pred(A[1], B[1], C[1]);
    mx += r->dmv[dmv_no][0];
    my += r->dmv[dmv_no][1];
    /* propagate the reconstructed MV to every 8x8 block of the partition */
    for(j = 0; j < part_sizes_h[block_type]; j++){
        for(i = 0; i < part_sizes_w[block_type]; i++){
            s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
            s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
        }
    }
}
475 | |
476 /** | |
6096
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
477 * Calculate motion vector component that should be added for direct blocks. |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
478 */ |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
479 static int calc_add_mv(MpegEncContext *s, int dir, int component) |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
480 { |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
481 int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
482 int sum; |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
483 |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
484 sum = (s->next_picture_ptr->motion_val[0][mv_pos][component] + |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
485 s->next_picture_ptr->motion_val[0][mv_pos + 1][component] + |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
486 s->next_picture_ptr->motion_val[0][mv_pos + s->b8_stride][component] + |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
487 s->next_picture_ptr->motion_val[0][mv_pos + s->b8_stride + 1][component]) >> 2; |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
488 return dir ? -(sum >> 1) : ((sum + 1) >> 1); |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
489 } |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
490 |
89140b93ae09
Direct blocks should use motion vectors from the second reference frame
kostya
parents:
6036
diff
changeset
|
/**
 * Predict the motion vector for a B-frame macroblock from up to three
 * neighbour vectors: median when all are known, (scaled) sum otherwise.
 */
static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
                                      int A_avail, int B_avail, int C_avail,
                                      int *mx, int *my)
{
    const int navail = A_avail + B_avail + C_avail;

    if(navail == 3){
        /* all neighbours known — classic median prediction */
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    }else{
        /* unavailable neighbours contribute zero vectors */
        *mx = A[0] + B[0] + C[0];
        *my = A[1] + B[1] + C[1];
        if(navail == 2){
            *mx /= 2;
            *my /= 2;
        }
    }
}
510 | |
/**
 * motion vector prediction for B-frames
 *
 * Predicts the MV for one direction (0 = forward/L0, 1 = backward/L1),
 * adds the decoded difference (and, for direct blocks, the contribution
 * from the co-located MB in the next reference), and stores the result
 * in all four 8x8 positions of the macroblock.
 */
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
{
    MpegEncContext *s = &r->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2], B[2], C[2];  /* left, top and top-right neighbour MVs */
    int has_A = 0, has_B = 0, has_C = 0;
    int mx, my;
    int i, j;
    Picture *cur_pic = s->current_picture_ptr;
    const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
    int type = cur_pic->mb_type[mb_pos];

    memset(A, 0, sizeof(A));
    memset(B, 0, sizeof(B));
    memset(C, 0, sizeof(C));
    /* a neighbour is usable only if it is available and predicts from the same list */
    if((r->avail_cache[5-1] & type) & mask){
        A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
        A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
        has_A = 1;
    }
    if((r->avail_cache[5-4] & type) & mask){
        B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
        B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
        has_B = 1;
    }
    if((r->avail_cache[5-2] & type) & mask){
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
        has_C = 1;
    }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[5-5] & type) & mask){
        /* last MB in the row: take the top-left neighbour instead of top-right */
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
        has_C = 1;
    }

    rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);

    mx += r->dmv[dir][0];
    my += r->dmv[dir][1];

    if(block_type == RV34_MB_B_DIRECT){
        /* direct blocks add the scaled co-located MV from the second reference */
        mx += calc_add_mv(s, dir, 0);
        my += calc_add_mv(s, dir, 1);
    }
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
        }
    }
    /* single-direction MBs zero the vectors of the unused direction */
    if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD)
        fill_rectangle(cur_pic->motion_val[!dir][mv_pos], 2, 2, s->b8_stride, 0, 4);
}
568 | |
/**
 * generic motion compensation function
 *
 * @param r decoder context
 * @param block_type type of the current block
 * @param xoff horizontal offset from the start of the current block
 * @param yoff vertical offset from the start of the current block
 * @param mv_off offset to the motion vector information
 * @param width width of the current partition in 8x8 blocks
 * @param height height of the current partition in 8x8 blocks
 * @param dir prediction direction: 0 = last picture, 1 = next picture
 * @param thirdpel nonzero for RV30 third-pel vectors (real third-pel handling
 *                 is #if 0'd below; currently decoded like quarter-pel — TODO)
 * @param qpel_mc luma motion compensation functions (put or avg set)
 * @param chroma_mc chroma motion compensation functions (put or avg set)
 */
static inline void rv34_mc(RV34DecContext *r, const int block_type,
                           const int xoff, const int yoff, int mv_off,
                           const int width, const int height, int dir,
                           const int thirdpel,
                           qpel_mc_func (*qpel_mc)[16],
                           h264_chroma_mc_func (*chroma_mc))
{
    MpegEncContext *s = &r->s;
    uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
    int is16x16 = 1;

    /* split the MV into integer part (mx/my) and subpel index (dxy, uvmx/uvmy) */
    if(thirdpel){
#if 0 /// todo
        int lx, ly;

        mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 3;
        my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 3;
        lx = ((s->current_picture_ptr->motion_val[dir][mv_pos][0] % 3) + 3) % 3;
        ly = ((s->current_picture_ptr->motion_val[dir][mv_pos][1] % 3) + 3) % 3;
        dxy = ly*3 + lx;
        uvmx =
#endif
        mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
        my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
        dxy = ((my & 3) << 2) | (mx & 3);
        uvmx = mx & 6;
        uvmy = my & 6;
    }else{
        mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
        my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
        dxy = ((my & 3) << 2) | (mx & 3);
        uvmx = mx & 6;
        uvmy = my & 6;
    }
    /* reference planes: forward prediction uses the last picture, backward the next */
    srcY = dir ? s->next_picture_ptr->data[0] : s->last_picture_ptr->data[0];
    srcU = dir ? s->next_picture_ptr->data[1] : s->last_picture_ptr->data[1];
    srcV = dir ? s->next_picture_ptr->data[2] : s->last_picture_ptr->data[2];
    src_x = s->mb_x * 16 + xoff + mx;
    src_y = s->mb_y * 16 + yoff + my;
    uvsrc_x = s->mb_x * 8 + (xoff >> 1) + (mx >> 1);
    uvsrc_y = s->mb_y * 8 + (yoff >> 1) + (my >> 1);
    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
    /* if the reference block (plus interpolation margin) crosses the picture
     * edge, fetch it through the edge emulation buffer instead */
    if(   (unsigned)(src_x - !!(mx&3)*2) > s->h_edge_pos - !!(mx&3)*2 - (width <<3) - 3
       || (unsigned)(src_y - !!(my&3)*2) > s->v_edge_pos - !!(my&3)*2 - (height<<3) - 3){
        uint8_t *uvbuf= s->edge_emu_buffer + 20 * s->linesize;

        srcY -= 2 + 2*s->linesize;
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, (width<<3)+4, (height<<3)+4,
                            src_x - 2, src_y - 2, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer + 2 + 2*s->linesize;
        ff_emulated_edge_mc(uvbuf     , srcU, s->uvlinesize, (width<<2)+1, (height<<2)+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, (width<<2)+1, (height<<2)+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
    }
    Y = s->dest[0] + xoff      + yoff     *s->linesize;
    U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
    V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;

    /* 16x8 / 8x16 partitions are done as two 8-wide/8-tall 8x8 calls */
    if(block_type == RV34_MB_P_16x8){
        qpel_mc[1][dxy](Y, srcY, s->linesize);
        Y    += 8;
        srcY += 8;
    }else if(block_type == RV34_MB_P_8x16){
        qpel_mc[1][dxy](Y, srcY, s->linesize);
        Y    += 8 * s->linesize;
        srcY += 8 * s->linesize;
    }
    is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
    qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
    chroma_mc[2-width]   (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
    chroma_mc[2-width]   (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
}
659 | |
/** Single-direction motion compensation using the "put" pixel functions. */
static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
                        const int xoff, const int yoff, int mv_off,
                        const int width, const int height, int dir)
{
    rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30,
            r->s.dsp.put_h264_qpel_pixels_tab, r->s.dsp.put_h264_chroma_pixels_tab);
}
667 | |
/** Bidirectional motion compensation: "put" the forward prediction,
 *  then "avg" in the backward one to average the two references. */
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
{
    rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30,
            r->s.dsp.put_h264_qpel_pixels_tab, r->s.dsp.put_h264_chroma_pixels_tab);
    rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30,
            r->s.dsp.avg_h264_qpel_pixels_tab, r->s.dsp.avg_h264_chroma_pixels_tab);
}
675 | |
/** number of motion vectors to read for each macroblock type (indexed by RV34 MB type) */
static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
678 | |
/**
 * Decode motion vector differences
 * and perform motion vector reconstruction and motion compensation.
 */
static int rv34_decode_mv(RV34DecContext *r, int block_type)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int i;

    /* read the signed Golomb-coded MV differences for this MB type */
    memset(r->dmv, 0, sizeof(r->dmv));
    for(i = 0; i < num_mvs[block_type]; i++){
        r->dmv[i][0] = svq3_get_se_golomb(gb);
        r->dmv[i][1] = svq3_get_se_golomb(gb);
    }
    switch(block_type){
    case RV34_MB_TYPE_INTRA:
    case RV34_MB_TYPE_INTRA16x16:
        /* intra blocks carry no motion - store zero MVs for later prediction */
        fill_rectangle(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], 2, 2, s->b8_stride, 0, 4);
        return 0;
    case RV34_MB_SKIP:
        if(s->pict_type == P_TYPE){
            /* skipped P macroblock: zero MV, plain copy from the reference */
            fill_rectangle(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], 2, 2, s->b8_stride, 0, 4);
            rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
            break;
        }
        /* fallthrough: in B-frames a skipped MB is handled as a direct one */
    case RV34_MB_B_DIRECT:
        rv34_pred_mv_b  (r, RV34_MB_B_DIRECT, 0);
        rv34_pred_mv_b  (r, RV34_MB_B_DIRECT, 1);
        rv34_mc_2mv     (r, RV34_MB_B_DIRECT);
        break;
    case RV34_MB_P_16x16:
    case RV34_MB_P_MIX16x16:
        rv34_pred_mv(r, block_type, 0, 0);
        rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
        break;
    case RV34_MB_B_FORWARD:
    case RV34_MB_B_BACKWARD:
        r->dmv[1][0] = r->dmv[0][0];
        r->dmv[1][1] = r->dmv[0][1];
        rv34_pred_mv_b  (r, block_type, block_type == RV34_MB_B_BACKWARD);
        rv34_mc_1mv     (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
        break;
    case RV34_MB_P_16x8:
    case RV34_MB_P_8x16:
        /* two partitions, each with its own MV */
        rv34_pred_mv(r, block_type, 0, 0);
        rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
        if(block_type == RV34_MB_P_16x8){
            rv34_mc_1mv(r, block_type, 0, 0, 0,            2, 1, 0);
            rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
        }
        if(block_type == RV34_MB_P_8x16){
            rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
            rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
        }
        break;
    case RV34_MB_B_BIDIR:
        rv34_pred_mv_b  (r, block_type, 0);
        rv34_pred_mv_b  (r, block_type, 1);
        rv34_mc_2mv     (r, block_type);
        break;
    case RV34_MB_P_8x8:
        /* four 8x8 partitions, one MV each */
        for(i=0;i< 4;i++){
            rv34_pred_mv(r, block_type, i, i);
            rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
        }
        break;
    }

    return 0;
}
750 /** @} */ // mv group | |
751 | |
752 /** | |
753 * @defgroup recons Macroblock reconstruction functions | |
754 * @{ | |
755 */ | |
/** mapping of RV30/40 intra prediction types to standard H.264 types */
static const int ittrans[9] = {
    DC_PRED, VERT_PRED, HOR_PRED, DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_LEFT_PRED,
    VERT_RIGHT_PRED, VERT_LEFT_PRED, HOR_UP_PRED, HOR_DOWN_PRED,
};

/** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
static const int ittrans16[4] = {
    DC_PRED8x8, VERT_PRED8x8, HOR_PRED8x8, PLANE_PRED8x8,
};
766 | |
/**
 * Perform 4x4 intra prediction.
 *
 * The requested prediction mode is first downgraded to one that only
 * needs the neighbours that are actually available (up/left/down/right),
 * then the matching H.264 pred4x4 function is invoked.
 */
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
{
    uint8_t *prev = dst - stride + 4;  /* top-right neighbour samples */
    uint32_t topleft;

    if(!up && !left)
        itype = DC_128_PRED;
    else if(!up){
        if(itype == VERT_PRED) itype = HOR_PRED;
        if(itype == DC_PRED)   itype = LEFT_DC_PRED;
    }else if(!left){
        if(itype == HOR_PRED)  itype = VERT_PRED;
        if(itype == DC_PRED)   itype = TOP_DC_PRED;
        if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN;
    }
    if(!down){
        /* RV40 variants that do not reference the below-left samples */
        if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN;
        if(itype == HOR_UP_PRED)         itype = HOR_UP_PRED_RV40_NODOWN;
        if(itype == VERT_LEFT_PRED)      itype = VERT_LEFT_PRED_RV40_NODOWN;
    }
    if(!right && up){
        /* no top-right neighbour: replicate the last available top sample */
        topleft = dst[-stride + 3] * 0x01010101;
        prev = &topleft;
    }
    r->h.pred4x4[itype](dst, prev, stride);
}
796 | |
797 /** add_pixels_clamped for 4x4 block */ | |
798 static void rv34_add_4x4_block(uint8_t *dst, int stride, DCTELEM block[64], int off) | |
799 { | |
800 int x, y; | |
801 for(y = 0; y < 4; y++) | |
802 for(x = 0; x < 4; x++) | |
803 dst[x + y*stride] = av_clip_uint8(dst[x + y*stride] + block[off + x+y*8]); | |
804 } | |
805 | |
806 static inline int adjust_pred16(int itype, int up, int left) | |
807 { | |
808 if(!up && !left) | |
809 itype = DC_128_PRED8x8; | |
810 else if(!up){ | |
811 if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8; | |
812 if(itype == VERT_PRED8x8) itype = HOR_PRED8x8; | |
813 if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8; | |
814 }else if(!left){ | |
815 if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8; | |
816 if(itype == HOR_PRED8x8) itype = VERT_PRED8x8; | |
817 if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8; | |
818 } | |
819 return itype; | |
820 } | |
821 | |
/**
 * Reconstruct an intra macroblock: run intra prediction (4x4 or 16x16)
 * on the luma and chroma planes and add the decoded residual for every
 * block flagged in the coded block pattern.
 */
static void rv34_output_macroblock(RV34DecContext *r, int8_t *intra_types, int cbp, int is16)
{
    MpegEncContext *s = &r->s;
    DSPContext *dsp = &s->dsp;
    int i, j;
    uint8_t *Y, *U, *V;
    int itype;
    int avail[6*8] = {0};   // per-4x4-block availability flags, addressed with stride 8
    int idx;

    // Set neighbour information.
    if(r->avail_cache[0])
        avail[0] = 1;
    if(r->avail_cache[1])
        avail[1] = avail[2] = 1;
    if(r->avail_cache[2])
        avail[3] = avail[4] = 1;
    if(r->avail_cache[3])
        avail[5] = 1;
    if(r->avail_cache[4])
        avail[8] = avail[16] = 1;
    if(r->avail_cache[8])
        avail[24] = avail[32] = 1;

    Y = s->dest[0];
    U = s->dest[1];
    V = s->dest[2];
    if(!is16){
        // 4x4 intra: one prediction mode per luma 4x4 block; each decoded
        // block becomes an available neighbour for the following ones.
        for(j = 0; j < 4; j++){
            idx = 9 + j*8;
            for(i = 0; i < 4; i++, cbp >>= 1, Y += 4, idx++){
                rv34_pred_4x4_block(r, Y, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
                avail[idx] = 1;
                if(cbp & 1)
                    rv34_add_4x4_block(Y, s->linesize, s->block[(i>>1)+(j&2)], (i&1)*4+(j&1)*32);
            }
            Y += s->linesize * 4 - 4*4;
            intra_types += s->b4_stride;
        }
        intra_types -= s->b4_stride * 4;
        fill_rectangle(r->avail_cache + 5, 2, 2, 4, 0, 4);
        // Chroma: 2x2 grid of 4x4 blocks per plane; modes are taken from
        // the co-located luma blocks (every second entry of intra_types).
        for(j = 0; j < 2; j++){
            idx = 5 + j*4;
            for(i = 0; i < 2; i++, cbp >>= 1, idx++){
                rv34_pred_4x4_block(r, U + i*4 + j*4*s->uvlinesize, s->uvlinesize, ittrans[intra_types[i*2+j*2*s->b4_stride]], r->avail_cache[idx-4], r->avail_cache[idx-1], !i && !j, r->avail_cache[idx-3]);
                rv34_pred_4x4_block(r, V + i*4 + j*4*s->uvlinesize, s->uvlinesize, ittrans[intra_types[i*2+j*2*s->b4_stride]], r->avail_cache[idx-4], r->avail_cache[idx-1], !i && !j, r->avail_cache[idx-3]);
                r->avail_cache[idx] = 1;
                // U and V residual flags sit in different nibbles of cbp.
                if(cbp & 0x01)
                    rv34_add_4x4_block(U + i*4 + j*4*s->uvlinesize, s->uvlinesize, s->block[4], i*4+j*32);
                if(cbp & 0x10)
                    rv34_add_4x4_block(V + i*4 + j*4*s->uvlinesize, s->uvlinesize, s->block[5], i*4+j*32);
            }
        }
    }else{
        // 16x16 intra: a single mode predicts the whole luma block, then
        // the four 8x8 residual blocks are added.
        itype = ittrans16[intra_types[0]];
        itype = adjust_pred16(itype, r->avail_cache[5-4], r->avail_cache[5-1]);
        r->h.pred16x16[itype](Y, s->linesize);
        dsp->add_pixels_clamped(s->block[0], Y, s->current_picture.linesize[0]);
        dsp->add_pixels_clamped(s->block[1], Y + 8, s->current_picture.linesize[0]);
        Y += s->current_picture.linesize[0] * 8;
        dsp->add_pixels_clamped(s->block[2], Y, s->current_picture.linesize[0]);
        dsp->add_pixels_clamped(s->block[3], Y + 8, s->current_picture.linesize[0]);

        // Chroma reuses the 16x16 mode, except PLANE falls back to DC here.
        itype = ittrans16[intra_types[0]];
        if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
        itype = adjust_pred16(itype, r->avail_cache[5-4], r->avail_cache[5-1]);
        r->h.pred8x8[itype](U, s->uvlinesize);
        dsp->add_pixels_clamped(s->block[4], U, s->uvlinesize);
        r->h.pred8x8[itype](V, s->uvlinesize);
        dsp->add_pixels_clamped(s->block[5], V, s->uvlinesize);
    }
}
894 | |
895 /** @} */ // recons group | |
896 | |
/**
 * @addtogroup bitstream
 * Decode macroblock header and return CBP in case of success, -1 otherwise.
 */
static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int i, t;

    if(!r->si.type){
        // Intra slice: only a flag selecting 16x16 vs 4x4 intra coding is read.
        r->is16 = get_bits1(gb);
        if(!r->is16 && !r->rv30){
            if(!get_bits1(gb))
                av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
        }
        s->current_picture_ptr->mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA;
        r->block_type = r->is16 ? RV34_MB_TYPE_INTRA16x16 : RV34_MB_TYPE_INTRA;
    }else{
        // Inter slice: decode the macroblock type, then its motion vectors.
        r->block_type = r->decode_mb_info(r);
        if(r->block_type == -1)
            return -1;
        s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
        r->mb_type[mb_pos] = r->block_type;
        // Record the effective type a skipped MB is reconstructed as.
        if(r->block_type == RV34_MB_SKIP){
            if(s->pict_type == P_TYPE)
                r->mb_type[mb_pos] = RV34_MB_P_16x16;
            if(s->pict_type == B_TYPE)
                r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
        }
        r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
        rv34_decode_mv(r, r->block_type);
        if(r->block_type == RV34_MB_SKIP){
            // Skipped MB: no residual and no intra modes follow.
            fill_rectangle(intra_types, 4, 4, s->b4_stride, 0, sizeof(intra_types[0]));
            return 0;
        }
        r->chroma_vlc = 1;
        r->luma_vlc = 0;
    }
    if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
        if(r->is16){
            // One 2-bit 16x16 mode covers the whole macroblock.
            t = get_bits(gb, 2);
            fill_rectangle(intra_types, 4, 4, s->b4_stride, t, sizeof(intra_types[0]));
            r->luma_vlc = 2;
        }else{
            if(r->decode_intra_types(r, gb, intra_types) < 0)
                return -1;
            r->luma_vlc = 1;
        }
        r->chroma_vlc = 0;
        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
    }else{
        // Inter MB: clear the intra-mode cache for this macroblock.
        for(i = 0; i < 16; i++)
            intra_types[(i & 3) + (i>>2) * s->b4_stride] = 0;
        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
        if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
            // Mixed type uses intra-style VLCs for luma despite being inter.
            r->is16 = 1;
            r->chroma_vlc = 1;
            r->luma_vlc = 2;
            r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
        }
    }

    return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
}
963 | |
964 /** | |
965 * @addtogroup recons | |
966 * @{ | |
967 */ | |
968 /** | |
969 * mask for retrieving all bits in coded block pattern | |
970 * corresponding to one 8x8 block | |
971 */ | |
972 #define LUMA_CBP_BLOCK_MASK 0x303 | |
973 | |
974 #define U_CBP_MASK 0x0F0000 | |
975 #define V_CBP_MASK 0xF00000 | |
976 | |
977 | |
978 static void rv34_apply_differences(RV34DecContext *r, int cbp) | |
979 { | |
980 static const int shifts[4] = { 0, 2, 8, 10 }; | |
981 MpegEncContext *s = &r->s; | |
982 int i; | |
983 | |
984 for(i = 0; i < 4; i++) | |
985 if(cbp & (LUMA_CBP_BLOCK_MASK << shifts[i])) | |
986 s->dsp.add_pixels_clamped(s->block[i], s->dest[0] + (i & 1)*8 + (i&2)*4*s->linesize, s->linesize); | |
987 if(cbp & U_CBP_MASK) | |
988 s->dsp.add_pixels_clamped(s->block[4], s->dest[1], s->uvlinesize); | |
989 if(cbp & V_CBP_MASK) | |
990 s->dsp.add_pixels_clamped(s->block[5], s->dest[2], s->uvlinesize); | |
991 } | |
992 | |
/**
 * Decode one macroblock: parse its header, decode and dequantize the
 * coefficients, apply the inverse transform, and reconstruct the block
 * (intra prediction or residual addition).
 */
static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int cbp, cbp2;
    int i, blknum, blkoff;
    DCTELEM block16[64];
    int luma_dc_quant;
    int dist;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    // Calculate which neighbours are available. Maybe it's worth optimizing too.
    // 'dist' is the number of MBs decoded since the slice resync point;
    // neighbours from before that point must not be used.
    memset(r->avail_cache, 0, sizeof(r->avail_cache));
    fill_rectangle(r->avail_cache + 5, 2, 2, 4, 1, 4);
    dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
    if(s->mb_x && dist)
        r->avail_cache[4] =
        r->avail_cache[8] = s->current_picture_ptr->mb_type[mb_pos - 1];
    if(dist >= s->mb_width)
        r->avail_cache[1] =
        r->avail_cache[2] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
    if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
        r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
    if(s->mb_x && dist > s->mb_width)
        r->avail_cache[0] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];

    s->qscale = r->si.quant;
    cbp = cbp2 = rv34_decode_mb_header(r, intra_types);
    if(cbp == -1)
        return -1;

    luma_dc_quant = r->si.type ? r->luma_dc_quant_p[s->qscale] : r->luma_dc_quant_i[s->qscale];
    if(r->is16){
        // 16x16 mode: the luma DC coefficients are coded as a separate
        // 4x4 block and transformed without rounding.
        memset(block16, 0, sizeof(block16));
        rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0);
        rv34_dequant4x4_16x16(block16, rv34_qscale_tab[luma_dc_quant],rv34_qscale_tab[s->qscale]);
        rv34_inv_transform_noround(block16);
    }

    // Luma: sixteen 4x4 sub-blocks, one cbp bit each.
    for(i = 0; i < 16; i++, cbp >>= 1){
        if(!r->is16 && !(cbp & 1)) continue;
        blknum = ((i & 2) >> 1) + ((i & 8) >> 2);
        blkoff = ((i & 1) << 2) + ((i & 4) << 3);
        if(cbp & 1)
            rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->luma_vlc, 0);
        rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[luma_dc_quant],rv34_qscale_tab[s->qscale]);
        if(r->is16) //FIXME: optimize
            s->block[blknum][blkoff] = block16[(i & 3) | ((i & 0xC) << 1)];
        rv34_inv_transform(s->block[blknum] + blkoff);
    }
    if(r->block_type == RV34_MB_P_MIX16x16)
        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
    // Chroma: eight 4x4 sub-blocks (four per plane).
    for(; i < 24; i++, cbp >>= 1){
        if(!(cbp & 1)) continue;
        blknum = ((i & 4) >> 2) + 4;
        blkoff = ((i & 1) << 2) + ((i & 2) << 4);
        rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->chroma_vlc, 1);
        rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]],rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]]);
        rv34_inv_transform(s->block[blknum] + blkoff);
    }
    if(IS_INTRA(s->current_picture_ptr->mb_type[s->mb_x + s->mb_y*s->mb_stride]))
        rv34_output_macroblock(r, intra_types, cbp2, r->is16);
    else
        rv34_apply_differences(r, cbp2);

    return 0;
}
1061 | |
1062 static int check_slice_end(RV34DecContext *r, MpegEncContext *s) | |
1063 { | |
1064 int bits; | |
1065 if(s->mb_y >= s->mb_height) | |
1066 return 1; | |
1067 if(!s->mb_num_left) | |
1068 return 1; | |
1069 if(r->s.mb_skip_run > 1) | |
1070 return 0; | |
1071 bits = r->bits - get_bits_count(&s->gb); | |
1072 if(bits < 0 || (bits < 8 && !show_bits(&s->gb, bits))) | |
1073 return 1; | |
1074 return 0; | |
1075 } | |
1076 | |
1077 static inline int slice_compare(SliceInfo *si1, SliceInfo *si2) | |
1078 { | |
1079 return si1->type != si2->type || | |
1080 si1->start >= si2->start || | |
1081 si1->width != si2->width || | |
1082 si1->height != si2->height; | |
1083 } | |
1084 | |
/**
 * Decode one slice: parse its header, (re)initialize the frame on the
 * first slice, then decode macroblocks until the slice data is exhausted.
 *
 * @return 1 when the whole frame has been decoded, 0 on a mid-frame slice,
 *         -1 on error
 */
static int rv34_decode_slice(RV34DecContext *r, int end, uint8_t* buf, int buf_size)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int mb_pos;
    int res;

    init_get_bits(&r->s.gb, buf, buf_size*8);
    res = r->parse_slice_header(r, gb, &r->si);
    if(res < 0){
        av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
        return -1;
    }

    // First slice of a frame: handle dimension changes and start the picture.
    if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
        if(s->width != r->si.width || s->height != r->si.height){
            // Reinitialize the codec context and per-frame buffers for the new size.
            av_log(s->avctx, AV_LOG_DEBUG, "Changing dimensions to %dx%d\n", r->si.width,r->si.height);
            MPV_common_end(s);
            s->width = r->si.width;
            s->height = r->si.height;
            if(MPV_common_init(s) < 0)
                return -1;
            r->intra_types_hist = av_realloc(r->intra_types_hist, s->b4_stride * 4 * 2 * sizeof(*r->intra_types_hist));
            r->intra_types = r->intra_types_hist + s->b4_stride * 4;
            r->mb_type = av_realloc(r->mb_type, r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type));
        }
        s->pict_type = r->si.type ? r->si.type : I_TYPE;
        if(MPV_frame_start(s, s->avctx) < 0)
            return -1;
        ff_er_frame_start(s);
        s->current_picture_ptr = &s->current_picture;
        s->mb_x = s->mb_y = 0;
    }

    r->si.end = end;
    s->qscale = r->si.quant;
    r->bits = buf_size*8;
    s->mb_num_left = r->si.end - r->si.start;
    r->s.mb_skip_run = 0;

    // Resync to the macroblock position declared in the slice header
    // if it disagrees with where decoding left off.
    mb_pos = s->mb_x + s->mb_y * s->mb_width;
    if(r->si.start != mb_pos){
        av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
        s->mb_x = r->si.start % s->mb_width;
        s->mb_y = r->si.start / s->mb_width;
    }
    memset(r->intra_types_hist, -1, s->b4_stride * 4 * 2 * sizeof(*r->intra_types_hist));
    s->first_slice_line = 1;
    s->resync_mb_x= s->mb_x;
    s->resync_mb_y= s->mb_y;

    ff_init_block_index(s);
    while(!check_slice_end(r, s)) {
        ff_update_block_index(s);
        s->dsp.clear_blocks(s->block[0]);

        if(rv34_decode_macroblock(r, r->intra_types + s->mb_x * 4 + 1) < 0){
            ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
            return -1;
        }
        if (++s->mb_x == s->mb_width) {
            s->mb_x = 0;
            s->mb_y++;
            ff_init_block_index(s);

            // Keep one row of intra prediction modes as history for the next row.
            memmove(r->intra_types_hist, r->intra_types, s->b4_stride * 4 * sizeof(*r->intra_types_hist));
            memset(r->intra_types, -1, s->b4_stride * 4 * sizeof(*r->intra_types_hist));
        }
        if(s->mb_x == s->resync_mb_x)
            s->first_slice_line=0;
        s->mb_num_left--;
    }
    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);

    return (s->mb_y == s->mb_height);
}
1161 | |
1162 /** @} */ // recons group end | |
1163 | |
/**
 * Initialize decoder.
 *
 * Sets up the shared MpegEncContext, the H.264 prediction functions used
 * for intra blocks, per-frame intra-type and MB-type buffers, and the
 * shared VLC tables (built once on first use).
 */
int ff_rv34_decode_init(AVCodecContext *avctx)
{
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;

    MPV_decode_defaults(s);
    s->avctx= avctx;
    s->out_format = FMT_H263;
    s->codec_id= avctx->codec_id;

    s->width = avctx->width;
    s->height = avctx->height;

    r->s.avctx = avctx;
    avctx->flags |= CODEC_FLAG_EMU_EDGE;
    r->s.flags |= CODEC_FLAG_EMU_EDGE;
    avctx->pix_fmt = PIX_FMT_YUV420P;
    avctx->has_b_frames = 1;
    s->low_delay = 0;

    if (MPV_common_init(s) < 0)
        return -1;

    // RV30/40 reuse the H.264 spatial prediction functions.
    ff_h264_pred_init(&r->h, CODEC_ID_RV40);

    // Two rows of per-4x4-block intra types: current row plus history.
    r->intra_types_hist = av_malloc(s->b4_stride * 4 * 2 * sizeof(*r->intra_types_hist));
    r->intra_types = r->intra_types_hist + s->b4_stride * 4;

    r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type));

    // Build the shared static VLC tables on the first decoder instance.
    if(!intra_vlcs[0].cbppattern[0].bits)
        rv34_init_tables();

    return 0;
}
1202 | |
1203 static int get_slice_offset(AVCodecContext *avctx, uint8_t *buf, int n) | |
1204 { | |
1205 if(avctx->slice_count) return avctx->slice_offset[n]; | |
1206 else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8); | |
1207 } | |
1208 | |
/**
 * Decode one coded frame consisting of several slices and output the
 * appropriate picture (with B-frame reordering: the previous frame is
 * returned unless low_delay is set or the current frame is a B-frame).
 */
int ff_rv34_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;
    AVFrame *pict = data;
    SliceInfo si;
    int i;
    int slice_count;
    uint8_t *slices_hdr = NULL;
    int last = 0;

    /* no supplementary picture */
    if (buf_size == 0) {
        /* special case for last picture */
        if (s->low_delay==0 && s->next_picture_ptr) {
            *pict= *(AVFrame*)s->next_picture_ptr;
            s->next_picture_ptr= NULL;

            *data_size = sizeof(AVFrame);
        }
        return 0;
    }

    if(!avctx->slice_count){
        // In-stream slice table: a count byte followed by 8-byte entries.
        slice_count = (*buf++) + 1;
        slices_hdr = buf + 4;
        buf += 8 * slice_count;
    }else
        slice_count = avctx->slice_count;

    for(i=0; i<slice_count; i++){
        int offset= get_slice_offset(avctx, slices_hdr, i);
        int size;
        // The size of each slice is the distance to the next slice's offset
        // (or to the end of the buffer for the last one).
        if(i+1 == slice_count)
            size= buf_size - offset;
        else
            size= get_slice_offset(avctx, slices_hdr, i+1) - offset;

        r->si.end = s->mb_width * s->mb_height;
        if(i+1 < slice_count){
            // Peek at the next slice header to find where this slice ends;
            // if it is invalid, extend this slice over the broken one.
            init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, i+1), (buf_size-get_slice_offset(avctx, slices_hdr, i+1))*8);
            if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
                if(i+2 < slice_count)
                    size = get_slice_offset(avctx, slices_hdr, i+2) - offset;
                else
                    size = buf_size - offset;
            }else
                r->si.end = si.start;
        }
        last = rv34_decode_slice(r, r->si.end, buf + offset, size);
        s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
        if(last)
            break;
    }

    if(last){
        // Frame complete: run the loop filter, finish error resilience and
        // pick the picture to output.
        if(r->loop_filter)
            r->loop_filter(r);
        ff_er_frame_end(s);
        MPV_frame_end(s);
        if (s->pict_type == B_TYPE || s->low_delay) {
            *pict= *(AVFrame*)s->current_picture_ptr;
        } else if (s->last_picture_ptr != NULL) {
            *pict= *(AVFrame*)s->last_picture_ptr;
        }

        if(s->last_picture_ptr || s->low_delay){
            *data_size = sizeof(AVFrame);
            ff_print_debug_info(s, pict);
        }
        s->current_picture_ptr= NULL; //so we can detect if frame_end wasnt called (find some nicer solution...)
    }
    return buf_size;
}
1285 | |
1286 int ff_rv34_decode_end(AVCodecContext *avctx) | |
1287 { | |
1288 RV34DecContext *r = avctx->priv_data; | |
1289 | |
1290 MPV_common_end(&r->s); | |
1291 | |
1292 av_freep(&r->intra_types_hist); | |
1293 r->intra_types = NULL; | |
1294 av_freep(&r->mb_type); | |
1295 | |
1296 return 0; | |
1297 } |