annotate rv34.c @ 6365:80c7e9c015c6 libavcodec
fix crash on non-AltiVec powered machines: MPV_common_init_altivec doesn't check mm_flags
Patch by Alexander Strange %astrange A ithinksw PP com %
author | gpoirier |
---|---|
date | Mon, 18 Feb 2008 21:35:31 +0000 |
parents | b5702c981fe2 |
children | 493dc59d469a |
rev | line source |
---|---|
6026 | 1 /* |
2 * RV30/40 decoder common data | |
3 * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov | |
4 * | |
5 * This file is part of FFmpeg. | |
6 * | |
7 * FFmpeg is free software; you can redistribute it and/or | |
8 * modify it under the terms of the GNU Lesser General Public | |
9 * License as published by the Free Software Foundation; either | |
10 * version 2.1 of the License, or (at your option) any later version. | |
11 * | |
12 * FFmpeg is distributed in the hope that it will be useful, | |
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 * Lesser General Public License for more details. | |
16 * | |
17 * You should have received a copy of the GNU Lesser General Public | |
18 * License along with FFmpeg; if not, write to the Free Software | |
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
20 */ | |
21 | |
22 /** | |
23 * @file rv34.c | |
24 * RV30/40 decoder common data | |
25 */ | |
26 | |
27 #include "avcodec.h" | |
28 #include "dsputil.h" | |
29 #include "mpegvideo.h" | |
30 #include "golomb.h" | |
31 #include "rectangle.h" | |
32 | |
33 #include "rv34vlc.h" | |
34 #include "rv34data.h" | |
35 #include "rv34.h" | |
36 | |
37 //#define DEBUG | |
38 | |
39 /** translation of RV30/40 macroblock types to lavc ones */ | |
40 static const int rv34_mb_type_to_lavc[12] = { | |
41 MB_TYPE_INTRA, | |
42 MB_TYPE_INTRA16x16, | |
43 MB_TYPE_16x16 | MB_TYPE_L0, | |
44 MB_TYPE_8x8 | MB_TYPE_L0, | |
45 MB_TYPE_16x16 | MB_TYPE_L0, | |
46 MB_TYPE_16x16 | MB_TYPE_L1, | |
47 MB_TYPE_SKIP, | |
48 MB_TYPE_DIRECT2 | MB_TYPE_16x16, | |
49 MB_TYPE_16x8 | MB_TYPE_L0, | |
50 MB_TYPE_8x16 | MB_TYPE_L0, | |
51 MB_TYPE_16x16 | MB_TYPE_L0L1, | |
52 MB_TYPE_16x16 | MB_TYPE_L0 | |
53 }; | |
54 | |
55 | |
56 static RV34VLC intra_vlcs[NUM_INTRA_TABLES], inter_vlcs[NUM_INTER_TABLES]; | |
57 | |
58 /** | |
59 * @defgroup vlc RV30/40 VLC generating functions | |
60 * @{ | |
61 */ | |
62 | |
63 /** | |
64 * Generate VLC from codeword lengths. | |
65 * @param bits codeword lengths (zeroes are accepted) | |
66 * @param size length of input data | |
67 * @param insyms symbols for input codes (NULL for default ones) | |
68 */ | |
69 static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t *insyms) | |
70 { | |
71 int i; | |
72 int counts[17] = {0}, codes[17]; | |
73 uint16_t cw[size], syms[size]; | |
74 uint8_t bits2[size]; | |
75 int maxbits = 0, realsize = 0; | |
76 | |
77 for(i = 0; i < size; i++){ | |
78 if(bits[i]){ | |
79 bits2[realsize] = bits[i]; | |
80 syms[realsize] = insyms ? insyms[i] : i; | |
81 realsize++; | |
82 maxbits = FFMAX(maxbits, bits[i]); | |
83 counts[bits[i]]++; | |
84 } | |
85 } | |
86 | |
87 codes[0] = 0; | |
88 for(i = 0; i < 16; i++) | |
89 codes[i+1] = (codes[i] + counts[i]) << 1; | |
90 for(i = 0; i < realsize; i++) | |
91 cw[i] = codes[bits2[i]]++; | |
92 | |
93 init_vlc_sparse(vlc, FFMIN(maxbits, 9), realsize, | |
94 bits2, 1, 1, | |
95 cw, 2, 2, | |
96 syms, 2, 2, INIT_VLC_USE_STATIC); | |
97 } | |
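/*
 * Editor's illustration (not part of rv34.c): the counts/codes loops above
 * assign canonical codes, i.e. codewords of equal length are numbered
 * consecutively, starting from the value carried over from the previous
 * length.  The standalone toy program below mirrors those two loops for the
 * hypothetical length table {1,2,3,3} and prints the codewords 0, 10, 110
 * and 111; compile it separately if you want to experiment.
 */
#include <stdio.h>

int main(void)
{
    static const unsigned char bits[4] = { 1, 2, 3, 3 };
    int counts[17] = { 0 }, codes[17], cw[4];
    int i, j;

    for (i = 0; i < 4; i++)            /* histogram of code lengths */
        counts[bits[i]]++;
    codes[0] = 0;
    for (i = 0; i < 16; i++)           /* first code of each length */
        codes[i + 1] = (codes[i] + counts[i]) << 1;
    for (i = 0; i < 4; i++)            /* consecutive codes per length */
        cw[i] = codes[bits[i]]++;
    for (i = 0; i < 4; i++) {          /* print each codeword MSB first */
        for (j = bits[i] - 1; j >= 0; j--)
            putchar('0' + ((cw[i] >> j) & 1));
        putchar('\n');
    }
    return 0;
}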
98 | |
99 /** | |
100 * Initialize all tables. | |
101 */ | |
102 static void rv34_init_tables() | |
103 { | |
104 int i, j, k; | |
105 | |
106 for(i = 0; i < NUM_INTRA_TABLES; i++){ | |
107 for(j = 0; j < 2; j++){ | |
108 rv34_gen_vlc(rv34_table_intra_cbppat [i][j], CBPPAT_VLC_SIZE, &intra_vlcs[i].cbppattern[j], NULL); | |
109 rv34_gen_vlc(rv34_table_intra_secondpat[i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].second_pattern[j], NULL); | |
110 rv34_gen_vlc(rv34_table_intra_thirdpat [i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].third_pattern[j], NULL); | |
111 for(k = 0; k < 4; k++) | |
112 rv34_gen_vlc(rv34_table_intra_cbp[i][j+k*2], CBP_VLC_SIZE, &intra_vlcs[i].cbp[j][k], rv34_cbp_code); | |
113 } | |
114 for(j = 0; j < 4; j++) | |
115 rv34_gen_vlc(rv34_table_intra_firstpat[i][j], FIRSTBLK_VLC_SIZE, &intra_vlcs[i].first_pattern[j], NULL); | |
116 rv34_gen_vlc(rv34_intra_coeff[i], COEFF_VLC_SIZE, &intra_vlcs[i].coefficient, NULL); | |
117 } | |
118 | |
119 for(i = 0; i < NUM_INTER_TABLES; i++){ | |
120 rv34_gen_vlc(rv34_inter_cbppat[i], CBPPAT_VLC_SIZE, &inter_vlcs[i].cbppattern[0], NULL); | |
121 for(j = 0; j < 4; j++) | |
122 rv34_gen_vlc(rv34_inter_cbp[i][j], CBP_VLC_SIZE, &inter_vlcs[i].cbp[0][j], rv34_cbp_code); | |
123 for(j = 0; j < 2; j++){ | |
124 rv34_gen_vlc(rv34_table_inter_firstpat [i][j], FIRSTBLK_VLC_SIZE, &inter_vlcs[i].first_pattern[j], NULL); | |
125 rv34_gen_vlc(rv34_table_inter_secondpat[i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].second_pattern[j], NULL); | |
126 rv34_gen_vlc(rv34_table_inter_thirdpat [i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].third_pattern[j], NULL); | |
127 } | |
128 rv34_gen_vlc(rv34_inter_coeff[i], COEFF_VLC_SIZE, &inter_vlcs[i].coefficient, NULL); | |
129 } | |
130 } | |
131 | |
132 /** @} */ // vlc group | |
133 | |
134 | |
135 /** | |
136 * @defgroup transform RV30/40 inverse transform functions | |
137 * @{ | |
138 */ | |
139 | |
140 static av_always_inline void rv34_row_transform(int temp[16], DCTELEM *block) | |
141 { | |
142 int i; | |
143 | |
144 for(i=0; i<4; i++){ | |
145 const int z0= 13*(block[i+8*0] + block[i+8*2]); | |
146 const int z1= 13*(block[i+8*0] - block[i+8*2]); | |
147 const int z2= 7* block[i+8*1] - 17*block[i+8*3]; | |
148 const int z3= 17* block[i+8*1] + 7*block[i+8*3]; | |
149 | |
150 temp[4*i+0]= z0+z3; | |
151 temp[4*i+1]= z1+z2; | |
152 temp[4*i+2]= z1-z2; | |
153 temp[4*i+3]= z0-z3; | |
154 } | |
155 } | |
156 | |
157 /** | |
158 * RealVideo 3.0/4.0 inverse transform | |
159 * Code is almost the same as in SVQ3, only scaling is different. | |
160 */ | |
161 static void rv34_inv_transform(DCTELEM *block){ | |
162 int temp[16]; | |
163 int i; | |
164 | |
165 rv34_row_transform(temp, block); | |
166 | |
167 for(i=0; i<4; i++){ | |
168 const int z0= 13*(temp[4*0+i] + temp[4*2+i]) + 0x200; | |
169 const int z1= 13*(temp[4*0+i] - temp[4*2+i]) + 0x200; | |
170 const int z2= 7* temp[4*1+i] - 17*temp[4*3+i]; | |
171 const int z3= 17* temp[4*1+i] + 7*temp[4*3+i]; | |
172 | |
173 block[i*8+0]= (z0 + z3)>>10; | |
174 block[i*8+1]= (z1 + z2)>>10; | |
175 block[i*8+2]= (z1 - z2)>>10; | |
176 block[i*8+3]= (z0 - z3)>>10; | |
177 } | |
178 | |
179 } | |
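/*
 * Editor's annotation (not in the original file): written out, the 1-D
 * transform used by both passes above maps an input vector (a, b, c, d) to
 *
 *   t0 = 13*a + 17*b + 13*c +  7*d
 *   t1 = 13*a +  7*b - 13*c - 17*d
 *   t2 = 13*a -  7*b - 13*c + 17*d
 *   t3 = 13*a - 17*b + 13*c -  7*d
 *
 * The "+ 0x200" followed by ">> 10" in the column pass is a round-to-nearest
 * division by 1024 that rescales the result after the integer basis factors
 * 13, 17 and 7 have been applied twice.
 */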
180 | |
181 /** | |
182 * RealVideo 3.0/4.0 inverse transform for DC block | |
183 * | |
184 * Code is almost the same as rv34_inv_transform() | |
185 * but final coefficients are multiplied by 1.5 and have no rounding. | |
186 */ | |
187 static void rv34_inv_transform_noround(DCTELEM *block){ | |
188 int temp[16]; | |
189 int i; | |
190 | |
191 rv34_row_transform(temp, block); | |
192 | |
193 for(i=0; i<4; i++){ | |
194 const int z0= 13*(temp[4*0+i] + temp[4*2+i]); | |
195 const int z1= 13*(temp[4*0+i] - temp[4*2+i]); | |
196 const int z2= 7* temp[4*1+i] - 17*temp[4*3+i]; | |
197 const int z3= 17* temp[4*1+i] + 7*temp[4*3+i]; | |
198 | |
199 block[i*8+0]= ((z0 + z3)*3)>>11; | |
200 block[i*8+1]= ((z1 + z2)*3)>>11; | |
201 block[i*8+2]= ((z1 - z2)*3)>>11; | |
202 block[i*8+3]= ((z0 - z3)*3)>>11; | |
203 } | |
204 | |
205 } | |
206 | |
207 /** @} */ // transform | |
208 | |
209 | |
210 /** | |
211 * @defgroup block RV30/40 4x4 block decoding functions | |
212 * @{ | |
213 */ | |
214 | |
215 /** | |
216 * Decode coded block pattern. | |
217 */ | |
218 static int rv34_decode_cbp(GetBitContext *gb, RV34VLC *vlc, int table) | |
219 { | |
220 int pattern, code, cbp=0; | |
221 int ones; | |
222 static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000}; | |
223 static const int shifts[4] = { 0, 2, 8, 10 }; | |
224 const int *curshift = shifts; | |
225 int i, t, mask; | |
226 | |
227 code = get_vlc2(gb, vlc->cbppattern[table].table, 9, 2); | |
228 pattern = code & 0xF; | |
229 code >>= 4; | |
230 | |
231 ones = rv34_count_ones[pattern]; | |
232 | |
233 for(mask = 8; mask; mask >>= 1, curshift++){ | |
234 if(pattern & mask) | |
235 cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0]; | |
236 } | |
237 | |
238 for(i = 0; i < 4; i++){ | |
239 t = modulo_three_table[code][i]; | |
240 if(t == 1) | |
241 cbp |= cbp_masks[get_bits1(gb)] << i; | |
242 if(t == 2) | |
243 cbp |= cbp_masks[2] << i; | |
244 } | |
245 return cbp; | |
246 } | |
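/*
 * Editor's annotation (not in the original file): the value assembled above
 * packs the luma coded-block flags into bits 0-15 and the chroma flags into
 * bits 16-23 (cf. the U_CBP_MASK/V_CBP_MASK definitions further down and the
 * "cbp >> 16" store into r->cbp_chroma).  The cbppattern VLC supplies a 4-bit
 * mask of luma 8x8 quadrants that contain coded 4x4 blocks plus an index into
 * modulo_three_table whose base-3 digits state, for each chroma 4x4 position,
 * whether no, one (an extra bit then selects U or V) or both chroma planes
 * carry a coded block there.
 */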
247 | |
248 /** | |
249 * Get one coefficient value from the bitstream and store it. | |
250 */ | |
251 static inline void decode_coeff(DCTELEM *dst, int coef, int esc, GetBitContext *gb, VLC* vlc) | |
252 { | |
253 if(coef){ | |
254 if(coef == esc){ | |
255 coef = get_vlc2(gb, vlc->table, 9, 2); | |
256 if(coef > 23){ | |
257 coef -= 23; | |
258 coef = 22 + ((1 << coef) | get_bits(gb, coef)); | |
259 } | |
260 coef += esc; | |
261 } | |
262 if(get_bits1(gb)) | |
263 coef = -coef; | |
264 *dst = coef; | |
265 } | |
266 } | |
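/*
 * Editor's annotation (not in the original file): "coef" arrives as a base-3
 * digit from modulo_three_table.  Zero means no coefficient is present, a
 * value below the escape is the magnitude itself, and the escape value means
 * the magnitude is read from the coefficient VLC (with the escape added back
 * on top); VLC values above 23 further switch to an escape form where
 * (value - 23) extra raw bits are read.  Whenever a magnitude is stored, one
 * trailing bit gives the sign.
 */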
267 | |
268 /** | |
269 * Decode 2x2 subblock of coefficients. | |
270 */ | |
271 static inline void decode_subblock(DCTELEM *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc) | |
272 { | |
273 int coeffs[4]; | |
274 | |
275 coeffs[0] = modulo_three_table[code][0]; | |
276 coeffs[1] = modulo_three_table[code][1]; | |
277 coeffs[2] = modulo_three_table[code][2]; | |
278 coeffs[3] = modulo_three_table[code][3]; | |
279 decode_coeff(dst , coeffs[0], 3, gb, vlc); | |
280 if(is_block2){ | |
281 decode_coeff(dst+8, coeffs[1], 2, gb, vlc); | |
282 decode_coeff(dst+1, coeffs[2], 2, gb, vlc); | |
283 }else{ | |
284 decode_coeff(dst+1, coeffs[1], 2, gb, vlc); | |
285 decode_coeff(dst+8, coeffs[2], 2, gb, vlc); | |
286 } | |
287 decode_coeff(dst+9, coeffs[3], 2, gb, vlc); | |
288 } | |
289 | |
290 /** | |
291 * Decode coefficients for 4x4 block. | |
292 * | |
293 * This is done by filling 2x2 subblocks with decoded coefficients | |
294 * in this order (the same for subblocks and subblock coefficients): | |
295 * o--o | |
296 * / | |
297 * / | |
298 * o--o | |
299 */ | |
300 | |
301 static inline void rv34_decode_block(DCTELEM *dst, GetBitContext *gb, RV34VLC *rvlc, int fc, int sc) | |
302 { | |
303 int code, pattern; | |
304 | |
305 code = get_vlc2(gb, rvlc->first_pattern[fc].table, 9, 2); | |
306 | |
307 pattern = code & 0x7; | |
308 | |
309 code >>= 3; | |
310 decode_subblock(dst, code, 0, gb, &rvlc->coefficient); | |
311 | |
312 if(pattern & 4){ | |
313 code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2); | |
314 decode_subblock(dst + 2, code, 0, gb, &rvlc->coefficient); | |
315 } | |
316 if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block | |
317 code = get_vlc2(gb, rvlc->second_pattern[sc].table, 9, 2); | |
318 decode_subblock(dst + 8*2, code, 1, gb, &rvlc->coefficient); | |
319 } | |
320 if(pattern & 1){ | |
321 code = get_vlc2(gb, rvlc->third_pattern[sc].table, 9, 2); | |
322 decode_subblock(dst + 8*2+2, code, 0, gb, &rvlc->coefficient); | |
323 } | |
324 | |
325 } | |
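/*
 * Editor's annotation (not in the original file): dst indexes an 8-wide
 * DCTELEM row, so the decode_subblock() calls above fill the 2x2 groups at
 * dst, dst + 2, dst + 2*8 and dst + 2*8 + 2, i.e. the top-left, top-right,
 * bottom-left and bottom-right quarters of the 4x4 block, matching the scan
 * order sketched in the comment before this function.  The three low bits of
 * the first_pattern code flag which of the last three subblocks are present,
 * and the bottom-left one is decoded with its two middle coefficients swapped
 * (the is_block2 case in decode_subblock()).
 */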
326 | |
327 /** | |
328 * Dequantize ordinary 4x4 block. | |
329 * @todo optimize | |
330 */ | |
331 static inline void rv34_dequant4x4(DCTELEM *block, int Qdc, int Q) | |
332 { | |
333 int i, j; | |
334 | |
335 block[0] = (block[0] * Qdc + 8) >> 4; | |
336 for(i = 0; i < 4; i++) | |
337 for(j = !i; j < 4; j++) | |
338 block[j + i*8] = (block[j + i*8] * Q + 8) >> 4; | |
339 } | |
340 | |
341 /** | |
342 * Dequantize 4x4 block of DC values for 16x16 macroblock. | |
343 * @todo optimize | |
344 */ | |
345 static inline void rv34_dequant4x4_16x16(DCTELEM *block, int Qdc, int Q) | |
346 { | |
347 int i; | |
348 | |
349 for(i = 0; i < 3; i++) | |
350 block[rv34_dezigzag[i]] = (block[rv34_dezigzag[i]] * Qdc + 8) >> 4; | |
351 for(; i < 16; i++) | |
352 block[rv34_dezigzag[i]] = (block[rv34_dezigzag[i]] * Q + 8) >> 4; | |
353 } | |
354 /** @} */ //block functions | |
355 | |
356 | |
357 /** | |
358 * @defgroup bitstream RV30/40 bitstream parsing | |
359 * @{ | |
360 */ | |
361 | |
362 /** | |
363 * Decode starting slice position. | |
364 * @todo Maybe replace with ff_h263_decode_mba() ? | |
365 */ | |
366 int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size) | |
367 { | |
368 int i; | |
369 for(i = 0; i < 5; i++) | |
370 if(rv34_mb_max_sizes[i] > mb_size) | |
371 break; | |
372 return rv34_mb_bits_sizes[i]; | |
373 } | |
374 | |
375 /** | |
376 * Select VLC set for decoding from current quantizer, modifier and frame type. | |
377 */ | |
378 static inline RV34VLC* choose_vlc_set(int quant, int mod, int type) | |
379 { | |
380 if(mod == 2 && quant < 19) quant += 10; | |
381 else if(mod && quant < 26) quant += 5; | |
382 return type ? &inter_vlcs[rv34_quant_to_vlc_set[1][av_clip(quant, 0, 30)]] | |
383 : &intra_vlcs[rv34_quant_to_vlc_set[0][av_clip(quant, 0, 30)]]; | |
384 } | |
385 | |
386 /** | |
387 * Decode quantizer difference and return modified quantizer. | |
388 */ | |
389 static inline int rv34_decode_dquant(GetBitContext *gb, int quant) | |
390 { | |
391 if(get_bits1(gb)) | |
392 return rv34_dquant_tab[get_bits1(gb)][quant]; | |
393 else | |
394 return get_bits(gb, 5); | |
395 } | |
396 | |
397 /** @} */ //bitstream functions | |
398 | |
399 /** | |
400 * @defgroup mv motion vector related code (prediction, reconstruction, motion compensation) | |
401 * @{ | |
402 */ | |
403 | |
404 /** macroblock partition width in 8x8 blocks */ | |
405 static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 }; | |
406 | |
407 /** macroblock partition height in 8x8 blocks */ | |
408 static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 }; | |
409 | |
410 /** availability index for subblocks */ | |
411 static const uint8_t avail_indexes[4] = { 5, 6, 9, 10 }; | |
412 | |
413 /** | |
414 * motion vector prediction | |
415 * | |
416 * Motion prediction is performed for the block using the median of the | |
417 * motion vectors from the left, top and top-right blocks; in corner cases | |
418 * some other vectors may be used instead. | |
419 */ | |
420 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no) | |
421 { | |
422 MpegEncContext *s = &r->s; | |
423 int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; | |
424 int A[2] = {0}, B[2], C[2]; | |
425 int i, j; | |
426 int mx, my; | |
427 int avail_index = avail_indexes[subblock_no]; | |
428 int c_off = part_sizes_w[block_type]; | |
429 | |
430 mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride; | |
431 if(subblock_no == 3) | |
432 c_off = -1; | |
433 | |
434 if(r->avail_cache[avail_index - 1]){ | |
435 A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0]; | |
436 A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1]; | |
437 } | |
438 if(r->avail_cache[avail_index - 4]){ | |
439 B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0]; | |
440 B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1]; | |
441 }else{ | |
442 B[0] = A[0]; | |
443 B[1] = A[1]; | |
444 } | |
445 if(!r->avail_cache[avail_index - 4 + c_off]){ | |
446 if(r->avail_cache[avail_index - 4] && (r->avail_cache[avail_index - 1] || r->rv30)){ | |
447 C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0]; | |
448 C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1]; | |
449 }else{ | |
450 C[0] = A[0]; | |
451 C[1] = A[1]; | |
452 } | |
453 }else{ | |
454 C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0]; | |
455 C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1]; | |
456 } | |
457 mx = mid_pred(A[0], B[0], C[0]); | |
458 my = mid_pred(A[1], B[1], C[1]); | |
459 mx += r->dmv[dmv_no][0]; | |
460 my += r->dmv[dmv_no][1]; | |
461 for(j = 0; j < part_sizes_h[block_type]; j++){ | |
462 for(i = 0; i < part_sizes_w[block_type]; i++){ | |
463 s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx; | |
464 s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my; | |
465 } | |
466 } | |
467 } | |
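/*
 * Editor's illustration (not in the original file): with hypothetical
 * neighbouring vectors A = (8,-2), B = (0,4) and C = (2,2) the predictor is
 * (mid_pred(8,0,2), mid_pred(-2,4,2)) = (2,2); the decoded difference in
 * r->dmv is then added and the result is propagated to every 8x8 position
 * covered by the current partition.
 */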
468 | |
469 /** | |
6096 | 470 * Calculate motion vector component that should be added for direct blocks. |
471 */ | |
472 static int calc_add_mv(MpegEncContext *s, int dir, int component) | |
473 { | |
474 int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; | |
475 int sum; | |
476 | |
477 sum = (s->next_picture_ptr->motion_val[0][mv_pos][component] + | |
478 s->next_picture_ptr->motion_val[0][mv_pos + 1][component] + | |
479 s->next_picture_ptr->motion_val[0][mv_pos + s->b8_stride][component] + | |
480 s->next_picture_ptr->motion_val[0][mv_pos + s->b8_stride + 1][component]) >> 2; | |
481 return dir ? -(sum >> 1) : ((sum + 1) >> 1); | |
482 } | |
483 | |
484 /** | |
6026 | 485 * Predict motion vector for B-frame macroblock. |
486 */ | |
487 static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2], | |
488 int A_avail, int B_avail, int C_avail, | |
489 int *mx, int *my) | |
490 { | |
491 if(A_avail + B_avail + C_avail != 3){ | |
492 *mx = A[0] + B[0] + C[0]; | |
493 *my = A[1] + B[1] + C[1]; | |
494 if(A_avail + B_avail + C_avail == 2){ | |
495 *mx /= 2; | |
496 *my /= 2; | |
497 } | |
498 }else{ | |
499 *mx = mid_pred(A[0], B[0], C[0]); | |
500 *my = mid_pred(A[1], B[1], C[1]); | |
501 } | |
502 } | |
503 | |
504 /** | |
505 * motion vector prediction for B-frames | |
506 */ | |
507 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir) | |
508 { | |
509 MpegEncContext *s = &r->s; | |
510 int mb_pos = s->mb_x + s->mb_y * s->mb_stride; | |
511 int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; | |
512 int A[2], B[2], C[2]; | |
513 int has_A = 0, has_B = 0, has_C = 0; | |
514 int mx, my; | |
515 int i, j; | |
516 Picture *cur_pic = s->current_picture_ptr; | |
517 const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0; | |
518 int type = cur_pic->mb_type[mb_pos]; | |
519 | |
520 memset(A, 0, sizeof(A)); | |
521 memset(B, 0, sizeof(B)); | |
522 memset(C, 0, sizeof(C)); | |
523 if((r->avail_cache[5-1] & type) & mask){ | |
524 A[0] = cur_pic->motion_val[dir][mv_pos - 1][0]; | |
525 A[1] = cur_pic->motion_val[dir][mv_pos - 1][1]; | |
526 has_A = 1; | |
527 } | |
528 if((r->avail_cache[5-4] & type) & mask){ | |
529 B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0]; | |
530 B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1]; | |
531 has_B = 1; | |
532 } | |
533 if((r->avail_cache[5-2] & type) & mask){ | |
534 C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0]; | |
535 C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1]; | |
536 has_C = 1; | |
537 }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[5-5] & type) & mask){ | |
538 C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0]; | |
539 C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1]; | |
540 has_C = 1; | |
541 } | |
542 | |
543 rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my); | |
544 | |
545 mx += r->dmv[dir][0]; | |
546 my += r->dmv[dir][1]; | |
6096 | 547 |
548 if(block_type == RV34_MB_B_DIRECT){ | |
549 mx += calc_add_mv(s, dir, 0); | |
550 my += calc_add_mv(s, dir, 1); | |
551 } | |
6026 | 552 for(j = 0; j < 2; j++){ |
553 for(i = 0; i < 2; i++){ | |
554 cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx; | |
555 cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my; | |
556 } | |
557 } | |
558 if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD) | |
559 fill_rectangle(cur_pic->motion_val[!dir][mv_pos], 2, 2, s->b8_stride, 0, 4); | |
560 } | |
561 | |
6106 | 562 static const int chroma_coeffs[3] = { 8, 5, 3 }; |
563 | |
6026 | 564 /** |
565 * generic motion compensation function | |
566 * | |
567 * @param r decoder context | |
568 * @param block_type type of the current block | |
569 * @param xoff horizontal offset from the start of the current block | |
570 * @param yoff vertical offset from the start of the current block | |
571 * @param mv_off offset to the motion vector information | |
572 * @param width width of the current partition in 8x8 blocks | |
573 * @param height height of the current partition in 8x8 blocks | |
574 */ | |
575 static inline void rv34_mc(RV34DecContext *r, const int block_type, | |
576 const int xoff, const int yoff, int mv_off, | |
577 const int width, const int height, int dir, | |
578 const int thirdpel, | |
579 qpel_mc_func (*qpel_mc)[16], | |
580 h264_chroma_mc_func (*chroma_mc)) | |
581 { | |
582 MpegEncContext *s = &r->s; | |
583 uint8_t *Y, *U, *V, *srcY, *srcU, *srcV; | |
6121 | 584 int dxy, mx, my, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y; |
6026 | 585 int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off; |
586 int is16x16 = 1; | |
587 | |
588 if(thirdpel){ | |
6106 | 589 mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24); |
590 my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24); | |
591 lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3; | |
592 ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3; | |
593 uvmx = chroma_coeffs[(3*(mx&1) + lx) >> 1]; | |
594 uvmy = chroma_coeffs[(3*(my&1) + ly) >> 1]; | |
6026 | 595 }else{ |
596 mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2; | |
597 my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2; | |
6121 | 598 lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3; |
599 ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3; | |
6026 | 600 uvmx = mx & 6; |
601 uvmy = my & 6; | |
602 } | |
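/*
 * Editor's annotation (not in the original file): in the thirdpel branch
 * above, adding 3 << 24 (a large multiple of 3) before the "/ 3" and "% 3"
 * keeps the operands non-negative, so the truncating integer division acts
 * like a floor and the remainder stays in 0..2 even for negative motion
 * vectors; the bias of 1 << 24 (= (3 << 24) / 3) is subtracted again from the
 * integer part afterwards.
 */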
6121 | 603 dxy = ly*4 + lx; |
6026 | 604 srcY = dir ? s->next_picture_ptr->data[0] : s->last_picture_ptr->data[0]; |
605 srcU = dir ? s->next_picture_ptr->data[1] : s->last_picture_ptr->data[1]; | |
606 srcV = dir ? s->next_picture_ptr->data[2] : s->last_picture_ptr->data[2]; | |
607 src_x = s->mb_x * 16 + xoff + mx; | |
608 src_y = s->mb_y * 16 + yoff + my; | |
609 uvsrc_x = s->mb_x * 8 + (xoff >> 1) + (mx >> 1); | |
610 uvsrc_y = s->mb_y * 8 + (yoff >> 1) + (my >> 1); | |
611 srcY += src_y * s->linesize + src_x; | |
612 srcU += uvsrc_y * s->uvlinesize + uvsrc_x; | |
613 srcV += uvsrc_y * s->uvlinesize + uvsrc_x; | |
6121 | 614 if( (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 3 |
615 || (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 3){ | |
6026 | 616 uint8_t *uvbuf= s->edge_emu_buffer + 20 * s->linesize; |
617 | |
618 srcY -= 2 + 2*s->linesize; | |
619 ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, (width<<3)+4, (height<<3)+4, | |
620 src_x - 2, src_y - 2, s->h_edge_pos, s->v_edge_pos); | |
621 srcY = s->edge_emu_buffer + 2 + 2*s->linesize; | |
622 ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, (width<<2)+1, (height<<2)+1, | |
623 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); | |
624 ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, (width<<2)+1, (height<<2)+1, | |
625 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1); | |
626 srcU = uvbuf; | |
627 srcV = uvbuf + 16; | |
628 } | |
629 Y = s->dest[0] + xoff + yoff *s->linesize; | |
630 U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize; | |
631 V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize; | |
632 | |
633 if(block_type == RV34_MB_P_16x8){ | |
634 qpel_mc[1][dxy](Y, srcY, s->linesize); | |
635 Y += 8; | |
636 srcY += 8; | |
637 }else if(block_type == RV34_MB_P_8x16){ | |
638 qpel_mc[1][dxy](Y, srcY, s->linesize); | |
639 Y += 8 * s->linesize; | |
640 srcY += 8 * s->linesize; | |
641 } | |
642 is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16); | |
643 qpel_mc[!is16x16][dxy](Y, srcY, s->linesize); | |
644 chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy); | |
645 chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy); | |
646 } | |
647 | |
648 static void rv34_mc_1mv(RV34DecContext *r, const int block_type, | |
649 const int xoff, const int yoff, int mv_off, | |
650 const int width, const int height, int dir) | |
651 { | |
652 rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, | |
6106 | 653 r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab |
654 : r->s.dsp.put_h264_qpel_pixels_tab, | |
655 r->s.dsp.put_h264_chroma_pixels_tab); | |
6026 | 656 } |
657 | |
658 static void rv34_mc_2mv(RV34DecContext *r, const int block_type) | |
659 { | |
660 rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, | |
6106 | 661 r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab |
662 : r->s.dsp.put_h264_qpel_pixels_tab, | |
663 r->s.dsp.put_h264_chroma_pixels_tab); | |
6026 | 664 rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, |
6106 | 665 r->rv30 ? r->s.dsp.avg_rv30_tpel_pixels_tab |
666 : r->s.dsp.avg_h264_qpel_pixels_tab, | |
667 r->s.dsp.avg_h264_chroma_pixels_tab); | |
6026 | 668 } |
669 | |
670 /** number of motion vectors in each macroblock type */ | |
671 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 }; | |
672 | |
673 /** | |
674 * Decode motion vector differences | |
675 * and perform motion vector reconstruction and motion compensation. | |
676 */ | |
677 static int rv34_decode_mv(RV34DecContext *r, int block_type) | |
678 { | |
679 MpegEncContext *s = &r->s; | |
680 GetBitContext *gb = &s->gb; | |
681 int i; | |
682 | |
683 memset(r->dmv, 0, sizeof(r->dmv)); | |
684 for(i = 0; i < num_mvs[block_type]; i++){ | |
685 r->dmv[i][0] = svq3_get_se_golomb(gb); | |
686 r->dmv[i][1] = svq3_get_se_golomb(gb); | |
687 } | |
688 switch(block_type){ | |
689 case RV34_MB_TYPE_INTRA: | |
690 case RV34_MB_TYPE_INTRA16x16: | |
691 fill_rectangle(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], 2, 2, s->b8_stride, 0, 4); | |
692 return 0; | |
693 case RV34_MB_SKIP: | |
694 if(s->pict_type == P_TYPE){ | |
695 fill_rectangle(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], 2, 2, s->b8_stride, 0, 4); | |
696 rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0); | |
697 break; | |
698 } | |
699 case RV34_MB_B_DIRECT: | |
700 rv34_pred_mv_b (r, RV34_MB_B_DIRECT, 0); | |
701 rv34_pred_mv_b (r, RV34_MB_B_DIRECT, 1); | |
702 rv34_mc_2mv (r, RV34_MB_B_DIRECT); | |
703 break; | |
704 case RV34_MB_P_16x16: | |
705 case RV34_MB_P_MIX16x16: | |
706 rv34_pred_mv(r, block_type, 0, 0); | |
707 rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0); | |
708 break; | |
709 case RV34_MB_B_FORWARD: | |
710 case RV34_MB_B_BACKWARD: | |
711 r->dmv[1][0] = r->dmv[0][0]; | |
712 r->dmv[1][1] = r->dmv[0][1]; | |
713 rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD); | |
714 rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD); | |
715 break; | |
716 case RV34_MB_P_16x8: | |
717 case RV34_MB_P_8x16: | |
718 rv34_pred_mv(r, block_type, 0, 0); | |
719 rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1); | |
720 if(block_type == RV34_MB_P_16x8){ | |
721 rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0); | |
722 rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0); | |
723 } | |
724 if(block_type == RV34_MB_P_8x16){ | |
725 rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0); | |
726 rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0); | |
727 } | |
728 break; | |
729 case RV34_MB_B_BIDIR: | |
730 rv34_pred_mv_b (r, block_type, 0); | |
731 rv34_pred_mv_b (r, block_type, 1); | |
732 rv34_mc_2mv (r, block_type); | |
733 break; | |
734 case RV34_MB_P_8x8: | |
735 for(i=0;i< 4;i++){ | |
736 rv34_pred_mv(r, block_type, i, i); | |
737 rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0); | |
738 } | |
739 break; | |
740 } | |
741 | |
742 return 0; | |
743 } | |
744 /** @} */ // mv group | |
745 | |
746 /** | |
747 * @defgroup recons Macroblock reconstruction functions | |
748 * @{ | |
749 */ | |
750 /** mapping of RV30/40 intra prediction types to standard H.264 types */ | |
751 static const int ittrans[9] = { | |
752 DC_PRED, VERT_PRED, HOR_PRED, DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_LEFT_PRED, | |
753 VERT_RIGHT_PRED, VERT_LEFT_PRED, HOR_UP_PRED, HOR_DOWN_PRED, | |
754 }; | |
755 | |
756 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */ | |
757 static const int ittrans16[4] = { | |
758 DC_PRED8x8, VERT_PRED8x8, HOR_PRED8x8, PLANE_PRED8x8, | |
759 }; | |
760 | |
761 /** | |
762 * Perform 4x4 intra prediction. | |
763 */ | |
764 static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right) | |
765 { | |
766 uint8_t *prev = dst - stride + 4; | |
767 uint32_t topleft; | |
768 | |
769 if(!up && !left) | |
770 itype = DC_128_PRED; | |
771 else if(!up){ | |
772 if(itype == VERT_PRED) itype = HOR_PRED; | |
773 if(itype == DC_PRED) itype = LEFT_DC_PRED; | |
774 }else if(!left){ | |
775 if(itype == HOR_PRED) itype = VERT_PRED; | |
776 if(itype == DC_PRED) itype = TOP_DC_PRED; | |
777 if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN; | |
778 } | |
779 if(!down){ | |
780 if(itype == DIAG_DOWN_LEFT_PRED) itype = DIAG_DOWN_LEFT_PRED_RV40_NODOWN; | |
781 if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN; | |
6036 | 782 if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN; |
6026 | 783 } |
784 if(!right && up){ | |
785 topleft = dst[-stride + 3] * 0x01010101; | |
786 prev = &topleft; | |
787 } | |
788 r->h.pred4x4[itype](dst, prev, stride); | |
789 } | |
790 | |
791 /** add_pixels_clamped for 4x4 block */ | |
792 static void rv34_add_4x4_block(uint8_t *dst, int stride, DCTELEM block[64], int off) | |
793 { | |
794 int x, y; | |
795 for(y = 0; y < 4; y++) | |
796 for(x = 0; x < 4; x++) | |
797 dst[x + y*stride] = av_clip_uint8(dst[x + y*stride] + block[off + x+y*8]); | |
798 } | |
799 | |
800 static inline int adjust_pred16(int itype, int up, int left) | |
801 { | |
802 if(!up && !left) | |
803 itype = DC_128_PRED8x8; | |
804 else if(!up){ | |
805 if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8; | |
806 if(itype == VERT_PRED8x8) itype = HOR_PRED8x8; | |
807 if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8; | |
808 }else if(!left){ | |
809 if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8; | |
810 if(itype == HOR_PRED8x8) itype = VERT_PRED8x8; | |
811 if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8; | |
812 } | |
813 return itype; | |
814 } | |
815 | |
816 static void rv34_output_macroblock(RV34DecContext *r, int8_t *intra_types, int cbp, int is16) | |
817 { | |
818 MpegEncContext *s = &r->s; | |
819 DSPContext *dsp = &s->dsp; | |
820 int i, j; | |
821 uint8_t *Y, *U, *V; | |
822 int itype; | |
823 int avail[6*8] = {0}; | |
824 int idx; | |
825 | |
826 // Set neighbour information. | |
827 if(r->avail_cache[0]) | |
828 avail[0] = 1; | |
829 if(r->avail_cache[1]) | |
830 avail[1] = avail[2] = 1; | |
831 if(r->avail_cache[2]) | |
832 avail[3] = avail[4] = 1; | |
833 if(r->avail_cache[3]) | |
834 avail[5] = 1; | |
835 if(r->avail_cache[4]) | |
836 avail[8] = avail[16] = 1; | |
837 if(r->avail_cache[8]) | |
838 avail[24] = avail[32] = 1; | |
839 | |
840 Y = s->dest[0]; | |
841 U = s->dest[1]; | |
842 V = s->dest[2]; | |
843 if(!is16){ | |
844 for(j = 0; j < 4; j++){ | |
845 idx = 9 + j*8; | |
846 for(i = 0; i < 4; i++, cbp >>= 1, Y += 4, idx++){ | |
847 rv34_pred_4x4_block(r, Y, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]); | |
848 avail[idx] = 1; | |
849 if(cbp & 1) | |
850 rv34_add_4x4_block(Y, s->linesize, s->block[(i>>1)+(j&2)], (i&1)*4+(j&1)*32); | |
851 } | |
852 Y += s->linesize * 4 - 4*4; | |
853 intra_types += s->b4_stride; | |
854 } | |
855 intra_types -= s->b4_stride * 4; | |
856 fill_rectangle(r->avail_cache + 5, 2, 2, 4, 0, 4); | |
857 for(j = 0; j < 2; j++){ | |
858 idx = 5 + j*4; | |
859 for(i = 0; i < 2; i++, cbp >>= 1, idx++){ | |
860 rv34_pred_4x4_block(r, U + i*4 + j*4*s->uvlinesize, s->uvlinesize, ittrans[intra_types[i*2+j*2*s->b4_stride]], r->avail_cache[idx-4], r->avail_cache[idx-1], !i && !j, r->avail_cache[idx-3]); | |
861 rv34_pred_4x4_block(r, V + i*4 + j*4*s->uvlinesize, s->uvlinesize, ittrans[intra_types[i*2+j*2*s->b4_stride]], r->avail_cache[idx-4], r->avail_cache[idx-1], !i && !j, r->avail_cache[idx-3]); | |
862 r->avail_cache[idx] = 1; | |
863 if(cbp & 0x01) | |
864 rv34_add_4x4_block(U + i*4 + j*4*s->uvlinesize, s->uvlinesize, s->block[4], i*4+j*32); | |
865 if(cbp & 0x10) | |
866 rv34_add_4x4_block(V + i*4 + j*4*s->uvlinesize, s->uvlinesize, s->block[5], i*4+j*32); | |
867 } | |
868 } | |
869 }else{ | |
870 itype = ittrans16[intra_types[0]]; | |
871 itype = adjust_pred16(itype, r->avail_cache[5-4], r->avail_cache[5-1]); | |
872 r->h.pred16x16[itype](Y, s->linesize); | |
873 dsp->add_pixels_clamped(s->block[0], Y, s->current_picture.linesize[0]); | |
874 dsp->add_pixels_clamped(s->block[1], Y + 8, s->current_picture.linesize[0]); | |
875 Y += s->current_picture.linesize[0] * 8; | |
876 dsp->add_pixels_clamped(s->block[2], Y, s->current_picture.linesize[0]); | |
877 dsp->add_pixels_clamped(s->block[3], Y + 8, s->current_picture.linesize[0]); | |
878 | |
879 itype = ittrans16[intra_types[0]]; | |
880 if(itype == PLANE_PRED8x8) itype = DC_PRED8x8; | |
881 itype = adjust_pred16(itype, r->avail_cache[5-4], r->avail_cache[5-1]); | |
882 r->h.pred8x8[itype](U, s->uvlinesize); | |
883 dsp->add_pixels_clamped(s->block[4], U, s->uvlinesize); | |
884 r->h.pred8x8[itype](V, s->uvlinesize); | |
885 dsp->add_pixels_clamped(s->block[5], V, s->uvlinesize); | |
886 } | |
887 } | |
888 | |
889 /** @} */ // recons group | |
890 | |
891 /** | |
892 * @addtogroup bitstream | |
893 * Decode macroblock header and return CBP in case of success, -1 otherwise. | |
894 */ | |
895 static int rv34_decode_mb_header(RV34DecContext *r, int8_t *intra_types) | |
896 { | |
897 MpegEncContext *s = &r->s; | |
898 GetBitContext *gb = &s->gb; | |
899 int mb_pos = s->mb_x + s->mb_y * s->mb_stride; | |
900 int i, t; | |
901 | |
902 if(!r->si.type){ | |
903 r->is16 = get_bits1(gb); | |
904 if(!r->is16 && !r->rv30){ | |
905 if(!get_bits1(gb)) | |
906 av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n"); | |
907 } | |
908 s->current_picture_ptr->mb_type[mb_pos] = r->is16 ? MB_TYPE_INTRA16x16 : MB_TYPE_INTRA; | |
909 r->block_type = r->is16 ? RV34_MB_TYPE_INTRA16x16 : RV34_MB_TYPE_INTRA; | |
910 }else{ | |
911 r->block_type = r->decode_mb_info(r); | |
912 if(r->block_type == -1) | |
913 return -1; | |
914 s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type]; | |
915 r->mb_type[mb_pos] = r->block_type; | |
916 if(r->block_type == RV34_MB_SKIP){ | |
917 if(s->pict_type == P_TYPE) | |
918 r->mb_type[mb_pos] = RV34_MB_P_16x16; | |
919 if(s->pict_type == B_TYPE) | |
920 r->mb_type[mb_pos] = RV34_MB_B_DIRECT; | |
921 } | |
922 r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]); | |
923 rv34_decode_mv(r, r->block_type); | |
924 if(r->block_type == RV34_MB_SKIP){ | |
925 fill_rectangle(intra_types, 4, 4, s->b4_stride, 0, sizeof(intra_types[0])); | |
926 return 0; | |
927 } | |
928 r->chroma_vlc = 1; | |
929 r->luma_vlc = 0; | |
930 } | |
931 if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){ | |
932 if(r->is16){ | |
933 t = get_bits(gb, 2); | |
934 fill_rectangle(intra_types, 4, 4, s->b4_stride, t, sizeof(intra_types[0])); | |
935 r->luma_vlc = 2; | |
936 }else{ | |
937 if(r->decode_intra_types(r, gb, intra_types) < 0) | |
938 return -1; | |
939 r->luma_vlc = 1; | |
940 } | |
941 r->chroma_vlc = 0; | |
942 r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0); | |
943 }else{ | |
944 for(i = 0; i < 16; i++) | |
945 intra_types[(i & 3) + (i>>2) * s->b4_stride] = 0; | |
946 r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1); | |
947 if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){ | |
948 r->is16 = 1; | |
949 r->chroma_vlc = 1; | |
950 r->luma_vlc = 2; | |
951 r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0); | |
952 } | |
953 } | |
954 | |
955 return rv34_decode_cbp(gb, r->cur_vlcs, r->is16); | |
956 } | |
957 | |
958 /** | |
959 * @addtogroup recons | |
960 * @{ | |
961 */ | |
962 /** | |
963 * mask for retrieving all bits in coded block pattern | |
964 * corresponding to one 8x8 block | |
965 */ | |
966 #define LUMA_CBP_BLOCK_MASK 0x303 | |
967 | |
968 #define U_CBP_MASK 0x0F0000 | |
969 #define V_CBP_MASK 0xF00000 | |
970 | |
971 | |
972 static void rv34_apply_differences(RV34DecContext *r, int cbp) | |
973 { | |
974 static const int shifts[4] = { 0, 2, 8, 10 }; | |
975 MpegEncContext *s = &r->s; | |
976 int i; | |
977 | |
978 for(i = 0; i < 4; i++) | |
979 if(cbp & (LUMA_CBP_BLOCK_MASK << shifts[i])) | |
980 s->dsp.add_pixels_clamped(s->block[i], s->dest[0] + (i & 1)*8 + (i&2)*4*s->linesize, s->linesize); | |
981 if(cbp & U_CBP_MASK) | |
982 s->dsp.add_pixels_clamped(s->block[4], s->dest[1], s->uvlinesize); | |
983 if(cbp & V_CBP_MASK) | |
984 s->dsp.add_pixels_clamped(s->block[5], s->dest[2], s->uvlinesize); | |
985 } | |
986 | |
987 static int rv34_decode_macroblock(RV34DecContext *r, int8_t *intra_types) | |
988 { | |
989 MpegEncContext *s = &r->s; | |
990 GetBitContext *gb = &s->gb; | |
991 int cbp, cbp2; | |
992 int i, blknum, blkoff; | |
993 DCTELEM block16[64]; | |
994 int luma_dc_quant; | |
995 int dist; | |
996 int mb_pos = s->mb_x + s->mb_y * s->mb_stride; | |
997 | |
998 // Calculate which neighbours are available. Maybe it's worth optimizing too. | |
999 memset(r->avail_cache, 0, sizeof(r->avail_cache)); | |
1000 fill_rectangle(r->avail_cache + 5, 2, 2, 4, 1, 4); | |
1001 dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width; | |
1002 if(s->mb_x && dist) | |
1003 r->avail_cache[4] = | |
1004 r->avail_cache[8] = s->current_picture_ptr->mb_type[mb_pos - 1]; | |
1005 if(dist >= s->mb_width) | |
1006 r->avail_cache[1] = | |
1007 r->avail_cache[2] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride]; | |
1008 if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1) | |
1009 r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1]; | |
1010 if(s->mb_x && dist > s->mb_width) | |
1011 r->avail_cache[0] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1]; | |
1012 | |
1013 s->qscale = r->si.quant; | |
1014 cbp = cbp2 = rv34_decode_mb_header(r, intra_types); | |
6155 | 1015 r->cbp_luma [s->mb_x + s->mb_y * s->mb_stride] = cbp; |
1016 r->cbp_chroma[s->mb_x + s->mb_y * s->mb_stride] = cbp >> 16; | |
6156 | 1017 s->current_picture.qscale_table[s->mb_x + s->mb_y * s->mb_stride] = s->qscale; |
6026 | 1018 |
1019 if(cbp == -1) | |
1020 return -1; | |
1021 | |
1022 luma_dc_quant = r->si.type ? r->luma_dc_quant_p[s->qscale] : r->luma_dc_quant_i[s->qscale]; | |
1023 if(r->is16){ | |
1024 memset(block16, 0, sizeof(block16)); | |
1025 rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0); | |
1026 rv34_dequant4x4_16x16(block16, rv34_qscale_tab[luma_dc_quant],rv34_qscale_tab[s->qscale]); | |
1027 rv34_inv_transform_noround(block16); | |
1028 } | |
1029 | |
1030 for(i = 0; i < 16; i++, cbp >>= 1){ | |
1031 if(!r->is16 && !(cbp & 1)) continue; | |
1032 blknum = ((i & 2) >> 1) + ((i & 8) >> 2); | |
1033 blkoff = ((i & 1) << 2) + ((i & 4) << 3); | |
1034 if(cbp & 1) | |
1035 rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->luma_vlc, 0); | |
1036 rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[luma_dc_quant],rv34_qscale_tab[s->qscale]); | |
1037 if(r->is16) //FIXME: optimize | |
1038 s->block[blknum][blkoff] = block16[(i & 3) | ((i & 0xC) << 1)]; | |
1039 rv34_inv_transform(s->block[blknum] + blkoff); | |
1040 } | |
1041 if(r->block_type == RV34_MB_P_MIX16x16) | |
1042 r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1); | |
1043 for(; i < 24; i++, cbp >>= 1){ | |
1044 if(!(cbp & 1)) continue; | |
1045 blknum = ((i & 4) >> 2) + 4; | |
1046 blkoff = ((i & 1) << 2) + ((i & 2) << 4); | |
1047 rv34_decode_block(s->block[blknum] + blkoff, gb, r->cur_vlcs, r->chroma_vlc, 1); | |
1048 rv34_dequant4x4(s->block[blknum] + blkoff, rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]],rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]]); | |
1049 rv34_inv_transform(s->block[blknum] + blkoff); | |
1050 } | |
1051 if(IS_INTRA(s->current_picture_ptr->mb_type[s->mb_x + s->mb_y*s->mb_stride])) | |
1052 rv34_output_macroblock(r, intra_types, cbp2, r->is16); | |
1053 else | |
1054 rv34_apply_differences(r, cbp2); | |
1055 | |
1056 return 0; | |
1057 } | |
1058 | |
1059 static int check_slice_end(RV34DecContext *r, MpegEncContext *s) | |
1060 { | |
1061 int bits; | |
1062 if(s->mb_y >= s->mb_height) | |
1063 return 1; | |
1064 if(!s->mb_num_left) | |
1065 return 1; | |
1066 if(r->s.mb_skip_run > 1) | |
1067 return 0; | |
1068 bits = r->bits - get_bits_count(&s->gb); | |
1069 if(bits < 0 || (bits < 8 && !show_bits(&s->gb, bits))) | |
1070 return 1; | |
1071 return 0; | |
1072 } | |
1073 | |
1074 static inline int slice_compare(SliceInfo *si1, SliceInfo *si2) | |
1075 { | |
1076 return si1->type != si2->type || | |
1077 si1->start >= si2->start || | |
1078 si1->width != si2->width || | |
1079 si1->height != si2->height; | |
1080 } | |
1081 | |
1082 static int rv34_decode_slice(RV34DecContext *r, int end, uint8_t* buf, int buf_size) | |
1083 { | |
1084 MpegEncContext *s = &r->s; | |
1085 GetBitContext *gb = &s->gb; | |
1086 int mb_pos; | |
1087 int res; | |
1088 | |
1089 init_get_bits(&r->s.gb, buf, buf_size*8); | |
1090 res = r->parse_slice_header(r, gb, &r->si); | |
1091 if(res < 0){ | |
1092 av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n"); | |
1093 return -1; | |
1094 } | |
1095 | |
1096 if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) { | |
1097 if(s->width != r->si.width || s->height != r->si.height){ | |
1098 av_log(s->avctx, AV_LOG_DEBUG, "Changing dimensions to %dx%d\n", r->si.width,r->si.height); | |
1099 MPV_common_end(s); | |
1100 s->width = r->si.width; | |
1101 s->height = r->si.height; | |
1102 if(MPV_common_init(s) < 0) | |
1103 return -1; | |
1104 r->intra_types_hist = av_realloc(r->intra_types_hist, s->b4_stride * 4 * 2 * sizeof(*r->intra_types_hist)); | |
1105 r->intra_types = r->intra_types_hist + s->b4_stride * 4; | |
1106 r->mb_type = av_realloc(r->mb_type, r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type)); | |
6155 | 1107 r->cbp_luma = av_realloc(r->cbp_luma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma)); |
1108 r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma)); | |
6026 | 1109 } |
1110 s->pict_type = r->si.type ? r->si.type : I_TYPE; | |
1111 if(MPV_frame_start(s, s->avctx) < 0) | |
1112 return -1; | |
1113 ff_er_frame_start(s); | |
1114 s->current_picture_ptr = &s->current_picture; | |
1115 s->mb_x = s->mb_y = 0; | |
1116 } | |
1117 | |
1118 r->si.end = end; | |
1119 s->qscale = r->si.quant; | |
1120 r->bits = buf_size*8; | |
1121 s->mb_num_left = r->si.end - r->si.start; | |
1122 r->s.mb_skip_run = 0; | |
1123 | |
1124 mb_pos = s->mb_x + s->mb_y * s->mb_width; | |
1125 if(r->si.start != mb_pos){ | |
1126 av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos); | |
1127 s->mb_x = r->si.start % s->mb_width; | |
1128 s->mb_y = r->si.start / s->mb_width; | |
1129 } | |
1130 memset(r->intra_types_hist, -1, s->b4_stride * 4 * 2 * sizeof(*r->intra_types_hist)); | |
1131 s->first_slice_line = 1; | |
1132 s->resync_mb_x= s->mb_x; | |
1133 s->resync_mb_y= s->mb_y; | |
1134 | |
1135 ff_init_block_index(s); | |
1136 while(!check_slice_end(r, s)) { | |
1137 ff_update_block_index(s); | |
1138 s->dsp.clear_blocks(s->block[0]); | |
1139 | |
1140 if(rv34_decode_macroblock(r, r->intra_types + s->mb_x * 4 + 1) < 0){ | |
1141 ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR); | |
1142 return -1; | |
1143 } | |
1144 if (++s->mb_x == s->mb_width) { | |
1145 s->mb_x = 0; | |
1146 s->mb_y++; | |
1147 ff_init_block_index(s); | |
1148 | |
1149 memmove(r->intra_types_hist, r->intra_types, s->b4_stride * 4 * sizeof(*r->intra_types_hist)); | |
1150 memset(r->intra_types, -1, s->b4_stride * 4 * sizeof(*r->intra_types_hist)); | |
1151 } | |
1152 if(s->mb_x == s->resync_mb_x) | |
1153 s->first_slice_line=0; | |
1154 s->mb_num_left--; | |
1155 } | |
1156 ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END); | |
1157 | |
1158 return (s->mb_y == s->mb_height); | |
1159 } | |
1160 | |
1161 /** @} */ // recons group end | |
1162 | |
1163 /** | |
1164 * Initialize decoder. | |
1165 */ | |
1166 int ff_rv34_decode_init(AVCodecContext *avctx) | |
1167 { | |
1168 RV34DecContext *r = avctx->priv_data; | |
1169 MpegEncContext *s = &r->s; | |
1170 | |
1171 MPV_decode_defaults(s); | |
1172 s->avctx= avctx; | |
1173 s->out_format = FMT_H263; | |
1174 s->codec_id= avctx->codec_id; | |
1175 | |
1176 s->width = avctx->width; | |
1177 s->height = avctx->height; | |
1178 | |
1179 r->s.avctx = avctx; | |
1180 avctx->flags |= CODEC_FLAG_EMU_EDGE; | |
1181 r->s.flags |= CODEC_FLAG_EMU_EDGE; | |
1182 avctx->pix_fmt = PIX_FMT_YUV420P; | |
1183 avctx->has_b_frames = 1; | |
1184 s->low_delay = 0; | |
1185 | |
1186 if (MPV_common_init(s) < 0) | |
1187 return -1; | |
1188 | |
1189 ff_h264_pred_init(&r->h, CODEC_ID_RV40); | |
1190 | |
1191 r->intra_types_hist = av_malloc(s->b4_stride * 4 * 2 * sizeof(*r->intra_types_hist)); | |
1192 r->intra_types = r->intra_types_hist + s->b4_stride * 4; | |
1193 | |
1194 r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type)); | |
1195 | |
6155 | 1196 r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma)); |
1197 r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma)); | |
1198 | |
6026 | 1199 if(!intra_vlcs[0].cbppattern[0].bits) |
1200 rv34_init_tables(); | |
1201 | |
1202 return 0; | |
1203 } | |
1204 | |
1205 static int get_slice_offset(AVCodecContext *avctx, uint8_t *buf, int n) | |
1206 { | |
1207 if(avctx->slice_count) return avctx->slice_offset[n]; | |
1208 else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8); | |
1209 } | |
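/*
 * Editor's annotation (not in the original file): when the container does not
 * hand over pre-parsed slice offsets (avctx->slice_count == 0),
 * ff_rv34_decode_frame() below expects the packet to start with one byte
 * holding slice_count - 1 followed by eight bytes per slice; each entry
 * appears to consist of a 4-byte marker and a 4-byte offset, and the marker
 * being 1 in little-endian order is used here to decide whether the offset
 * itself is little- or big-endian.
 */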
1210 | |
1211 int ff_rv34_decode_frame(AVCodecContext *avctx, | |
1212 void *data, int *data_size, | |
1213 uint8_t *buf, int buf_size) | |
1214 { | |
1215 RV34DecContext *r = avctx->priv_data; | |
1216 MpegEncContext *s = &r->s; | |
1217 AVFrame *pict = data; | |
1218 SliceInfo si; | |
1219 int i; | |
1220 int slice_count; | |
1221 uint8_t *slices_hdr = NULL; | |
1222 int last = 0; | |
1223 | |
1224 /* no supplementary picture */ | |
1225 if (buf_size == 0) { | |
1226 /* special case for last picture */ | |
1227 if (s->low_delay==0 && s->next_picture_ptr) { | |
1228 *pict= *(AVFrame*)s->next_picture_ptr; | |
1229 s->next_picture_ptr= NULL; | |
1230 | |
1231 *data_size = sizeof(AVFrame); | |
1232 } | |
1233 return 0; | |
1234 } | |
1235 | |
1236 if(!avctx->slice_count){ | |
1237 slice_count = (*buf++) + 1; | |
1238 slices_hdr = buf + 4; | |
1239 buf += 8 * slice_count; | |
1240 }else | |
1241 slice_count = avctx->slice_count; | |
1242 | |
1243 for(i=0; i<slice_count; i++){ | |
1244 int offset= get_slice_offset(avctx, slices_hdr, i); | |
1245 int size; | |
1246 if(i+1 == slice_count) | |
1247 size= buf_size - offset; | |
1248 else | |
1249 size= get_slice_offset(avctx, slices_hdr, i+1) - offset; | |
1250 | |
1251 r->si.end = s->mb_width * s->mb_height; | |
1252 if(i+1 < slice_count){ | |
1253 init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, i+1), (buf_size-get_slice_offset(avctx, slices_hdr, i+1))*8); | |
1254 if(r->parse_slice_header(r, &r->s.gb, &si) < 0){ | |
1255 if(i+2 < slice_count) | |
1256 size = get_slice_offset(avctx, slices_hdr, i+2) - offset; | |
1257 else | |
1258 size = buf_size - offset; | |
1259 }else | |
1260 r->si.end = si.start; | |
1261 } | |
1262 last = rv34_decode_slice(r, r->si.end, buf + offset, size); | |
1263 s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start; | |
1264 if(last) | |
1265 break; | |
1266 } | |
1267 | |
1268 if(last){ | |
1269 if(r->loop_filter) | |
1270 r->loop_filter(r); | |
1271 ff_er_frame_end(s); | |
1272 MPV_frame_end(s); | |
1273 if (s->pict_type == B_TYPE || s->low_delay) { | |
1274 *pict= *(AVFrame*)s->current_picture_ptr; | |
1275 } else if (s->last_picture_ptr != NULL) { | |
1276 *pict= *(AVFrame*)s->last_picture_ptr; | |
1277 } | |
1278 | |
1279 if(s->last_picture_ptr || s->low_delay){ | |
1280 *data_size = sizeof(AVFrame); | |
1281 ff_print_debug_info(s, pict); | |
1282 } | |
1283 s->current_picture_ptr= NULL; //so we can detect if frame_end wasn't called (find some nicer solution...) | |
1284 } | |
1285 return buf_size; | |
1286 } | |
1287 | |
1288 int ff_rv34_decode_end(AVCodecContext *avctx) | |
1289 { | |
1290 RV34DecContext *r = avctx->priv_data; | |
1291 | |
1292 MPV_common_end(&r->s); | |
1293 | |
1294 av_freep(&r->intra_types_hist); | |
1295 r->intra_types = NULL; | |
1296 av_freep(&r->mb_type); | |
1297 | |
1298 return 0; | |
1299 } |