comparison cavs.c @ 3380:1f47e26323bc libavcodec

new files for the CAVS decoder by Stefan Gehrer <stefan.gehrer@gmx.de>
author michael
date Sat, 01 Jul 2006 22:52:56 +0000
parents
children 62e51b7a1b36
3379:69901769c811 3380:1f47e26323bc
1 /*
2 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
3 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20 #include "avcodec.h"
21 #include "bitstream.h"
22 #include "golomb.h"
23 #include "mpegvideo.h"
24 #include "cavsdata.h"
25
26 typedef struct {
27 MpegEncContext s;
28 Picture picture; //currently decoded frame
29 Picture DPB[2]; //reference frames
30 int dist[2]; //temporal distances from current frame to ref frames
31 int profile, level;
32 int aspect_ratio;
33 int mb_width, mb_height;
34 int pic_type;
35 int progressive;
36 int pic_structure;
37 int skip_mode_flag;
38 int loop_filter_disable;
39 int alpha_offset, beta_offset;
40 int ref_flag;
41 int mbx, mby;
42 int flags;
43 int stc;
44 uint8_t *cy, *cu, *cv;
45 int left_qp;
46 uint8_t *top_qp;
47
48 /* mv motion vector cache
49 0: D3 B2 B3 C2
50 4: A1 X0 X1 -
51 8: A3 X2 X3 -
52
53 X are the vectors in the current macroblock (5,6,9,10)
54 A is the macroblock to the left (4,8)
55 B is the macroblock to the top (1,2)
56 C is the macroblock to the top-right (3)
57 D is the macroblock to the top-left (0)
58
59 the same is repeated for backward motion vectors */
60 vector_t mv[2*4*3];
61 vector_t *top_mv[2];
62 vector_t *col_mv;
63
64 /* luma pred mode cache
65 0: -- B2 B3
66 3: A1 X0 X1
67 6: A3 X2 X3 */
68 int pred_mode_Y[3*3];
69 int *top_pred_Y;
70 int l_stride, c_stride;
71 int luma_scan[4];
72 int qp;
73 int qp_fixed;
74 int cbp;
75
76 /* intra prediction is done with un-deblocked samples
77 they are saved here before deblocking the MB */
78 uint8_t *top_border_y, *top_border_u, *top_border_v;
79 uint8_t left_border_y[16], left_border_u[8], left_border_v[8];
80 uint8_t topleft_border_y, topleft_border_u, topleft_border_v;
81
82 void (*intra_pred_l[8])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
83 void (*intra_pred_c[7])(uint8_t *d,uint8_t *top,uint8_t *left,int stride);
84 uint8_t *col_type_base;
85 uint8_t *col_type;
86 int sym_factor;
87 int direct_den[2];
88 int scale_den[2];
89 int got_keyframe;
90 } AVSContext;
91
92 /*****************************************************************************
93 *
94 * in-loop deblocking filter
95 *
96 ****************************************************************************/
97
98 static inline int get_bs_p(vector_t *mvP, vector_t *mvQ) {
99 if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA))
100 return 2;
101 if(mvP->ref != mvQ->ref)
102 return 1;
103 if( (abs(mvP->x - mvQ->x) >= 4) || (abs(mvP->y - mvQ->y) >= 4) )
104 return 1;
105 return 0;
106 }
107
108 static inline int get_bs_b(vector_t *mvP, vector_t *mvQ) {
109 if((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA)) {
110 return 2;
111 } else {
112 vector_t *mvPbw = mvP + MV_BWD_OFFS;
113 vector_t *mvQbw = mvQ + MV_BWD_OFFS;
114 if( (abs( mvP->x - mvQ->x) >= 4) ||
115 (abs( mvP->y - mvQ->y) >= 4) ||
116 (abs(mvPbw->x - mvQbw->x) >= 4) ||
117 (abs(mvPbw->y - mvQbw->y) >= 4) )
118 return 1;
119 }
120 return 0;
121 }
122
123 /* boundary strength (bs) mapping:
124 *
125 * --4---5--
126 * 0   2   |
127 * | 6 | 7 |
128 * 1   3   |
129 * ---------
130 *
131 */
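/* In filter_mb() below, bs[0]/bs[1] control the two vertical edges on the
   left MB border, bs[2]/bs[3] the internal vertical edge, bs[4]/bs[5] the
   horizontal edges on the top MB border and bs[6]/bs[7] the internal
   horizontal edge. */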
132
133 #define SET_PARAMS \
134 alpha = alpha_tab[clip(qp_avg + h->alpha_offset,0,63)]; \
135 beta = beta_tab[clip(qp_avg + h->beta_offset, 0,63)]; \
136 tc = tc_tab[clip(qp_avg + h->alpha_offset,0,63)];
137
138 static void filter_mb(AVSContext *h, enum mb_t mb_type) {
139 uint8_t bs[8];
140 int qp_avg, alpha, beta, tc;
141 int i;
142
143 /* save un-deblocked lines */
144 h->topleft_border_y = h->top_border_y[h->mbx*16+15];
145 h->topleft_border_u = h->top_border_u[h->mbx*8+7];
146 h->topleft_border_v = h->top_border_v[h->mbx*8+7];
147 memcpy(&h->top_border_y[h->mbx*16], h->cy + 15* h->l_stride,16);
148 memcpy(&h->top_border_u[h->mbx* 8], h->cu + 7* h->c_stride,8);
149 memcpy(&h->top_border_v[h->mbx* 8], h->cv + 7* h->c_stride,8);
150 for(i=0;i<8;i++) {
151 h->left_border_y[i*2+0] = *(h->cy + 15 + (i*2+0)*h->l_stride);
152 h->left_border_y[i*2+1] = *(h->cy + 15 + (i*2+1)*h->l_stride);
153 h->left_border_u[i] = *(h->cu + 7 + i*h->c_stride);
154 h->left_border_v[i] = *(h->cv + 7 + i*h->c_stride);
155 }
156 if(!h->loop_filter_disable) {
157 /* clear bs */
158 *((uint64_t *)bs) = 0;
159 /* determine bs */
160 switch(mb_type) {
161 case I_8X8:
162 *((uint64_t *)bs) = 0x0202020202020202ULL;
163 break;
164 case P_8X8:
165 case P_8X16:
166 bs[2] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
167 bs[3] = get_bs_p(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
168 case P_16X8:
169 bs[6] = get_bs_p(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
170 bs[7] = get_bs_p(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
171 case P_16X16:
172 case P_SKIP:
173 bs[0] = get_bs_p(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
174 bs[1] = get_bs_p(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
175 bs[4] = get_bs_p(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
176 bs[5] = get_bs_p(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
177 break;
178 case B_SKIP:
179 case B_DIRECT:
180 case B_8X8:
181 bs[2] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
182 bs[3] = get_bs_b(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3]);
183 bs[6] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
184 bs[7] = get_bs_b(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3]);
185 case B_FWD_16X16:
186 case B_BWD_16X16:
187 case B_SYM_16X16:
188 bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
189 bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
190 bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
191 bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
192 break;
193 default:
194 if(mb_type & 1) { //16X8
195 bs[6] = bs[7] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2]);
196 } else { //8X16
197 bs[2] = bs[3] = get_bs_b(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1]);
198 }
199 bs[0] = get_bs_b(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0]);
200 bs[1] = get_bs_b(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2]);
201 bs[4] = get_bs_b(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0]);
202 bs[5] = get_bs_b(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1]);
203 }
204 if( *((uint64_t *)bs) ) {
205 if(h->flags & A_AVAIL) {
206 qp_avg = (h->qp + h->left_qp + 1) >> 1;
207 SET_PARAMS;
208 h->s.dsp.cavs_filter_lv(h->cy,h->l_stride,alpha,beta,tc,bs[0],bs[1]);
209 h->s.dsp.cavs_filter_cv(h->cu,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
210 h->s.dsp.cavs_filter_cv(h->cv,h->c_stride,alpha,beta,tc,bs[0],bs[1]);
211 }
212 qp_avg = h->qp;
213 SET_PARAMS;
214 h->s.dsp.cavs_filter_lv(h->cy + 8,h->l_stride,alpha,beta,tc,bs[2],bs[3]);
215 h->s.dsp.cavs_filter_lh(h->cy + 8*h->l_stride,h->l_stride,alpha,beta,tc,
216 bs[6],bs[7]);
217
218 if(h->flags & B_AVAIL) {
219 qp_avg = (h->qp + h->top_qp[h->mbx] + 1) >> 1;
220 SET_PARAMS;
221 h->s.dsp.cavs_filter_lh(h->cy,h->l_stride,alpha,beta,tc,bs[4],bs[5]);
222 h->s.dsp.cavs_filter_ch(h->cu,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
223 h->s.dsp.cavs_filter_ch(h->cv,h->c_stride,alpha,beta,tc,bs[4],bs[5]);
224 }
225 }
226 }
227 h->left_qp = h->qp;
228 h->top_qp[h->mbx] = h->qp;
229 }
230
231 #undef SET_PARAMS
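/* Note that the filter parameters are looked up with the average QP of the
   two macroblocks sharing an edge; h->left_qp and the per-column h->top_qp[]
   line cache the neighbours' QPs for this and are updated at the end of
   filter_mb() even when the loop filter is disabled. */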
232
233 /*****************************************************************************
234 *
235 * spatial intra prediction
236 *
237 ****************************************************************************/
238
239 static inline void load_intra_pred_luma(AVSContext *h, uint8_t *top,
240 uint8_t *left, int block) {
241 int i;
242
243 switch(block) {
244 case 0:
245 memcpy(&left[1],h->left_border_y,16);
246 left[0] = left[1];
247 left[17] = left[16];
248 memcpy(&top[1],&h->top_border_y[h->mbx*16],16);
249 top[17] = top[16];
250 top[0] = top[1];
251 if((h->flags & A_AVAIL) && (h->flags & B_AVAIL))
252 left[0] = top[0] = h->topleft_border_y;
253 break;
254 case 1:
255 for(i=0;i<8;i++)
256 left[i+1] = *(h->cy + 7 + i*h->l_stride);
257 memset(&left[9],left[8],9);
258 left[0] = left[1];
259 memcpy(&top[1],&h->top_border_y[h->mbx*16+8],8);
260 if(h->flags & C_AVAIL)
261 memcpy(&top[9],&h->top_border_y[(h->mbx + 1)*16],8);
262 else
263 memset(&top[9],top[8],9);
264 top[17] = top[16];
265 top[0] = top[1];
266 if(h->flags & B_AVAIL)
267 left[0] = top[0] = h->top_border_y[h->mbx*16+7];
268 break;
269 case 2:
270 memcpy(&left[1],&h->left_border_y[8],8);
271 memset(&left[9],left[8],9);
272 memcpy(&top[1],h->cy + 7*h->l_stride,16);
273 top[17] = top[16];
274 left[0] = h->left_border_y[7];
275 top[0] = top[1];
276 if(h->flags & A_AVAIL)
277 top[0] = left[0];
278 break;
279 case 3:
280 for(i=0;i<9;i++)
281 left[i] = *(h->cy + 7 + (i+7)*h->l_stride);
282 memset(&left[9],left[8],9);
283 memcpy(&top[0],h->cy + 7 + 7*h->l_stride,9);
284 memset(&top[9],top[8],9);
285 break;
286 }
287 }
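/* top[] and left[] carry the neighbouring reconstructed samples at indices
   1..16, with index 0 and 17 padded (or set to the top-left sample), so the
   prediction functions and the LOWPASS filter below can read one sample past
   either end without bounds checks. */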
288
289 static inline void load_intra_pred_chroma(uint8_t *stop, uint8_t *sleft,
290 uint8_t stopleft, uint8_t *dtop,
291 uint8_t *dleft, int stride, int flags) {
292 int i;
293
294 if(flags & A_AVAIL) {
295 for(i=0; i<8; i++)
296 dleft[i+1] = sleft[i];
297 dleft[0] = dleft[1];
298 dleft[9] = dleft[8];
299 }
300 if(flags & B_AVAIL) {
301 for(i=0; i<8; i++)
302 dtop[i+1] = stop[i];
303 dtop[0] = dtop[1];
304 dtop[9] = dtop[8];
305 if(flags & A_AVAIL)
306 dleft[0] = dtop[0] = stopleft;
307 }
308 }
309
310 static void intra_pred_vert(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
311 int y;
312 uint64_t a = *((uint64_t *)(&top[1]));
313 for(y=0;y<8;y++) {
314 *((uint64_t *)(d+y*stride)) = a;
315 }
316 }
317
318 static void intra_pred_horiz(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
319 int y;
320 uint64_t a;
321 for(y=0;y<8;y++) {
322 a = left[y+1] * 0x0101010101010101ULL;
323 *((uint64_t *)(d+y*stride)) = a;
324 }
325 }
326
327 static void intra_pred_dc_128(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
328 int y;
329 uint64_t a = 0x8080808080808080ULL;
330 for(y=0;y<8;y++)
331 *((uint64_t *)(d+y*stride)) = a;
332 }
333
334 static void intra_pred_plane(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
335 int x,y,ia;
336 int ih = 0;
337 int iv = 0;
338 uint8_t *cm = cropTbl + MAX_NEG_CROP;
339
340 for(x=0; x<4; x++) {
341 ih += (x+1)*(top[5+x]-top[3-x]);
342 iv += (x+1)*(left[5+x]-left[3-x]);
343 }
344 ia = (top[8]+left[8])<<4;
345 ih = (17*ih+16)>>5;
346 iv = (17*iv+16)>>5;
347 for(y=0; y<8; y++)
348 for(x=0; x<8; x++)
349 d[y*stride+x] = cm[(ia+(x-3)*ih+(y-3)*iv+16)>>5];
350 }
351
352 #define LOWPASS(ARRAY,INDEX) \
353 (( ARRAY[(INDEX)-1] + 2*ARRAY[(INDEX)] + ARRAY[(INDEX)+1] + 2) >> 2)
354
355 static void intra_pred_lp(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
356 int x,y;
357 for(y=0; y<8; y++)
358 for(x=0; x<8; x++)
359 d[y*stride+x] = (LOWPASS(top,x+1) + LOWPASS(left,y+1)) >> 1;
360 }
361
362 static void intra_pred_down_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
363 int x,y;
364 for(y=0; y<8; y++)
365 for(x=0; x<8; x++)
366 d[y*stride+x] = (LOWPASS(top,x+y+2) + LOWPASS(left,x+y+2)) >> 1;
367 }
368
369 static void intra_pred_down_right(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
370 int x,y;
371 for(y=0; y<8; y++)
372 for(x=0; x<8; x++)
373 if(x==y)
374 d[y*stride+x] = (left[1]+2*top[0]+top[1]+2)>>2;
375 else if(x>y)
376 d[y*stride+x] = LOWPASS(top,x-y);
377 else
378 d[y*stride+x] = LOWPASS(left,y-x);
379 }
380
381 static void intra_pred_lp_left(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
382 int x,y;
383 for(y=0; y<8; y++)
384 for(x=0; x<8; x++)
385 d[y*stride+x] = LOWPASS(left,y+1);
386 }
387
388 static void intra_pred_lp_top(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
389 int x,y;
390 for(y=0; y<8; y++)
391 for(x=0; x<8; x++)
392 d[y*stride+x] = LOWPASS(top,x+1);
393 }
394
395 #undef LOWPASS
396
397 static inline void modify_pred(const int8_t *mod_table, int *mode) {
398 int newmode = mod_table[(int)*mode];
399 if(newmode < 0) {
400 av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n");
401 *mode = 0;
402 } else {
403 *mode = newmode;
404 }
405 }
406
407 /*****************************************************************************
408 *
409 * motion compensation
410 *
411 ****************************************************************************/
412
413 static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
414 int chroma_height,int delta,int list,uint8_t *dest_y,
415 uint8_t *dest_cb,uint8_t *dest_cr,int src_x_offset,
416 int src_y_offset,qpel_mc_func *qpix_op,
417 h264_chroma_mc_func chroma_op,vector_t *mv){
418 MpegEncContext * const s = &h->s;
419 const int mx= mv->x + src_x_offset*8;
420 const int my= mv->y + src_y_offset*8;
421 const int luma_xy= (mx&3) + ((my&3)<<2);
422 uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride;
423 uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride;
424 uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride;
425 int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
426 int extra_height= extra_width;
427 int emu=0;
428 const int full_mx= mx>>2;
429 const int full_my= my>>2;
430 const int pic_width = 16*h->mb_width;
431 const int pic_height = 16*h->mb_height;
432
433 if(!pic->data[0])
434 return;
435 if(mx&7) extra_width -= 3;
436 if(my&7) extra_height -= 3;
437
438 if( full_mx < 0-extra_width
439 || full_my < 0-extra_height
440 || full_mx + 16/*FIXME*/ > pic_width + extra_width
441 || full_my + 16/*FIXME*/ > pic_height + extra_height){
442 ff_emulated_edge_mc(s->edge_emu_buffer, src_y - 2 - 2*h->l_stride, h->l_stride,
443 16+5, 16+5/*FIXME*/, full_mx-2, full_my-2, pic_width, pic_height);
444 src_y= s->edge_emu_buffer + 2 + 2*h->l_stride;
445 emu=1;
446 }
447
448 qpix_op[luma_xy](dest_y, src_y, h->l_stride); //FIXME try variable height perhaps?
449 if(!square){
450 qpix_op[luma_xy](dest_y + delta, src_y + delta, h->l_stride);
451 }
452
453 if(emu){
454 ff_emulated_edge_mc(s->edge_emu_buffer, src_cb, h->c_stride,
455 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
456 src_cb= s->edge_emu_buffer;
457 }
458 chroma_op(dest_cb, src_cb, h->c_stride, chroma_height, mx&7, my&7);
459
460 if(emu){
461 ff_emulated_edge_mc(s->edge_emu_buffer, src_cr, h->c_stride,
462 9, 9/*FIXME*/, (mx>>3), (my>>3), pic_width>>1, pic_height>>1);
463 src_cr= s->edge_emu_buffer;
464 }
465 chroma_op(dest_cr, src_cr, h->c_stride, chroma_height, mx&7, my&7);
466 }
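/* Luma vectors are in quarter-pel units: mx&3/my&3 select one of the 16 qpel
   interpolation functions, mx>>2/my>>2 give the full-pel position. Chroma is
   subsampled 2:1, so the same vector addresses eighth-pel positions there
   (mx&7/my&7). ff_emulated_edge_mc() is used whenever the referenced block
   reaches outside the picture. */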
467
468 static inline void mc_part_std(AVSContext *h,int square,int chroma_height,int delta,
469 uint8_t *dest_y,uint8_t *dest_cb,uint8_t *dest_cr,
470 int x_offset, int y_offset,qpel_mc_func *qpix_put,
471 h264_chroma_mc_func chroma_put,qpel_mc_func *qpix_avg,
472 h264_chroma_mc_func chroma_avg, vector_t *mv){
473 qpel_mc_func *qpix_op= qpix_put;
474 h264_chroma_mc_func chroma_op= chroma_put;
475
476 dest_y += 2*x_offset + 2*y_offset*h->l_stride;
477 dest_cb += x_offset + y_offset*h->c_stride;
478 dest_cr += x_offset + y_offset*h->c_stride;
479 x_offset += 8*h->mbx;
480 y_offset += 8*h->mby;
481
482 if(mv->ref >= 0){
483 Picture *ref= &h->DPB[mv->ref];
484 mc_dir_part(h, ref, square, chroma_height, delta, 0,
485 dest_y, dest_cb, dest_cr, x_offset, y_offset,
486 qpix_op, chroma_op, mv);
487
488 qpix_op= qpix_avg;
489 chroma_op= chroma_avg;
490 }
491
492 if((mv+MV_BWD_OFFS)->ref >= 0){
493 Picture *ref= &h->DPB[0];
494 mc_dir_part(h, ref, square, chroma_height, delta, 1,
495 dest_y, dest_cb, dest_cr, x_offset, y_offset,
496 qpix_op, chroma_op, mv+MV_BWD_OFFS);
497 }
498 }
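/* For bi-prediction the forward block is written with the put_ functions and
   the backward block is then blended in with the avg_ functions. The forward
   reference index selects DPB[mv->ref]; the backward direction always
   predicts from DPB[0], the most recently decoded reference. */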
499
500 static void inter_pred(AVSContext *h) {
501 /* always do 8x8 blocks TODO: are larger blocks worth it? */
502 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 0,
503 h->s.dsp.put_cavs_qpel_pixels_tab[1],
504 h->s.dsp.put_h264_chroma_pixels_tab[1],
505 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
506 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X0]);
507 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 0,
508 h->s.dsp.put_cavs_qpel_pixels_tab[1],
509 h->s.dsp.put_h264_chroma_pixels_tab[1],
510 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
511 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X1]);
512 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 0, 4,
513 h->s.dsp.put_cavs_qpel_pixels_tab[1],
514 h->s.dsp.put_h264_chroma_pixels_tab[1],
515 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
516 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X2]);
517 mc_part_std(h, 1, 4, 0, h->cy, h->cu, h->cv, 4, 4,
518 h->s.dsp.put_cavs_qpel_pixels_tab[1],
519 h->s.dsp.put_h264_chroma_pixels_tab[1],
520 h->s.dsp.avg_cavs_qpel_pixels_tab[1],
521 h->s.dsp.avg_h264_chroma_pixels_tab[1],&h->mv[MV_FWD_X3]);
522 /* set intra prediction modes to default values */
523 h->pred_mode_Y[3] = h->pred_mode_Y[6] = INTRA_L_LP;
524 h->top_pred_Y[h->mbx*2+0] = h->top_pred_Y[h->mbx*2+1] = INTRA_L_LP;
525 }
526
527 /*****************************************************************************
528 *
529 * motion vector prediction
530 *
531 ****************************************************************************/
532
533 static inline void veccpy(vector_t *dst, vector_t *src) {
534 *((uint64_t *)dst) = *((uint64_t *)src);
535 }
536
537 static inline void set_mvs(vector_t *mv, enum block_t size) {
538 switch(size) {
539 case BLK_16X16:
540 veccpy(mv+MV_STRIDE ,mv);
541 veccpy(mv+MV_STRIDE+1,mv);
542 case BLK_16X8:
543 veccpy(mv +1,mv);
544 break;
545 case BLK_8X16:
546 veccpy(mv+MV_STRIDE ,mv);
547 break;
548 }
549 }
550
551 static inline void store_mvs(AVSContext *h) {
552 veccpy(&h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 0], &h->mv[MV_FWD_X0]);
553 veccpy(&h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 1], &h->mv[MV_FWD_X1]);
554 veccpy(&h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 2], &h->mv[MV_FWD_X2]);
555 veccpy(&h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + 3], &h->mv[MV_FWD_X3]);
556 }
557
558 static inline void scale_mv(AVSContext *h, int *d_x, int *d_y, vector_t *src, int distp) {
559 int den = h->scale_den[src->ref];
560
561 *d_x = (src->x*distp*den + 256 + (src->x>>31)) >> 9;
562 *d_y = (src->y*distp*den + 256 + (src->y>>31)) >> 9;
563 }
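/* scale_den[ref] is 512/dist[ref] (set in decode_pic), so this approximates
   src * distp / dist[ref]; the +256 and >>9 do the rounding, and the
   (src>>31) term subtracts one for negative components to keep the rounding
   symmetric around zero. */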
564
565 static inline void mv_pred_median(AVSContext *h, vector_t *mvP, vector_t *mvA, vector_t *mvB, vector_t *mvC) {
566 int ax, ay, bx, by, cx, cy;
567 int len_ab, len_bc, len_ca, len_mid;
568
569 /* scale candidates according to their temporal span */
570 scale_mv(h, &ax, &ay, mvA, mvP->dist);
571 scale_mv(h, &bx, &by, mvB, mvP->dist);
572 scale_mv(h, &cx, &cy, mvC, mvP->dist);
573 /* find the geometrical median of the three candidates */
574 len_ab = abs(ax - bx) + abs(ay - by);
575 len_bc = abs(bx - cx) + abs(by - cy);
576 len_ca = abs(cx - ax) + abs(cy - ay);
577 len_mid = mid_pred(len_ab, len_bc, len_ca);
578 if(len_mid == len_ab) {
579 mvP->x = cx;
580 mvP->y = cy;
581 } else if(len_mid == len_bc) {
582 mvP->x = ax;
583 mvP->y = ay;
584 } else {
585 mvP->x = bx;
586 mvP->y = by;
587 }
588 }
589
590 static inline void mv_pred_direct(AVSContext *h, vector_t *pmv_fw,
591 vector_t *pmv_bw, vector_t *col_mv) {
592 int den = h->direct_den[col_mv->ref];
593 int m = col_mv->x >> 31;
594
595 pmv_fw->dist = h->dist[1];
596 pmv_bw->dist = h->dist[0];
597 pmv_fw->ref = 1;
598 pmv_bw->ref = 0;
599 /* scale the co-located motion vector according to its temporal span */
600 pmv_fw->x = (((den+(den*col_mv->x*pmv_fw->dist^m)-m-1)>>14)^m)-m;
601 pmv_bw->x = m-(((den+(den*col_mv->x*pmv_bw->dist^m)-m-1)>>14)^m);
602 m = col_mv->y >> 31;
603 pmv_fw->y = (((den+(den*col_mv->y*pmv_fw->dist^m)-m-1)>>14)^m)-m;
604 pmv_bw->y = m-(((den+(den*col_mv->y*pmv_bw->dist^m)-m-1)>>14)^m);
605 }
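/* m is an arithmetic sign mask (-1 for negative components, 0 otherwise);
   the XOR/subtract pairs give sign-symmetric rounding. direct_den[] holds
   16384 divided by the temporal distances of the co-located picture (filled
   in decode_pic while that picture was decoded), so the co-located vector is
   rescaled by the current B frame's forward and backward distances. */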
606
607 static inline void mv_pred_sym(AVSContext *h, vector_t *src, enum block_t size) {
608 vector_t *dst = src + MV_BWD_OFFS;
609
610 /* backward mv is the scaled and negated forward mv */
611 dst->x = -((src->x * h->sym_factor + 256) >> 9);
612 dst->y = -((src->y * h->sym_factor + 256) >> 9);
613 dst->ref = 0;
614 dst->dist = h->dist[0];
615 set_mvs(dst, size);
616 }
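/* sym_factor is dist[0]*scale_den[1] (set in decode_pic), so the backward
   vector is approximately -fwd * dist_bwd / dist_fwd, i.e. the forward
   vector mirrored in time across the current frame. */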
617
618 static void mv_pred(AVSContext *h, enum mv_loc_t nP, enum mv_loc_t nC,
619 enum mv_pred_t mode, enum block_t size, int ref) {
620 vector_t *mvP = &h->mv[nP];
621 vector_t *mvA = &h->mv[nP-1];
622 vector_t *mvB = &h->mv[nP-4];
623 vector_t *mvC = &h->mv[nC];
624 int mvAref = mvA->ref;
625 int mvBref = mvB->ref;
626 int mvCref;
627
628 mvP->ref = ref;
629 mvP->dist = h->dist[mvP->ref];
630 if(mvC->ref == NOT_AVAIL)
631 mvC = &h->mv[nP-5]; // set to top-left (mvD)
632 mvCref = mvC->ref;
633 if(mode == MV_PRED_PSKIP) {
634 if((mvAref == NOT_AVAIL) || (mvBref == NOT_AVAIL) ||
635 ((mvA->x | mvA->y | mvA->ref) == 0) ||
636 ((mvB->x | mvB->y | mvB->ref) == 0) ) {
637 mvP->x = mvP->y = 0;
638 set_mvs(mvP,size);
639 return;
640 }
641 }
642 /* if there is only one suitable candidate, take it */
643 if((mvAref >= 0) && (mvBref < 0) && (mvCref < 0)) {
644 mvP->x = mvA->x;
645 mvP->y = mvA->y;
646 } else if((mvAref < 0) && (mvBref >= 0) && (mvCref < 0)) {
647 mvP->x = mvB->x;
648 mvP->y = mvB->y;
649 } else if((mvAref < 0) && (mvBref < 0) && (mvCref >= 0)) {
650 mvP->x = mvC->x;
651 mvP->y = mvC->y;
652 } else {
653 switch(mode) {
654 case MV_PRED_LEFT:
655 if(mvAref == mvP->ref) {
656 mvP->x = mvA->x;
657 mvP->y = mvA->y;
658 } else
659 mv_pred_median(h, mvP, mvA, mvB, mvC);
660 break;
661 case MV_PRED_TOP:
662 if(mvBref == mvP->ref) {
663 mvP->x = mvB->x;
664 mvP->y = mvB->y;
665 } else
666 mv_pred_median(h, mvP, mvA, mvB, mvC);
667 break;
668 case MV_PRED_TOPRIGHT:
669 if(mvCref == mvP->ref) {
670 mvP->x = mvC->x;
671 mvP->y = mvC->y;
672 } else
673 mv_pred_median(h, mvP, mvA, mvB, mvC);
674 break;
675 default:
676 mv_pred_median(h, mvP, mvA, mvB, mvC);
677 break;
678 }
679 }
680 if(mode < MV_PRED_PSKIP) {
681 mvP->x += get_se_golomb(&h->s.gb);
682 mvP->y += get_se_golomb(&h->s.gb);
683 }
684 set_mvs(mvP,size);
685 }
686
687 /*****************************************************************************
688 *
689 * residual data decoding
690 *
691 ****************************************************************************/
692
693 /* kth-order exponential golomb code */
694 static inline int get_ue_code(GetBitContext *gb, int order) {
695 if(order)
696 return (get_ue_golomb(gb) << order) + get_bits(gb,order);
697 return get_ue_golomb(gb);
698 }
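/* The decoded value is (ue_prefix << order) + order raw bits, i.e. a plain
   ue(v) code extended by a fixed k-bit suffix; it is used both for the
   run/level codes and for the escape levels in decode_residual_block()
   below. */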
699
700 static int decode_residual_block(AVSContext *h, GetBitContext *gb,
701 const residual_vlc_t *r, int esc_golomb_order,
702 int qp, uint8_t *dst, int stride) {
703 int i,pos = -1;
704 int level_code, esc_code, level, run, mask;
705 int level_buf[64];
706 int run_buf[64];
707 int dqm = dequant_mul[qp];
708 int dqs = dequant_shift[qp];
709 int dqa = 1 << (dqs - 1);
710 const uint8_t *scantab = ff_zigzag_direct;
711 DCTELEM block[64];
712
713 memset(block,0,64*sizeof(DCTELEM));
714 for(i=0;i<65;i++) {
715 level_code = get_ue_code(gb,r->golomb_order);
716 if(level_code >= ESCAPE_CODE) {
717 run = (level_code - ESCAPE_CODE) >> 1;
718 esc_code = get_ue_code(gb,esc_golomb_order);
719 level = esc_code + (run > r->max_run ? 1 : r->level_add[run]);
720 while(level > r->inc_limit)
721 r++;
722 mask = -(level_code & 1);
723 level = (level^mask) - mask;
724 } else {
725 if(level_code < 0)
726 return -1;
727 level = r->rltab[level_code][0];
728 if(!level) //end of block signal
729 break;
730 run = r->rltab[level_code][1];
731 r += r->rltab[level_code][2];
732 }
733 level_buf[i] = level;
734 run_buf[i] = run;
735 }
736 /* inverse scan and dequantization */
737 for(i=i-1;i>=0;i--) {
738 pos += 1 + run_buf[i];
739 if(pos > 63) {
740 av_log(h->s.avctx, AV_LOG_ERROR,
741 "position out of block bounds at pic %d MB(%d,%d)\n",
742 h->picture.poc, h->mbx, h->mby);
743 return -1;
744 }
745 block[scantab[pos]] = (level_buf[i]*dqm + dqa) >> dqs;
746 }
747 h->s.dsp.cavs_idct8_add(dst,block,stride);
748 return 0;
749 }
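/* Residual coefficients arrive as (level,run) pairs coded from the last
   non-zero coefficient backwards; the table pointer r advances as the level
   magnitude grows so later pairs use a VLC tuned to larger values. The
   buffered pairs are then laid out front-to-back along the zig-zag scan and
   dequantized as (level*dqm + dqa) >> dqs. */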
750
751
752 static inline void decode_residual_chroma(AVSContext *h) {
753 if(h->cbp & (1<<4))
754 decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],
755 h->cu,h->c_stride);
756 if(h->cbp & (1<<5))
757 decode_residual_block(h,&h->s.gb,chroma_2dvlc,0, chroma_qp[h->qp],
758 h->cv,h->c_stride);
759 }
760
761 static inline void decode_residual_inter(AVSContext *h) {
762 int block;
763
764 /* get coded block pattern */
765 h->cbp = cbp_tab[get_ue_golomb(&h->s.gb)][1];
766 /* get quantizer */
767 if(h->cbp && !h->qp_fixed)
768 h->qp += get_se_golomb(&h->s.gb);
769 for(block=0;block<4;block++)
770 if(h->cbp & (1<<block))
771 decode_residual_block(h,&h->s.gb,inter_2dvlc,0,h->qp,
772 h->cy + h->luma_scan[block], h->l_stride);
773 decode_residual_chroma(h);
774 }
775
776 /*****************************************************************************
777 *
778 * macroblock level
779 *
780 ****************************************************************************/
781
782 static inline void init_mb(AVSContext *h) {
783 int i;
784
785 /* copy predictors from top line (MB B and C) into cache */
786 for(i=0;i<3;i++) {
787 veccpy(&h->mv[MV_FWD_B2+i],&h->top_mv[0][h->mbx*2+i]);
788 veccpy(&h->mv[MV_BWD_B2+i],&h->top_mv[1][h->mbx*2+i]);
789 }
790 h->pred_mode_Y[1] = h->top_pred_Y[h->mbx*2+0];
791 h->pred_mode_Y[2] = h->top_pred_Y[h->mbx*2+1];
792 /* clear top predictors if MB B is not available */
793 if(!(h->flags & B_AVAIL)) {
794 veccpy(&h->mv[MV_FWD_B2],(vector_t *)&un_mv);
795 veccpy(&h->mv[MV_FWD_B3],(vector_t *)&un_mv);
796 veccpy(&h->mv[MV_BWD_B2],(vector_t *)&un_mv);
797 veccpy(&h->mv[MV_BWD_B3],(vector_t *)&un_mv);
798 h->pred_mode_Y[1] = h->pred_mode_Y[2] = NOT_AVAIL;
799 h->flags &= ~(C_AVAIL|D_AVAIL);
800 } else if(h->mbx) {
801 h->flags |= D_AVAIL;
802 }
803 if(h->mbx == h->mb_width-1) //MB C not available
804 h->flags &= ~C_AVAIL;
805 /* clear top-right predictors if MB C is not available */
806 if(!(h->flags & C_AVAIL)) {
807 veccpy(&h->mv[MV_FWD_C2],(vector_t *)&un_mv);
808 veccpy(&h->mv[MV_BWD_C2],(vector_t *)&un_mv);
809 }
810 /* clear top-left predictors if MB D is not available */
811 if(!(h->flags & D_AVAIL)) {
812 veccpy(&h->mv[MV_FWD_D3],(vector_t *)&un_mv);
813 veccpy(&h->mv[MV_BWD_D3],(vector_t *)&un_mv);
814 }
815 /* set pointer for co-located macroblock type */
816 h->col_type = &h->col_type_base[h->mby*h->mb_width + h->mbx];
817 }
818
819 static inline void check_for_slice(AVSContext *h);
820
821 static inline int next_mb(AVSContext *h) {
822 int i;
823
824 h->flags |= A_AVAIL;
825 h->cy += 16;
826 h->cu += 8;
827 h->cv += 8;
828 /* copy mvs as predictors to the left */
829 for(i=0;i<=20;i+=4)
830 veccpy(&h->mv[i],&h->mv[i+2]);
831 /* copy bottom mvs from cache to top line */
832 veccpy(&h->top_mv[0][h->mbx*2+0],&h->mv[MV_FWD_X2]);
833 veccpy(&h->top_mv[0][h->mbx*2+1],&h->mv[MV_FWD_X3]);
834 veccpy(&h->top_mv[1][h->mbx*2+0],&h->mv[MV_BWD_X2]);
835 veccpy(&h->top_mv[1][h->mbx*2+1],&h->mv[MV_BWD_X3]);
836 /* next MB address */
837 h->mbx++;
838 if(h->mbx == h->mb_width) { //new mb line
839 h->flags = B_AVAIL|C_AVAIL;
840 /* clear left pred_modes */
841 h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
842 /* clear left mv predictors */
843 for(i=0;i<=20;i+=4)
844 veccpy(&h->mv[i],(vector_t *)&un_mv);
845 h->mbx = 0;
846 h->mby++;
847 /* re-calculate sample pointers */
848 h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
849 h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
850 h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
851 if(h->mby == h->mb_height) { //frame end
852 return 0;
853 } else {
854 //check_for_slice(h);
855 }
856 }
857 return 1;
858 }
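/* The veccpy() loop above copies each cache row's third entry onto its
   first, so X1/X3 become the next MB's A1/A3 predictors and B3 becomes its
   D3; stepping i by 4 up to 20 covers both the forward and the backward half
   of the cache. */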
859
860 static void decode_mb_i(AVSContext *h, int is_i_pic) {
861 GetBitContext *gb = &h->s.gb;
862 int block, pred_mode_uv;
863 uint8_t top[18];
864 uint8_t left[18];
865 uint8_t *d;
866
867 /* get intra prediction modes from stream */
868 for(block=0;block<4;block++) {
869 int nA,nB,predpred;
870 int pos = scan3x3[block];
871
872 nA = h->pred_mode_Y[pos-1];
873 nB = h->pred_mode_Y[pos-3];
874 if((nA == NOT_AVAIL) || (nB == NOT_AVAIL))
875 predpred = 2;
876 else
877 predpred = FFMIN(nA,nB);
878 if(get_bits1(gb))
879 h->pred_mode_Y[pos] = predpred;
880 else {
881 h->pred_mode_Y[pos] = get_bits(gb,2);
882 if(h->pred_mode_Y[pos] >= predpred)
883 h->pred_mode_Y[pos]++;
884 }
885 }
886 pred_mode_uv = get_ue_golomb(gb);
887 if(pred_mode_uv > 6) {
888 av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra chroma pred mode\n");
889 pred_mode_uv = 0;
890 }
891
892 /* save pred modes before they get modified */
893 h->pred_mode_Y[3] = h->pred_mode_Y[5];
894 h->pred_mode_Y[6] = h->pred_mode_Y[8];
895 h->top_pred_Y[h->mbx*2+0] = h->pred_mode_Y[7];
896 h->top_pred_Y[h->mbx*2+1] = h->pred_mode_Y[8];
897
898 /* modify pred modes according to availability of neighbour samples */
899 if(!(h->flags & A_AVAIL)) {
900 modify_pred(left_modifier_l, &h->pred_mode_Y[4] );
901 modify_pred(left_modifier_l, &h->pred_mode_Y[7] );
902 modify_pred(left_modifier_c, &pred_mode_uv );
903 }
904 if(!(h->flags & B_AVAIL)) {
905 modify_pred(top_modifier_l, &h->pred_mode_Y[4] );
906 modify_pred(top_modifier_l, &h->pred_mode_Y[5] );
907 modify_pred(top_modifier_c, &pred_mode_uv );
908 }
909
910 /* get coded block pattern */
911 if(is_i_pic)
912 h->cbp = cbp_tab[get_ue_golomb(gb)][0];
913 if(h->cbp && !h->qp_fixed)
914 h->qp += get_se_golomb(gb); //qp_delta
915
916 /* luma intra prediction interleaved with residual decode/transform/add */
917 for(block=0;block<4;block++) {
918 d = h->cy + h->luma_scan[block];
919 load_intra_pred_luma(h, top, left, block);
920 h->intra_pred_l[(int)h->pred_mode_Y[scan3x3[block]]]
921 (d, top, left, h->l_stride);
922 if(h->cbp & (1<<block))
923 decode_residual_block(h,gb,intra_2dvlc,1,h->qp,d,h->l_stride);
924 }
925
926 /* chroma intra prediction */
927 load_intra_pred_chroma(&h->top_border_u[h->mbx*8], h->left_border_u,
928 h->topleft_border_u, top, left, h->c_stride, h->flags);
929 h->intra_pred_c[pred_mode_uv](h->cu, top, left, h->c_stride);
930 load_intra_pred_chroma(&h->top_border_v[h->mbx*8], h->left_border_v,
931 h->topleft_border_v, top, left, h->c_stride, h->flags);
932 h->intra_pred_c[pred_mode_uv](h->cv, top, left, h->c_stride);
933
934 decode_residual_chroma(h);
935 filter_mb(h,I_8X8);
936
937 /* mark motion vectors as intra */
938 veccpy( &h->mv[MV_FWD_X0], (vector_t *)&intra_mv);
939 set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
940 veccpy( &h->mv[MV_BWD_X0], (vector_t *)&intra_mv);
941 set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
942 if(h->pic_type != FF_B_TYPE)
943 *h->col_type = I_8X8;
944 }
945
946 static void mb_skip_p(AVSContext *h) {
947 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_PSKIP, BLK_16X16, 0);
948 inter_pred(h);
949 store_mvs(h);
950 filter_mb(h,P_SKIP);
951 *h->col_type = P_SKIP;
952 }
953
954
955 static void mb_skip_b(AVSContext *h) {
956 int i;
957
958 if(!(*h->col_type)) {
959 /* intra MB at co-location, do in-plane prediction */
960 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_BSKIP, BLK_16X16, 1);
961 mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_BSKIP, BLK_16X16, 0);
962 } else {
963 /* direct prediction from co-located P MB, block-wise */
964 for(i=0;i<4;i++)
965 mv_pred_direct(h,&h->mv[mv_scan[i]],
966 &h->mv[mv_scan[i]+MV_BWD_OFFS],
967 &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + i]);
968 }
969 }
970
971 static void decode_mb_p(AVSContext *h, enum mb_t mb_type) {
972 GetBitContext *gb = &h->s.gb;
973 int ref[4];
974
975 switch(mb_type) {
976 case P_SKIP:
977 mb_skip_p(h);
978 return;
979 case P_16X16:
980 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
981 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16,ref[0]);
982 break;
983 case P_16X8:
984 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
985 ref[2] = h->ref_flag ? 0 : get_bits1(gb);
986 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, ref[0]);
987 mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, ref[2]);
988 break;
989 case P_8X16:
990 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
991 ref[1] = h->ref_flag ? 0 : get_bits1(gb);
992 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, ref[0]);
993 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT, BLK_8X16, ref[1]);
994 break;
995 case P_8X8:
996 ref[0] = h->ref_flag ? 0 : get_bits1(gb);
997 ref[1] = h->ref_flag ? 0 : get_bits1(gb);
998 ref[2] = h->ref_flag ? 0 : get_bits1(gb);
999 ref[3] = h->ref_flag ? 0 : get_bits1(gb);
1000 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_MEDIAN, BLK_8X8, ref[0]);
1001 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_MEDIAN, BLK_8X8, ref[1]);
1002 mv_pred(h, MV_FWD_X2, MV_FWD_X1, MV_PRED_MEDIAN, BLK_8X8, ref[2]);
1003 mv_pred(h, MV_FWD_X3, MV_FWD_X0, MV_PRED_MEDIAN, BLK_8X8, ref[3]);
1004 }
1005 inter_pred(h);
1006 store_mvs(h);
1007 decode_residual_inter(h);
1008 filter_mb(h,mb_type);
1009 *h->col_type = mb_type;
1010 }
1011
1012 static void decode_mb_b(AVSContext *h, enum mb_t mb_type) {
1013 int block;
1014 enum sub_mb_t sub_type[4];
1015 int flags;
1016
1017 /* reset all MVs */
1018 veccpy( &h->mv[MV_FWD_X0], (vector_t *)&dir_mv);
1019 set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
1020 veccpy( &h->mv[MV_BWD_X0], (vector_t *)&dir_mv);
1021 set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
1022 switch(mb_type) {
1023 case B_SKIP:
1024 mb_skip_b(h);
1025 inter_pred(h);
1026 filter_mb(h,B_SKIP);
1027 return;
1028 case B_DIRECT:
1029 mb_skip_b(h);
1030 break;
1031 case B_FWD_16X16:
1032 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
1033 break;
1034 case B_SYM_16X16:
1035 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_MEDIAN, BLK_16X16, 1);
1036 mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X16);
1037 break;
1038 case B_BWD_16X16:
1039 mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_MEDIAN, BLK_16X16, 0);
1040 break;
1041 case B_8X8:
1042 for(block=0;block<4;block++)
1043 sub_type[block] = get_bits(&h->s.gb,2);
1044 for(block=0;block<4;block++) {
1045 switch(sub_type[block]) {
1046 case B_SUB_DIRECT:
1047 if(!(*h->col_type)) {
1048 /* intra MB at co-location, do in-plane prediction */
1049 mv_pred(h, mv_scan[block], mv_scan[block]-3,
1050 MV_PRED_BSKIP, BLK_8X8, 1);
1051 mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
1052 mv_scan[block]-3+MV_BWD_OFFS,
1053 MV_PRED_BSKIP, BLK_8X8, 0);
1054 } else
1055 mv_pred_direct(h,&h->mv[mv_scan[block]],
1056 &h->mv[mv_scan[block]+MV_BWD_OFFS],
1057 &h->col_mv[(h->mby*h->mb_width + h->mbx)*4 + block]);
1058 break;
1059 case B_SUB_FWD:
1060 mv_pred(h, mv_scan[block], mv_scan[block]-3,
1061 MV_PRED_MEDIAN, BLK_8X8, 1);
1062 break;
1063 case B_SUB_SYM:
1064 mv_pred(h, mv_scan[block], mv_scan[block]-3,
1065 MV_PRED_MEDIAN, BLK_8X8, 1);
1066 mv_pred_sym(h, &h->mv[mv_scan[block]], BLK_8X8);
1067 break;
1068 }
1069 }
1070 for(block=0;block<4;block++) {
1071 if(sub_type[block] == B_SUB_BWD)
1072 mv_pred(h, mv_scan[block]+MV_BWD_OFFS,
1073 mv_scan[block]+MV_BWD_OFFS-3,
1074 MV_PRED_MEDIAN, BLK_8X8, 0);
1075 }
1076 break;
1077 default:
1078 assert((mb_type > B_SYM_16X16) && (mb_type < B_8X8));
1079 flags = b_partition_flags[(mb_type-1)>>1];
1080 if(mb_type & 1) { /* 16x8 macroblock types */
1081 if(flags & FWD0)
1082 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
1083 if(flags & SYM0) {
1084 mv_pred(h, MV_FWD_X0, MV_FWD_C2, MV_PRED_TOP, BLK_16X8, 1);
1085 mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_16X8);
1086 }
1087 if(flags & FWD1)
1088 mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
1089 if(flags & SYM1) {
1090 mv_pred(h, MV_FWD_X2, MV_FWD_A1, MV_PRED_LEFT, BLK_16X8, 1);
1091 mv_pred_sym(h, &h->mv[9], BLK_16X8);
1092 }
1093 if(flags & BWD0)
1094 mv_pred(h, MV_BWD_X0, MV_BWD_C2, MV_PRED_TOP, BLK_16X8, 0);
1095 if(flags & BWD1)
1096 mv_pred(h, MV_BWD_X2, MV_BWD_A1, MV_PRED_LEFT, BLK_16X8, 0);
1097 } else { /* 8x16 macroblock types */
1098 if(flags & FWD0)
1099 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
1100 if(flags & SYM0) {
1101 mv_pred(h, MV_FWD_X0, MV_FWD_B3, MV_PRED_LEFT, BLK_8X16, 1);
1102 mv_pred_sym(h, &h->mv[MV_FWD_X0], BLK_8X16);
1103 }
1104 if(flags & FWD1)
1105 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1);
1106 if(flags & SYM1) {
1107 mv_pred(h, MV_FWD_X1, MV_FWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 1);
1108 mv_pred_sym(h, &h->mv[6], BLK_8X16);
1109 }
1110 if(flags & BWD0)
1111 mv_pred(h, MV_BWD_X0, MV_BWD_B3, MV_PRED_LEFT, BLK_8X16, 0);
1112 if(flags & BWD1)
1113 mv_pred(h, MV_BWD_X1, MV_BWD_C2, MV_PRED_TOPRIGHT,BLK_8X16, 0);
1114 }
1115 }
1116 inter_pred(h);
1117 decode_residual_inter(h);
1118 filter_mb(h,mb_type);
1119 }
1120
1121 /*****************************************************************************
1122 *
1123 * slice level
1124 *
1125 ****************************************************************************/
1126
1127 static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) {
1128 if(h->stc > 0xAF)
1129 av_log(h->s.avctx, AV_LOG_ERROR, "unexpected start code 0x%02x\n", h->stc);
1130 h->mby = h->stc;
1131 if((h->mby == 0) && (!h->qp_fixed)){
1132 h->qp_fixed = get_bits1(gb);
1133 h->qp = get_bits(gb,6);
1134 }
1135 /* inter frame or second slice can have weighting params */
1136 if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2))
1137 if(get_bits1(gb)) { //slice_weighting_flag
1138 av_log(h->s.avctx, AV_LOG_ERROR,
1139 "weighted prediction not yet supported\n");
1140 }
1141 return 0;
1142 }
1143
1144 static inline void check_for_slice(AVSContext *h) {
1145 GetBitContext *gb = &h->s.gb;
1146 int align;
1147 align = (-get_bits_count(gb)) & 7;
1148 if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) {
1149 get_bits_long(gb,24+align);
1150 h->stc = get_bits(gb,8);
1151 decode_slice_header(h,gb);
1152 }
1153 }
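/* Slices begin at byte-aligned start codes 0x00000100 + mb_row (0x00..0xAF);
   this peeks at the next byte boundary and, if a start code follows,
   consumes it and parses the slice header, taking the new macroblock row
   from the start code's low byte. */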
1154
1155 /*****************************************************************************
1156 *
1157 * frame level
1158 *
1159 ****************************************************************************/
1160
1161 static void init_pic(AVSContext *h) {
1162 int i;
1163
1164 /* clear some predictors */
1165 for(i=0;i<=20;i+=4)
1166 veccpy(&h->mv[i],(vector_t *)&un_mv);
1167 veccpy(&h->mv[MV_BWD_X0], (vector_t *)&dir_mv);
1168 set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
1169 veccpy(&h->mv[MV_FWD_X0], (vector_t *)&dir_mv);
1170 set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
1171 h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
1172 h->cy = h->picture.data[0];
1173 h->cu = h->picture.data[1];
1174 h->cv = h->picture.data[2];
1175 h->l_stride = h->picture.linesize[0];
1176 h->c_stride = h->picture.linesize[1];
1177 h->luma_scan[2] = 8*h->l_stride;
1178 h->luma_scan[3] = 8*h->l_stride+8;
1179 h->mbx = h->mby = 0;
1180 h->flags = 0;
1181 }
1182
1183 static int decode_pic(AVSContext *h) {
1184 MpegEncContext *s = &h->s;
1185 int i,skip_count;
1186 enum mb_t mb_type;
1187
1188 if (!s->context_initialized) {
1189 if (MPV_common_init(s) < 0)
1190 return -1;
1191 }
1192 get_bits(&s->gb,16);//bbv_dwlay
1193 if(h->stc == PIC_PB_START_CODE) {
1194 h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE;
1195 /* make sure we have the reference frames we need */
1196 if(!h->DPB[0].data[0] ||
1197 (!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE))
1198 return -1;
1199 } else {
1200 h->pic_type = FF_I_TYPE;
1201 if(get_bits1(&s->gb))
1202 get_bits(&s->gb,16);//time_code
1203 }
1204 /* release last B frame */
1205 if(h->picture.data[0])
1206 s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
1207
1208 s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
1209 init_pic(h);
1210 h->picture.poc = get_bits(&s->gb,8)*2;
1211
1212 /* get temporal distances and MV scaling factors */
1213 if(h->pic_type != FF_B_TYPE) {
1214 h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
1215 } else {
1216 h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
1217 }
1218 h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
1219 h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
1220 h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
1221 if(h->pic_type == FF_B_TYPE) {
1222 h->sym_factor = h->dist[0]*h->scale_den[1];
1223 } else {
1224 h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
1225 h->direct_den[1] = h->dist[1] ? 16384/h->dist[1] : 0;
1226 }
1227
1228 if(s->low_delay)
1229 get_ue_golomb(&s->gb); //bbv_check_times
1230 h->progressive = get_bits1(&s->gb);
1231 if(h->progressive)
1232 h->pic_structure = 1;
1233 else if(!(h->pic_structure = get_bits1(&s->gb) && (h->stc == PIC_PB_START_CODE)) )
1234 get_bits1(&s->gb); //advanced_pred_mode_disable
1235 skip_bits1(&s->gb); //top_field_first
1236 skip_bits1(&s->gb); //repeat_first_field
1237 h->qp_fixed = get_bits1(&s->gb);
1238 h->qp = get_bits(&s->gb,6);
1239 if(h->pic_type == FF_I_TYPE) {
1240 if(!h->progressive && !h->pic_structure)
1241 skip_bits1(&s->gb);//what is this?
1242 skip_bits(&s->gb,4); //reserved bits
1243 } else {
1244 if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1))
1245 h->ref_flag = get_bits1(&s->gb);
1246 skip_bits(&s->gb,4); //reserved bits
1247 h->skip_mode_flag = get_bits1(&s->gb);
1248 }
1249 h->loop_filter_disable = get_bits1(&s->gb);
1250 if(!h->loop_filter_disable && get_bits1(&s->gb)) {
1251 h->alpha_offset = get_se_golomb(&s->gb);
1252 h->beta_offset = get_se_golomb(&s->gb);
1253 } else {
1254 h->alpha_offset = h->beta_offset = 0;
1255 }
1256 check_for_slice(h);
1257 if(h->pic_type == FF_I_TYPE) {
1258 do {
1259 init_mb(h);
1260 decode_mb_i(h,1);
1261 } while(next_mb(h));
1262 } else if(h->pic_type == FF_P_TYPE) {
1263 do {
1264 if(h->skip_mode_flag) {
1265 skip_count = get_ue_golomb(&s->gb);
1266 for(i=0;i<skip_count;i++) {
1267 init_mb(h);
1268 mb_skip_p(h);
1269 if(!next_mb(h))
1270 goto done;
1271 }
1272 mb_type = get_ue_golomb(&s->gb) + P_16X16;
1273 } else {
1274 mb_type = get_ue_golomb(&s->gb) + P_SKIP;
1275 }
1276 init_mb(h);
1277 if(mb_type > P_8X8) {
1278 h->cbp = cbp_tab[mb_type - P_8X8 - 1][0];
1279 decode_mb_i(h,0);
1280 } else {
1281 decode_mb_p(h,mb_type);
1282 }
1283 } while(next_mb(h));
1284 } else { //FF_B_TYPE
1285 do {
1286 if(h->skip_mode_flag) {
1287 skip_count = get_ue_golomb(&s->gb);
1288 for(i=0;i<skip_count;i++) {
1289 init_mb(h);
1290 mb_skip_b(h);
1291 inter_pred(h);
1292 filter_mb(h,B_SKIP);
1293 if(!next_mb(h))
1294 goto done;
1295 }
1296 mb_type = get_ue_golomb(&s->gb) + B_DIRECT;
1297 } else {
1298 mb_type = get_ue_golomb(&s->gb) + B_SKIP;
1299 }
1300 init_mb(h);
1301 if(mb_type > B_8X8) {
1302 h->cbp = cbp_tab[mb_type - B_8X8 - 1][0];
1303 decode_mb_i(h,0);
1304 } else {
1305 decode_mb_b(h,mb_type);
1306 }
1307 } while(next_mb(h));
1308 }
1309 done:
1310 if(h->pic_type != FF_B_TYPE) {
1311 if(h->DPB[1].data[0])
1312 s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
1313 memcpy(&h->DPB[1], &h->DPB[0], sizeof(Picture));
1314 memcpy(&h->DPB[0], &h->picture, sizeof(Picture));
1315 memset(&h->picture,0,sizeof(Picture));
1316 }
1317 return 0;
1318 }
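/* Reference handling: after every I or P picture the previous DPB[0] moves
   to DPB[1] and the new picture becomes DPB[0], so DPB[0] is always the
   nearest and DPB[1] the second-nearest reference. B pictures are returned
   to the caller directly and never enter the DPB. */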
1319
1320 /*****************************************************************************
1321 *
1322 * headers and interface
1323 *
1324 ****************************************************************************/
1325
1326 static void init_top_lines(AVSContext *h) {
1327 /* alloc top line of predictors */
1328 h->top_qp = av_malloc( h->mb_width);
1329 h->top_mv[0] = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
1330 h->top_mv[1] = av_malloc((h->mb_width*2+1)*sizeof(vector_t));
1331 h->top_pred_Y = av_malloc( h->mb_width*2*sizeof(int));
1332 h->top_border_y = av_malloc((h->mb_width+1)*16);
1333 h->top_border_u = av_malloc((h->mb_width+1)*8);
1334 h->top_border_v = av_malloc((h->mb_width+1)*8);
1335
1336 /* alloc space for co-located MVs and types */
1337 h->col_mv = av_malloc( h->mb_width*h->mb_height*4*sizeof(vector_t));
1338 h->col_type_base = av_malloc(h->mb_width*h->mb_height);
1339 }
1340
1341 static int decode_seq_header(AVSContext *h) {
1342 MpegEncContext *s = &h->s;
1343 extern const AVRational frame_rate_tab[];
1344 int frame_rate_code;
1345
1346 h->profile = get_bits(&s->gb,8);
1347 h->level = get_bits(&s->gb,8);
1348 skip_bits1(&s->gb); //progressive sequence
1349 s->width = get_bits(&s->gb,14);
1350 s->height = get_bits(&s->gb,14);
1351 skip_bits(&s->gb,2); //chroma format
1352 skip_bits(&s->gb,3); //sample_precision
1353 h->aspect_ratio = get_bits(&s->gb,4);
1354 frame_rate_code = get_bits(&s->gb,4);
1355 skip_bits(&s->gb,18);//bit_rate_lower
1356 skip_bits1(&s->gb); //marker_bit
1357 skip_bits(&s->gb,12);//bit_rate_upper
1358 s->low_delay = get_bits1(&s->gb);
1359 h->mb_width = (s->width + 15) >> 4;
1360 h->mb_height = (s->height + 15) >> 4;
1361 h->s.avctx->time_base.den = frame_rate_tab[frame_rate_code].num;
1362 h->s.avctx->time_base.num = frame_rate_tab[frame_rate_code].den;
1363 h->s.avctx->width = s->width;
1364 h->s.avctx->height = s->height;
1365 if(!h->top_qp)
1366 init_top_lines(h);
1367 return 0;
1368 }
1369
1370 /**
1371 * finds the end of the current frame in the bitstream.
1372 * @return the position of the first byte of the next frame, or -1
1373 */
1374 int ff_cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) {
1375 int pic_found, i;
1376 uint32_t state;
1377
1378 pic_found= pc->frame_start_found;
1379 state= pc->state;
1380
1381 i=0;
1382 if(!pic_found){
1383 for(i=0; i<buf_size; i++){
1384 state= (state<<8) | buf[i];
1385 if(state == PIC_I_START_CODE || state == PIC_PB_START_CODE){
1386 i++;
1387 pic_found=1;
1388 break;
1389 }
1390 }
1391 }
1392
1393 if(pic_found){
1394 /* EOF considered as end of frame */
1395 if (buf_size == 0)
1396 return 0;
1397 for(; i<buf_size; i++){
1398 state= (state<<8) | buf[i];
1399 if((state&0xFFFFFF00) == 0x100){
1400 if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
1401 pc->frame_start_found=0;
1402 pc->state=-1;
1403 return i-3;
1404 }
1405 }
1406 }
1407 }
1408 pc->frame_start_found= pic_found;
1409 pc->state= state;
1410 return END_NOT_FOUND;
1411 }
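/* The parser waits for an I or PB picture start code and then scans for the
   next start code outside the slice range (SLICE_MIN/MAX_START_CODE); that
   position ends the current frame. END_NOT_FOUND tells the caller to feed
   more data. */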
1412
1413 void ff_cavs_flush(AVCodecContext * avctx) {
1414 AVSContext *h = (AVSContext *)avctx->priv_data;
1415 h->got_keyframe = 0;
1416 }
1417
1418 static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
1419 uint8_t * buf, int buf_size) {
1420 AVSContext *h = avctx->priv_data;
1421 MpegEncContext *s = &h->s;
1422 int input_size;
1423 const uint8_t *buf_end;
1424 const uint8_t *buf_ptr;
1425 AVFrame *picture = data;
1426 uint32_t stc;
1427
1428 s->avctx = avctx;
1429
1430 if (buf_size == 0) {
1431 if(!s->low_delay && h->DPB[0].data[0]) {
1432 *data_size = sizeof(AVPicture);
1433 *picture = *(AVFrame *) &h->DPB[0];
1434 }
1435 return 0;
1436 }
1437
1438 buf_ptr = buf;
1439 buf_end = buf + buf_size;
1440 for(;;) {
1441 buf_ptr = ff_find_start_code(buf_ptr,buf_end, &stc);
1442 if(stc & 0xFFFFFE00)
1443 return FFMAX(0, buf_ptr - buf - s->parse_context.last_index);
1444 input_size = (buf_end - buf_ptr)*8;
1445 switch(stc) {
1446 case SEQ_START_CODE:
1447 init_get_bits(&s->gb, buf_ptr, input_size);
1448 decode_seq_header(h);
1449 break;
1450 case PIC_I_START_CODE:
1451 if(!h->got_keyframe) {
1452 if(h->DPB[0].data[0])
1453 avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
1454 if(h->DPB[1].data[0])
1455 avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
1456 h->got_keyframe = 1;
1457 }
1458 case PIC_PB_START_CODE:
1459 *data_size = 0;
1460 if(!h->got_keyframe)
1461 break;
1462 init_get_bits(&s->gb, buf_ptr, input_size);
1463 h->stc = stc;
1464 if(decode_pic(h))
1465 break;
1466 *data_size = sizeof(AVPicture);
1467 if(h->pic_type != FF_B_TYPE) {
1468 if(h->DPB[1].data[0]) {
1469 *picture = *(AVFrame *) &h->DPB[1];
1470 } else {
1471 *data_size = 0;
1472 }
1473 } else
1474 *picture = *(AVFrame *) &h->picture;
1475 break;
1476 case EXT_START_CODE:
1477 //mpeg_decode_extension(avctx,buf_ptr, input_size);
1478 break;
1479 case USER_START_CODE:
1480 //mpeg_decode_user_data(avctx,buf_ptr, input_size);
1481 break;
1482 default:
1483 if (stc >= SLICE_MIN_START_CODE &&
1484 stc <= SLICE_MAX_START_CODE) {
1485 init_get_bits(&s->gb, buf_ptr, input_size);
1486 decode_slice_header(h, &s->gb);
1487 }
1488 break;
1489 }
1490 }
1491 }
1492
1493 static int cavs_decode_init(AVCodecContext * avctx) {
1494 AVSContext *h = (AVSContext *)avctx->priv_data;
1495 MpegEncContext * const s = &h->s;
1496
1497 MPV_decode_defaults(s);
1498 s->avctx = avctx;
1499
1500 avctx->pix_fmt= PIX_FMT_YUV420P;
1501
1502 h->luma_scan[0] = 0;
1503 h->luma_scan[1] = 8;
1504 h->intra_pred_l[ INTRA_L_VERT] = intra_pred_vert;
1505 h->intra_pred_l[ INTRA_L_HORIZ] = intra_pred_horiz;
1506 h->intra_pred_l[ INTRA_L_LP] = intra_pred_lp;
1507 h->intra_pred_l[ INTRA_L_DOWN_LEFT] = intra_pred_down_left;
1508 h->intra_pred_l[INTRA_L_DOWN_RIGHT] = intra_pred_down_right;
1509 h->intra_pred_l[ INTRA_L_LP_LEFT] = intra_pred_lp_left;
1510 h->intra_pred_l[ INTRA_L_LP_TOP] = intra_pred_lp_top;
1511 h->intra_pred_l[ INTRA_L_DC_128] = intra_pred_dc_128;
1512 h->intra_pred_c[ INTRA_C_LP] = intra_pred_lp;
1513 h->intra_pred_c[ INTRA_C_HORIZ] = intra_pred_horiz;
1514 h->intra_pred_c[ INTRA_C_VERT] = intra_pred_vert;
1515 h->intra_pred_c[ INTRA_C_PLANE] = intra_pred_plane;
1516 h->intra_pred_c[ INTRA_C_LP_LEFT] = intra_pred_lp_left;
1517 h->intra_pred_c[ INTRA_C_LP_TOP] = intra_pred_lp_top;
1518 h->intra_pred_c[ INTRA_C_DC_128] = intra_pred_dc_128;
1519 veccpy(&h->mv[ 7], (vector_t *)&un_mv);
1520 veccpy(&h->mv[19], (vector_t *)&un_mv);
1521 return 0;
1522 }
1523
1524 static int cavs_decode_end(AVCodecContext * avctx) {
1525 AVSContext *h = (AVSContext *)avctx->priv_data;
1526
1527 av_free(h->top_qp);
1528 av_free(h->top_mv[0]);
1529 av_free(h->top_mv[1]);
1530 av_free(h->top_pred_Y);
1531 av_free(h->top_border_y);
1532 av_free(h->top_border_u);
1533 av_free(h->top_border_v);
1534 av_free(h->col_mv);
1535 av_free(h->col_type_base);
1536 return 0;
1537 }
1538
1539 AVCodec cavs_decoder = {
1540 "cavs",
1541 CODEC_TYPE_VIDEO,
1542 CODEC_ID_CAVS,
1543 sizeof(AVSContext),
1544 cavs_decode_init,
1545 NULL,
1546 cavs_decode_end,
1547 cavs_decode_frame,
1548 CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY, //FIXME is this correct ?
1549 .flush= ff_cavs_flush,
1550 };