vc1.c @ 3689:6d9e8253da2a (libavcodec)

Proper support for B/BI frames

author      kostya
date        Sat, 09 Sep 2006 11:21:56 +0000
parents     9d4583fe8fca
children    35aae593db08
comparison  3688:c63b9bb49532 -> 3689:6d9e8253da2a

--- a/vc1.c
+++ b/vc1.c
@@ -367,10 +367,11 @@
     uint8_t range_mapy;
     uint8_t range_mapuv;
     //@}
 
     int p_frame_skipped;
+    int bi_type;
 } VC1Context;
 
 /**
  * Get unary code of limited length
  * @fixme FIXME Slow and ugly
@@ -776,12 +777,18 @@
     uint8_t *srcY, *srcU, *srcV;
     int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
 
     if(!v->s.last_picture.data[0])return;
 
-    mx = s->mv[0][0][0];
-    my = s->mv[0][0][1];
+    mx = s->mv[dir][0][0];
+    my = s->mv[dir][0][1];
+
+    // store motion vectors for further use in B frames
+    if(s->pict_type == P_TYPE) {
+        s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
+        s->current_picture.motion_val[1][s->block_index[0]][1] = my;
+    }
     uvmx = (mx + ((mx & 3) == 3)) >> 1;
     uvmy = (my + ((my & 3) == 3)) >> 1;
     if(!dir) {
         srcY = s->last_picture.data[0];
         srcU = s->last_picture.data[1];
@@ -1038,10 +1045,12 @@
         tx = (mvx[t1] + mvx[t2]) / 2;
         ty = (mvy[t1] + mvy[t2]) / 2;
     } else
         return; //no need to do MC for inter blocks
 
+    s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
+    s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
    uvmx = (tx + ((tx&3) == 3)) >> 1;
    uvmy = (ty + ((ty&3) == 3)) >> 1;
 
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
@@ -1381,22 +1390,23 @@
             if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
             else v->s.pict_type = B_TYPE;
         } else v->s.pict_type = P_TYPE;
     } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
 
-    if(v->s.pict_type == I_TYPE)
-        get_bits(gb, 7); // skip buffer fullness
+    v->bi_type = 0;
     if(v->s.pict_type == B_TYPE) {
         v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
         v->bfraction = vc1_bfraction_lut[v->bfraction];
-        if(v->bfraction == -1) {
+        if(v->bfraction == 0) {
             v->s.pict_type = BI_TYPE;
         }
     }
+    if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
+        get_bits(gb, 7); // skip buffer fullness
 
     /* calculate RND */
-    if(v->s.pict_type == I_TYPE)
+    if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
         v->rnd = 1;
     if(v->s.pict_type == P_TYPE)
         v->rnd ^= 1;
 
     /* Quantizer stuff */
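
Note on the header-parsing hunk above: BFRACTION encodes the B frame's temporal position as a fraction of B_FRACTION_DEN (defined elsewhere; the new scale_mv() helper further down special-cases the value 256), and one codeword acts as an escape marking an intra-coded (BI) frame. The test changing from -1 to 0 implies the escape entry of vc1_bfraction_lut now decodes to 0; that table lives outside this diff, so the values below are illustrative stand-ins only. A minimal, self-contained sketch of the classification:

    #include <stdio.h>

    #define B_FRACTION_DEN 256                 /* denominator used throughout vc1.c */

    enum { P_TYPE, I_TYPE, B_TYPE, BI_TYPE };

    /* Illustrative entries only (roughly 1/2, 1/3 and 2/3 of 256, plus the escape);
     * the real vc1_bfraction_lut is defined outside this diff. */
    static const int demo_bfraction_lut[] = { 128, 85, 170, 0 };

    int main(void)
    {
        unsigned i;
        for (i = 0; i < sizeof(demo_bfraction_lut) / sizeof(demo_bfraction_lut[0]); i++) {
            int bfraction = demo_bfraction_lut[i];
            int pict_type = (bfraction == 0) ? BI_TYPE : B_TYPE;  /* the new == 0 test */
            printf("bfraction = %3d/%d -> %s\n", bfraction, B_FRACTION_DEN,
                   pict_type == BI_TYPE ? "BI (intra-coded)" : "B");
        }
        return 0;
    }
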
@@ -1566,10 +1576,14 @@
         v->y_ac_table_index = decode012(gb);
     }
     /* DC Syntax */
     v->s.dc_table_index = get_bits(gb, 1);
 
+    if(v->s.pict_type == BI_TYPE) {
+        v->s.pict_type = B_TYPE;
+        v->bi_type = 1;
+    }
     return 0;
 }
 
 static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
 {
@@ -2076,75 +2090,246 @@
     uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
     dsp->avg_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
     dsp->avg_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
 }
 
+static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
+{
+    int n = bfrac;
+
+#if B_FRACTION_DEN==256
+    if(inv)
+        n -= 256;
+    if(!qs)
+        return 2 * ((value * n + 255) >> 9);
+    return (value * n + 128) >> 8;
+#else
+    if(inv)
+        n -= B_FRACTION_DEN;
+    if(!qs)
+        return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
+    return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
+#endif
+}
+
 /** Reconstruct motion vector for B-frame and do motion compensation
  */
 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
 {
+    if(direct) {
+        vc1_mc_1mv(v, 0);
+        vc1_interp_mc(v);
+        return;
+    }
+    if(mode == BMV_TYPE_INTERPOLATED) {
+        vc1_mc_1mv(v, 0);
+        vc1_interp_mc(v);
+        return;
+    }
+
+    vc1_mc_1mv(v, (mode == BMV_TYPE_FORWARD));
+}
+
+static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
+{
     MpegEncContext *s = &v->s;
-    int mx[4], my[4], mv_x, mv_y;
-    int i;
-
+    int xy, wrap, off = 0;
+    int16_t *A, *B, *C;
+    int px, py;
+    int sum;
+    int r_x, r_y;
+    const uint8_t *is_intra = v->mb_type[0];
+
+    r_x = v->range_x;
+    r_y = v->range_y;
     /* scale MV difference to be quad-pel */
     dmv_x[0] <<= 1 - s->quarter_sample;
     dmv_y[0] <<= 1 - s->quarter_sample;
     dmv_x[1] <<= 1 - s->quarter_sample;
     dmv_y[1] <<= 1 - s->quarter_sample;
 
+    wrap = s->b8_stride;
+    xy = s->block_index[0];
+
+    if(s->mb_intra) {
+        s->current_picture.motion_val[0][xy][0] =
+        s->current_picture.motion_val[0][xy][1] =
+        s->current_picture.motion_val[1][xy][0] =
+        s->current_picture.motion_val[1][xy][1] = 0;
+        return;
+    }
+    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
+    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
+    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
+    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
     if(direct) {
-        for(i = 0; i < 4; i++) {
-            mx[i] = s->last_picture.motion_val[0][s->block_index[i]][0];
-            my[i] = s->last_picture.motion_val[0][s->block_index[i]][1];
-        }
-        mv_x = median4(mx[0], mx[1], mx[2], mx[3]);
-        mv_y = median4(my[0], my[1], my[2], my[3]);
-        s->mv[0][0][0] = (mv_x * v->bfraction + B_FRACTION_DEN/2) / B_FRACTION_DEN;
-        s->mv[0][0][1] = (mv_y * v->bfraction + B_FRACTION_DEN/2) / B_FRACTION_DEN;
-        vc1_mc_1mv(v, 0);
-
-        for(i = 0; i < 4; i++) {
-            mx[i] = s->next_picture.motion_val[0][s->block_index[i]][0];
-            my[i] = s->next_picture.motion_val[0][s->block_index[i]][1];
-        }
-        mv_x = median4(mx[0], mx[1], mx[2], mx[3]);
-        mv_y = median4(my[0], my[1], my[2], my[3]);
-        s->mv[1][0][0] = (mv_x * (B_FRACTION_DEN - v->bfraction) + B_FRACTION_DEN/2) / B_FRACTION_DEN;
-        s->mv[1][0][1] = (mv_y * (B_FRACTION_DEN - v->bfraction) + B_FRACTION_DEN/2) / B_FRACTION_DEN;
-        vc1_interp_mc(v);
+        s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
+        s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
+        s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
+        s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
         return;
     }
-    if(mode == BMV_TYPE_INTERPOLATED) {
-        s->mv[0][0][0] = dmv_x[0];
-        s->mv[0][0][1] = dmv_y[0];
-        vc1_mc_1mv(v, 0);
-        s->mv[1][0][0] = dmv_x[1];
-        s->mv[1][0][1] = dmv_y[1];
-        vc1_interp_mc(v);
-        return;
-    }
-
-    if(mode == BMV_TYPE_BACKWARD) {
-        for(i = 0; i < 4; i++) {
-            mx[i] = s->last_picture.motion_val[0][s->block_index[i]][0];
-            my[i] = s->last_picture.motion_val[0][s->block_index[i]][1];
-        }
-    } else {
-        for(i = 0; i < 4; i++) {
-            mx[i] = s->next_picture.motion_val[0][s->block_index[i]][0];
-            my[i] = s->next_picture.motion_val[0][s->block_index[i]][1];
-        }
-    }
-
-    /* XXX: not right but how to determine 4-MV intra/inter in another frame? */
-    mv_x = median4(mx[0], mx[1], mx[2], mx[3]);
-    mv_y = median4(my[0], my[1], my[2], my[3]);
-    s->mv[0][0][0] = mv_x;
-    s->mv[0][0][1] = mv_y;
-
-    vc1_mc_1mv(v, (mode == BMV_TYPE_FORWARD));
+
+    if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
+        C = s->current_picture.motion_val[0][xy - 2];
+        A = s->current_picture.motion_val[0][xy - wrap*2];
+        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
+        B = s->current_picture.motion_val[0][xy - wrap*2 + off];
+
+        if(!s->first_slice_line) { // predictor A is not out of bounds
+            if(s->mb_width == 1) {
+                px = A[0];
+                py = A[1];
+            } else {
+                px = mid_pred(A[0], B[0], C[0]);
+                py = mid_pred(A[1], B[1], C[1]);
+            }
+        } else if(s->mb_x) { // predictor C is not out of bounds
+            px = C[0];
+            py = C[1];
+        } else {
+            px = py = 0;
+        }
+        /* Pullback MV as specified in 8.3.5.3.4 */
+        {
+            int qx, qy, X, Y;
+            if(v->profile < PROFILE_ADVANCED) {
+                qx = (s->mb_x << 5);
+                qy = (s->mb_y << 5);
+                X = (s->mb_width << 5) - 4;
+                Y = (s->mb_height << 5) - 4;
+                if(qx + px < -28) px = -28 - qx;
+                if(qy + py < -28) py = -28 - qy;
+                if(qx + px > X) px = X - qx;
+                if(qy + py > Y) py = Y - qy;
+            } else {
+                qx = (s->mb_x << 6);
+                qy = (s->mb_y << 6);
+                X = (s->mb_width << 6) - 4;
+                Y = (s->mb_height << 6) - 4;
+                if(qx + px < -60) px = -60 - qx;
+                if(qy + py < -60) py = -60 - qy;
+                if(qx + px > X) px = X - qx;
+                if(qy + py > Y) py = Y - qy;
+            }
+        }
+        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
+        if(0 && !s->first_slice_line && s->mb_x) {
+            if(is_intra[xy - wrap])
+                sum = ABS(px) + ABS(py);
+            else
+                sum = ABS(px - A[0]) + ABS(py - A[1]);
+            if(sum > 32) {
+                if(get_bits1(&s->gb)) {
+                    px = A[0];
+                    py = A[1];
+                } else {
+                    px = C[0];
+                    py = C[1];
+                }
+            } else {
+                if(is_intra[xy - 2])
+                    sum = ABS(px) + ABS(py);
+                else
+                    sum = ABS(px - C[0]) + ABS(py - C[1]);
+                if(sum > 32) {
+                    if(get_bits1(&s->gb)) {
+                        px = A[0];
+                        py = A[1];
+                    } else {
+                        px = C[0];
+                        py = C[1];
+                    }
+                }
+            }
+        }
+        /* store MV using signed modulus of MV range defined in 4.11 */
+        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
+        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
+    }
+    if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
+        C = s->current_picture.motion_val[1][xy - 2];
+        A = s->current_picture.motion_val[1][xy - wrap*2];
+        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
+        B = s->current_picture.motion_val[1][xy - wrap*2 + off];
+
+        if(!s->first_slice_line) { // predictor A is not out of bounds
+            if(s->mb_width == 1) {
+                px = A[0];
+                py = A[1];
+            } else {
+                px = mid_pred(A[0], B[0], C[0]);
+                py = mid_pred(A[1], B[1], C[1]);
+            }
+        } else if(s->mb_x) { // predictor C is not out of bounds
+            px = C[0];
+            py = C[1];
+        } else {
+            px = py = 0;
+        }
+        /* Pullback MV as specified in 8.3.5.3.4 */
+        {
+            int qx, qy, X, Y;
+            if(v->profile < PROFILE_ADVANCED) {
+                qx = (s->mb_x << 5);
+                qy = (s->mb_y << 5);
+                X = (s->mb_width << 5) - 4;
+                Y = (s->mb_height << 5) - 4;
+                if(qx + px < -28) px = -28 - qx;
+                if(qy + py < -28) py = -28 - qy;
+                if(qx + px > X) px = X - qx;
+                if(qy + py > Y) py = Y - qy;
+            } else {
+                qx = (s->mb_x << 6);
+                qy = (s->mb_y << 6);
+                X = (s->mb_width << 6) - 4;
+                Y = (s->mb_height << 6) - 4;
+                if(qx + px < -60) px = -60 - qx;
+                if(qy + py < -60) py = -60 - qy;
+                if(qx + px > X) px = X - qx;
+                if(qy + py > Y) py = Y - qy;
+            }
+        }
+        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
+        if(0 && !s->first_slice_line && s->mb_x) {
+            if(is_intra[xy - wrap])
+                sum = ABS(px) + ABS(py);
+            else
+                sum = ABS(px - A[0]) + ABS(py - A[1]);
+            if(sum > 32) {
+                if(get_bits1(&s->gb)) {
+                    px = A[0];
+                    py = A[1];
+                } else {
+                    px = C[0];
+                    py = C[1];
+                }
+            } else {
+                if(is_intra[xy - 2])
+                    sum = ABS(px) + ABS(py);
+                else
+                    sum = ABS(px - C[0]) + ABS(py - C[1]);
+                if(sum > 32) {
+                    if(get_bits1(&s->gb)) {
+                        px = A[0];
+                        py = A[1];
+                    } else {
+                        px = C[0];
+                        py = C[1];
+                    }
+                }
+            }
+        }
+        /* store MV using signed modulus of MV range defined in 4.11 */
+
+        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
+        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
+    }
+    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
+    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
+    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
+    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
 }
 
 /** Get predicted DC value for I-frames only
  * prediction dir: left=0, top=1
  * @param s MpegEncContext
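
Note on vc1_pred_b_mv() above: in direct mode both vectors come from the co-located motion vector that the surrounding P frames now cache in current_picture.motion_val[1] (see the earlier hunks that store mx/my and tx/ty there). One scale_mv() call scales it by bfraction/B_FRACTION_DEN and the other, with inv set, by (bfraction - B_FRACTION_DEN)/B_FRACTION_DEN, with an extra snap to the half-pel grid when quarter_sample is off. Explicitly coded vectors are instead predicted from neighbours A/B/C, pulled back into the picture, and stored as a signed wrap of predictor + differential into [-range, range). The standalone program below copies those two bits of arithmetic; colocated_mv, bfraction = 128 and range = 64 are made-up sample values:

    #include <stdio.h>

    #define B_FRACTION_DEN 256   /* same denominator the decoder uses */

    /* Copy of the scale_mv() logic from the hunk above, with the always_inline
     * attribute dropped so it compiles on its own. */
    static int scale_mv(int value, int bfrac, int inv, int qs)
    {
        int n = bfrac;

    #if B_FRACTION_DEN==256
        if(inv)
            n -= 256;
        if(!qs)
            return 2 * ((value * n + 255) >> 9);
        return (value * n + 128) >> 8;
    #else
        if(inv)
            n -= B_FRACTION_DEN;
        if(!qs)
            return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
        return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
    #endif
    }

    /* Signed wrap into [-range, range), as used when storing px + dmv above;
     * the mask arithmetic presumes range is a power of two. */
    static int wrap_mv(int v, int range)
    {
        return ((v + range) & ((range << 1) - 1)) - range;
    }

    int main(void)
    {
        int colocated_mv = 42;   /* made-up co-located MV component, quarter-pel units */
        int bfraction    = 128;  /* made-up: B frame halfway between its references */

        printf("quarter-pel: %d and %d\n",
               scale_mv(colocated_mv, bfraction, 0, 1),   /* 42 *  128/256 ->  21 */
               scale_mv(colocated_mv, bfraction, 1, 1));  /* 42 * -128/256 -> -21 */
        printf("half-pel:    %d and %d\n",                /* same, snapped to even values */
               scale_mv(colocated_mv, bfraction, 0, 0),
               scale_mv(colocated_mv, bfraction, 1, 0));
        printf("wrapped:     %d\n", wrap_mv(70, 64));     /* 70 wraps to 70 - 128 = -58 */
        return 0;
    }
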
@@ -3099,10 +3284,14 @@
     {
         if (!skipped)
         {
             GET_MVDATA(dmv_x, dmv_y);
 
+            if (s->mb_intra) {
+                s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+                s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+            }
             s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
             vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
 
             /* FIXME Set DC val for inter block ? */
             if (s->mb_intra && !mb_has_coeffs)
@@ -3352,14 +3541,20 @@
             case 1:
                 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
                 break;
             case 2:
                 bmvtype = BMV_TYPE_INTERPOLATED;
+                dmv_x[1] = dmv_y[1] = 0;
             }
         }
     }
+    for(i = 0; i < 6; i++)
+        v->mb_type[0][s->block_index[i]] = s->mb_intra;
+
     if (skipped) {
+        if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
+        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
         return;
     }
     if (direct) {
         cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
@@ -3367,33 +3562,40 @@
         s->mb_intra = 0;
         mb_has_coeffs = 0;
         s->current_picture.qscale_table[mb_pos] = mquant;
         if(!v->ttmbf)
             ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
+        dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
+        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
         vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
     } else {
         if(!mb_has_coeffs && !s->mb_intra) {
             /* no coded blocks - effectively skipped */
+            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
             vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
             return;
         }
         if(s->mb_intra && !mb_has_coeffs) {
             GET_MQUANT();
             s->current_picture.qscale_table[mb_pos] = mquant;
             s->ac_pred = get_bits1(gb);
             cbp = 0;
+            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
         } else {
             if(bmvtype == BMV_TYPE_INTERPOLATED) {
                 GET_MVDATA(dmv_x[1], dmv_y[1]);
                 if(!mb_has_coeffs) {
                     /* interpolated skipped block */
+                    vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
                     vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
                     return;
                 }
             }
-            if(!s->mb_intra)
+            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
+            if(!s->mb_intra) {
                 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
+            }
             if(s->mb_intra)
                 s->ac_pred = get_bits1(gb);
             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
             GET_MQUANT();
             s->current_picture.qscale_table[mb_pos] = mquant;
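
Note on the vc1_decode_b_mb() changes above: MV prediction has been split out into vc1_pred_b_mv(), which now runs on every path through the macroblock decoder (skipped, direct, effectively skipped, and fully coded) before any motion compensation via vc1_b_mc(), so current_picture.motion_val[] is always filled in for the A/B/C neighbour predictors. A deliberately collapsed sketch of that ordering with stand-in stubs (the real functions take a VC1Context, not these toy parameters):

    #include <stdio.h>

    enum { BMV_TYPE_BACKWARD, BMV_TYPE_FORWARD, BMV_TYPE_INTERPOLATED };

    /* Stand-ins for vc1_pred_b_mv() and vc1_b_mc(); they only trace the call order. */
    static void pred_b_mv(int direct, int bmvtype)
    {
        printf("  pred_b_mv(direct=%d, bmvtype=%d)\n", direct, bmvtype);
    }

    static void b_mc(int direct, int bmvtype)
    {
        printf("  b_mc(direct=%d, bmvtype=%d)\n", direct, bmvtype);
    }

    /* Collapsed view of the branches above: prediction always precedes compensation,
     * and a skipped direct macroblock is forced to the interpolated type. */
    static void decode_b_mb_sketch(int skipped, int direct, int bmvtype)
    {
        if (skipped && direct)
            bmvtype = BMV_TYPE_INTERPOLATED;
        pred_b_mv(direct, bmvtype);
        b_mc(direct, bmvtype);
    }

    int main(void)
    {
        puts("skipped, direct:");
        decode_b_mb_sketch(1, 1, BMV_TYPE_FORWARD);
        puts("coded, backward:");
        decode_b_mb_sketch(0, 0, BMV_TYPE_BACKWARD);
        return 0;
    }
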
@@ -3802,11 +4004,14 @@
             vc1_decode_skip_blocks(v);
         else
             vc1_decode_p_blocks(v);
         break;
     case B_TYPE:
-        vc1_decode_b_blocks(v);
+        if(v->bi_type)
+            vc1_decode_i_blocks(v);
+        else
+            vc1_decode_b_blocks(v);
         break;
     }
 }
 
 
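Taken together, the hunks route BI frames through the decoder like this: the frame header detects them via the BFRACTION escape, immediately remaps them to B_TYPE with bi_type set, and the block layer then dispatches them to the I-frame block decoder. A condensed, self-contained sketch of that flow (the *_sketch names and the cut-down struct are illustrative, not the real vc1.c interfaces, and all header fields other than BFRACTION are omitted):

    #include <stdio.h>

    enum { P_TYPE, I_TYPE, B_TYPE, BI_TYPE };

    typedef struct {
        int pict_type;
        int bfraction;   /* 0 stands for the BFRACTION escape value */
        int bi_type;
    } SketchContext;

    static void parse_header_sketch(SketchContext *v, int coded_type, int bfraction)
    {
        v->bi_type   = 0;
        v->pict_type = coded_type;
        v->bfraction = bfraction;
        if (v->pict_type == B_TYPE && v->bfraction == 0)
            v->pict_type = BI_TYPE;          /* escape code -> intra-coded B frame */
        if (v->pict_type == BI_TYPE) {       /* after the header, BI is handled as B ... */
            v->pict_type = B_TYPE;
            v->bi_type   = 1;                /* ... but remembered as intra-only */
        }
    }

    static void decode_blocks_sketch(const SketchContext *v)
    {
        if (v->pict_type == B_TYPE) {
            if (v->bi_type)
                puts("vc1_decode_i_blocks()");  /* BI frames reuse the I-frame block layer */
            else
                puts("vc1_decode_b_blocks()");
        } else {
            puts("other picture type");
        }
    }

    int main(void)
    {
        SketchContext v;

        parse_header_sketch(&v, B_TYPE, 128);   /* ordinary B frame */
        decode_blocks_sketch(&v);
        parse_header_sketch(&v, B_TYPE, 0);     /* BFRACTION escape -> BI */
        decode_blocks_sketch(&v);
        return 0;
    }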