Mercurial > libavcodec.hg
comparison h264.h @ 11627:56591b8041ea libavcodec
Move static function fill_filter_caches() from h264.h to h264.c.
The function is only used within that file, so it makes sense to place
it there. This fixes many warnings of the type:
h264.h:1170: warning: 'fill_filter_caches' defined but not used
author | diego |
---|---|
date | Tue, 13 Apr 2010 22:15:49 +0000 |
parents | 44c5c540722c |
children | 7dd2a45249a9 |
comparison
equal
deleted
inserted
replaced
11626:4c120a633832 | 11627:56591b8041ea |
---|---|
1162 | 1162 |
1163 h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[0]); | 1163 h->neighbor_transform_size= !!IS_8x8DCT(top_type) + !!IS_8x8DCT(left_type[0]); |
1164 } | 1164 } |
1165 | 1165 |
1166 /** | 1166 /** |
1167 * | |
1168 * @return non-zero if the loop filter can be skipped | |
1169 */ | |
1170 static int fill_filter_caches(H264Context *h, int mb_type){ | |
1171 MpegEncContext * const s = &h->s; | |
1172 const int mb_xy= h->mb_xy; | |
1173 int top_xy, left_xy[2]; | |
1174 int top_type, left_type[2]; | |
1175 | |
/* Index of the macroblock above; MB_FIELD shifts the stride for field
 * macroblocks — assumes MB_FIELD is 0/1, TODO confirm against h264.h macros. */
1176 top_xy = mb_xy - (s->mb_stride << MB_FIELD); | |
1177 | |
1178 //FIXME deblocking could skip the intra and nnz parts. | |
1179 | |
1180 /* Wow, what a mess, why didn't they simplify the interlacing & intra | |
1181 * stuff, I can't imagine that these complex rules are worth it. */ | |
1182 | |
/* Left neighbours default to the previous macroblock; in MBAFF the
 * frame/field flag mismatch between current and left MB pair requires the
 * stride adjustments below (different rows of the MB pair are adjacent). */
1183 left_xy[1] = left_xy[0] = mb_xy-1; | |
1184 if(FRAME_MBAFF){ | |
1185 const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]); | |
1186 const int curr_mb_field_flag = IS_INTERLACED(mb_type); | |
1187 if(s->mb_y&1){ | |
1188 if (left_mb_field_flag != curr_mb_field_flag) { | |
1189 left_xy[0] -= s->mb_stride; | |
1190 } | |
1191 }else{ | |
1192 if(curr_mb_field_flag){ | |
/* Branchless: subtracts one stride only when bit 7 of the top MB's type is
 * clear — presumably the interlaced flag, TODO confirm bit layout. */
1193 top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1); | |
1194 } | |
1195 if (left_mb_field_flag != curr_mb_field_flag) { | |
1196 left_xy[1] += s->mb_stride; | |
1197 } | |
1198 } | |
1199 } | |
1200 | |
1201 h->top_mb_xy = top_xy; | |
1202 h->left_mb_xy[0] = left_xy[0]; | |
1203 h->left_mb_xy[1] = left_xy[1]; | |
/* Early-out: return 1 ("loop filter can be skipped") when the qp of this MB
 * and the averaged qp with each available neighbour are all at or below the
 * precomputed threshold. */
1204 { | |
1205 //for sufficiently low qp, filtering wouldn't do anything | |
1206 //this is a conservative estimate: could also check beta_offset and more accurate chroma_qp | |
1207 int qp_thresh = h->qp_thresh; //FIXME strictly we should store qp_thresh for each mb of a slice | |
1208 int qp = s->current_picture.qscale_table[mb_xy]; | |
1209 if(qp <= qp_thresh | |
1210 && (left_xy[0]<0 || ((qp + s->current_picture.qscale_table[left_xy[0]] + 1)>>1) <= qp_thresh) | |
1211 && (top_xy < 0 || ((qp + s->current_picture.qscale_table[top_xy ] + 1)>>1) <= qp_thresh)){ | |
1212 if(!FRAME_MBAFF) | |
1213 return 1; | |
/* MBAFF also checks the second left neighbour and the MB two rows up.
 * NOTE(review): the guard tests left_xy[0] but indexes left_xy[1] — looks
 * intentional only if left_xy[1] is valid whenever left_xy[0] is; confirm. */
1214 if( (left_xy[0]< 0 || ((qp + s->current_picture.qscale_table[left_xy[1] ] + 1)>>1) <= qp_thresh) | |
1215 && (top_xy < s->mb_stride || ((qp + s->current_picture.qscale_table[top_xy -s->mb_stride] + 1)>>1) <= qp_thresh)) | |
1216 return 1; | |
1217 } | |
1218 } | |
1219 | |
/* Fetch neighbour mb_types; neighbours outside the current slice
 * (deblocking_filter == 2: filter only within slice) or unavailable
 * (slice_table == 0xFFFF) are zeroed, i.e. treated as missing. */
1220 top_type = s->current_picture.mb_type[top_xy] ; | |
1221 left_type[0] = s->current_picture.mb_type[left_xy[0]]; | |
1222 left_type[1] = s->current_picture.mb_type[left_xy[1]]; | |
1223 if(h->deblocking_filter == 2){ | |
1224 if(h->slice_table[top_xy ] != h->slice_num) top_type= 0; | |
1225 if(h->slice_table[left_xy[0] ] != h->slice_num) left_type[0]= left_type[1]= 0; | |
1226 }else{ | |
1227 if(h->slice_table[top_xy ] == 0xFFFF) top_type= 0; | |
1228 if(h->slice_table[left_xy[0] ] == 0xFFFF) left_type[0]= left_type[1] =0; | |
1229 } | |
1230 h->top_type = top_type ; | |
1231 h->left_type[0]= left_type[0]; | |
1232 h->left_type[1]= left_type[1]; | |
1233 | |
/* Intra MBs need no mv/ref/nnz caches for filtering; 0 = "cannot skip". */
1234 if(IS_INTRA(mb_type)) | |
1235 return 0; | |
1236 | |
/* Copy this MB's non-zero-count values into the cache layout (8-wide rows)
 * used by the loop filter. */
1237 AV_COPY64(&h->non_zero_count_cache[0+8*1], &h->non_zero_count[mb_xy][ 0]); | |
1238 AV_COPY64(&h->non_zero_count_cache[0+8*2], &h->non_zero_count[mb_xy][ 8]); | |
1239 AV_COPY32(&h->non_zero_count_cache[0+8*5], &h->non_zero_count[mb_xy][16]); | |
1240 AV_COPY32(&h->non_zero_count_cache[4+8*3], &h->non_zero_count[mb_xy][20]); | |
1241 AV_COPY64(&h->non_zero_count_cache[0+8*4], &h->non_zero_count[mb_xy][24]); | |
1242 | |
1243 h->cbp= h->cbp_table[mb_xy]; | |
1244 | |
/* Fill the motion-vector and reference caches of this MB for each active
 * reference list; lists the MB does not use are marked LIST_NOT_USED. */
1245 { | |
1246 int list; | |
1247 for(list=0; list<h->list_count; list++){ | |
1248 int8_t *ref; | |
1249 int y, b_stride; | |
1250 int16_t (*mv_dst)[2]; | |
1251 int16_t (*mv_src)[2]; | |
1252 | |
1253 if(!USES_LIST(mb_type, list)){ | |
1254 fill_rectangle( h->mv_cache[list][scan8[0]], 4, 4, 8, pack16to32(0,0), 4); | |
1255 AV_WN32A(&h->ref_cache[list][scan8[ 0]], ((LIST_NOT_USED)&0xFF)*0x01010101u); | |
1256 AV_WN32A(&h->ref_cache[list][scan8[ 2]], ((LIST_NOT_USED)&0xFF)*0x01010101u); | |
1257 AV_WN32A(&h->ref_cache[list][scan8[ 8]], ((LIST_NOT_USED)&0xFF)*0x01010101u); | |
1258 AV_WN32A(&h->ref_cache[list][scan8[10]], ((LIST_NOT_USED)&0xFF)*0x01010101u); | |
1259 continue; | |
1260 } | |
1261 | |
/* Translate per-slice reference indices to frame numbers via ref2frm and
 * splat each 8x8 block's ref across its two cache entries. */
1262 ref = &s->current_picture.ref_index[list][4*mb_xy]; | |
1263 { | |
1264 int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); | |
1265 AV_WN32A(&h->ref_cache[list][scan8[ 0]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); | |
1266 AV_WN32A(&h->ref_cache[list][scan8[ 2]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); | |
1267 ref += 2; | |
1268 AV_WN32A(&h->ref_cache[list][scan8[ 8]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); | |
1269 AV_WN32A(&h->ref_cache[list][scan8[10]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); | |
1270 } | |
1271 | |
/* Copy the 4x4 grid of motion vectors (16 bytes per row) into the cache. */
1272 b_stride = h->b_stride; | |
1273 mv_dst = &h->mv_cache[list][scan8[0]]; | |
1274 mv_src = &s->current_picture.motion_val[list][4*s->mb_x + 4*s->mb_y*b_stride]; | |
1275 for(y=0; y<4; y++){ | |
1276 AV_COPY128(mv_dst + 8*y, mv_src + y*b_stride); | |
1277 } | |
1278 | |
1279 } | |
1280 } | |
1281 | |
1282 | |
/* Diagram of which cache entries come from the Top and Left neighbours: */
1283 /* | |
1284 0 . T T. T T T T | |
1285 1 L . .L . . . . | |
1286 2 L . .L . . . . | |
1287 3 . T TL . . . . | |
1288 4 L . .L . . . . | |
1289 5 L . .. . . . . | |
1290 */ | |
1291 //FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec) | |
/* Pull the bottom row of the top neighbour's nnz values into cache row 0. */
1292 if(top_type){ | |
1293 AV_COPY32(&h->non_zero_count_cache[4+8*0], &h->non_zero_count[top_xy][4+3*8]); | |
1294 } | |
1295 | |
/* Pull the right column of the left neighbour's nnz values into cache column 3. */
1296 if(left_type[0]){ | |
1297 h->non_zero_count_cache[3+8*1]= h->non_zero_count[left_xy[0]][7+0*8]; | |
1298 h->non_zero_count_cache[3+8*2]= h->non_zero_count[left_xy[0]][7+1*8]; | |
1299 h->non_zero_count_cache[3+8*3]= h->non_zero_count[left_xy[0]][7+2*8]; | |
1300 h->non_zero_count_cache[3+8*4]= h->non_zero_count[left_xy[0]][7+3*8]; | |
1301 } | |
1302 | |
1303 // CAVLC 8x8dct requires NNZ values for residual decoding that differ from what the loop filter needs | |
/* So for CAVLC + 8x8 transform, rebuild the neighbour/current nnz cache
 * entries from the coded-block-pattern bits instead. */
1304 if(!CABAC && h->pps.transform_8x8_mode){ | |
1305 if(IS_8x8DCT(top_type)){ | |
1306 h->non_zero_count_cache[4+8*0]= | |
1307 h->non_zero_count_cache[5+8*0]= h->cbp_table[top_xy] & 4; | |
1308 h->non_zero_count_cache[6+8*0]= | |
1309 h->non_zero_count_cache[7+8*0]= h->cbp_table[top_xy] & 8; | |
1310 } | |
1311 if(IS_8x8DCT(left_type[0])){ | |
1312 h->non_zero_count_cache[3+8*1]= | |
1313 h->non_zero_count_cache[3+8*2]= h->cbp_table[left_xy[0]]&2; //FIXME check MBAFF | |
1314 } | |
1315 if(IS_8x8DCT(left_type[1])){ | |
1316 h->non_zero_count_cache[3+8*3]= | |
1317 h->non_zero_count_cache[3+8*4]= h->cbp_table[left_xy[1]]&8; //FIXME check MBAFF | |
1318 } | |
1319 | |
/* Each cbp bit (1,2,4,8) covers one 8x8 quadrant = four 4x4 cache entries. */
1320 if(IS_8x8DCT(mb_type)){ | |
1321 h->non_zero_count_cache[scan8[0 ]]= h->non_zero_count_cache[scan8[1 ]]= | |
1322 h->non_zero_count_cache[scan8[2 ]]= h->non_zero_count_cache[scan8[3 ]]= h->cbp & 1; | |
1323 | |
1324 h->non_zero_count_cache[scan8[0+ 4]]= h->non_zero_count_cache[scan8[1+ 4]]= | |
1325 h->non_zero_count_cache[scan8[2+ 4]]= h->non_zero_count_cache[scan8[3+ 4]]= h->cbp & 2; | |
1326 | |
1327 h->non_zero_count_cache[scan8[0+ 8]]= h->non_zero_count_cache[scan8[1+ 8]]= | |
1328 h->non_zero_count_cache[scan8[2+ 8]]= h->non_zero_count_cache[scan8[3+ 8]]= h->cbp & 4; | |
1329 | |
1330 h->non_zero_count_cache[scan8[0+12]]= h->non_zero_count_cache[scan8[1+12]]= | |
1331 h->non_zero_count_cache[scan8[2+12]]= h->non_zero_count_cache[scan8[3+12]]= h->cbp & 8; | |
1332 } | |
1333 } | |
1334 | |
/* For inter/direct MBs, also fill the mv/ref cache rows borrowed from the
 * top and left neighbours (row -1 and column -1 of the cache). */
1335 if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){ | |
1336 int list; | |
1337 for(list=0; list<h->list_count; list++){ | |
1338 if(USES_LIST(top_type, list)){ | |
/* Bottom row of the top neighbour: its last b-row and last two 8x8 refs. */
1339 const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride; | |
1340 const int b8_xy= 4*top_xy + 2; | |
1341 int (*ref2frm)[64] = h->ref2frm[ h->slice_table[top_xy]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); | |
1342 AV_COPY128(h->mv_cache[list][scan8[0] + 0 - 1*8], s->current_picture.motion_val[list][b_xy + 0]); | |
1343 h->ref_cache[list][scan8[0] + 0 - 1*8]= | |
1344 h->ref_cache[list][scan8[0] + 1 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 0]]; | |
1345 h->ref_cache[list][scan8[0] + 2 - 1*8]= | |
1346 h->ref_cache[list][scan8[0] + 3 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 1]]; | |
1347 }else{ | |
1348 AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]); | |
1349 AV_WN32A(&h->ref_cache[list][scan8[0] + 0 - 1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u); | |
1350 } | |
1351 | |
/* Left column only when current and left MB have the same frame/field
 * coding (XOR of the interlaced flags is zero). */
1352 if(!IS_INTERLACED(mb_type^left_type[0])){ | |
1353 if(USES_LIST(left_type[0], list)){ | |
/* Right column of the left neighbour: last b-column, right 8x8 refs. */
1354 const int b_xy= h->mb2b_xy[left_xy[0]] + 3; | |
1355 const int b8_xy= 4*left_xy[0] + 1; | |
1356 int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[0]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); | |
1357 AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 0 ], s->current_picture.motion_val[list][b_xy + h->b_stride*0]); | |
1358 AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 8 ], s->current_picture.motion_val[list][b_xy + h->b_stride*1]); | |
1359 AV_COPY32(h->mv_cache[list][scan8[0] - 1 +16 ], s->current_picture.motion_val[list][b_xy + h->b_stride*2]); | |
1360 AV_COPY32(h->mv_cache[list][scan8[0] - 1 +24 ], s->current_picture.motion_val[list][b_xy + h->b_stride*3]); | |
1361 h->ref_cache[list][scan8[0] - 1 + 0 ]= | |
1362 h->ref_cache[list][scan8[0] - 1 + 8 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*0]]; | |
1363 h->ref_cache[list][scan8[0] - 1 +16 ]= | |
1364 h->ref_cache[list][scan8[0] - 1 +24 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*1]]; | |
1365 }else{ | |
1366 AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 0 ]); | |
1367 AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 8 ]); | |
1368 AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +16 ]); | |
1369 AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +24 ]); | |
1370 h->ref_cache[list][scan8[0] - 1 + 0 ]= | |
1371 h->ref_cache[list][scan8[0] - 1 + 8 ]= | |
1372 h->ref_cache[list][scan8[0] - 1 + 16 ]= | |
1373 h->ref_cache[list][scan8[0] - 1 + 24 ]= LIST_NOT_USED; | |
1374 } | |
1375 } | |
1376 } | |
1377 } | |
1378 | |
/* Caches filled; 0 means the loop filter must run for this MB. */
1379 return 0; | |
1380 } | |
1381 | |
1382 /** | |
1383 * gets the predicted intra4x4 prediction mode. | 1167 * gets the predicted intra4x4 prediction mode. |
1384 */ | 1168 */ |
1385 static inline int pred_intra_mode(H264Context *h, int n){ | 1169 static inline int pred_intra_mode(H264Context *h, int n){ |
1386 const int index8= scan8[n]; | 1170 const int index8= scan8[n]; |
1387 const int left= h->intra4x4_pred_mode_cache[index8 - 1]; | 1171 const int left= h->intra4x4_pred_mode_cache[index8 - 1]; |