comparison h264_loopfilter.c @ 11203:10c06a9bd3d9 libavcodec

H264: use alias-safe macros. This eliminates all aliasing violation warnings in h264 code. No measurable speed difference with gcc-4.4.3 on i7.
author mru
date Thu, 18 Feb 2010 16:24:31 +0000
parents d464f498e19f
children 98970e51365a
comparison
equal deleted inserted replaced
11202:778139a5e058 11203:10c06a9bd3d9
23 * @file libavcodec/h264_loopfilter.c 23 * @file libavcodec/h264_loopfilter.c
24 * H.264 / AVC / MPEG4 part10 loop filter. 24 * H.264 / AVC / MPEG4 part10 loop filter.
25 * @author Michael Niedermayer <michaelni@gmx.at> 25 * @author Michael Niedermayer <michaelni@gmx.at>
26 */ 26 */
27 27
28 #include "libavutil/intreadwrite.h"
28 #include "internal.h" 29 #include "internal.h"
29 #include "dsputil.h" 30 #include "dsputil.h"
30 #include "avcodec.h" 31 #include "avcodec.h"
31 #include "mpegvideo.h" 32 #include "mpegvideo.h"
32 #include "h264.h" 33 #include "h264.h"
366 filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, h); 367 filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, h);
367 filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, h); 368 filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, h);
368 return; 369 return;
369 } else { 370 } else {
370 LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]); 371 LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]);
371 uint64_t (*bSv)[4] = (uint64_t(*)[4])bS;
372 int edges; 372 int edges;
373 if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) { 373 if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) {
374 edges = 4; 374 edges = 4;
375 bSv[0][0] = bSv[0][2] = bSv[1][0] = bSv[1][2] = 0x0002000200020002ULL; 375 AV_WN64A(bS[0][0], 0x0002000200020002ULL);
376 AV_WN64A(bS[0][2], 0x0002000200020002ULL);
377 AV_WN64A(bS[1][0], 0x0002000200020002ULL);
378 AV_WN64A(bS[1][2], 0x0002000200020002ULL);
376 } else { 379 } else {
377 int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0; 380 int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0;
378 int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0; 381 int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0;
379 int step = 1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1; 382 int step = 1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1;
380 edges = 4 - 3*((mb_type>>3) & !(h->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4; 383 edges = 4 - 3*((mb_type>>3) & !(h->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
381 s->dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache, 384 s->dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache,
382 h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE); 385 h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE);
383 } 386 }
384 if( IS_INTRA(left_type) ) 387 if( IS_INTRA(left_type) )
385 bSv[0][0] = 0x0004000400040004ULL; 388 AV_WN64A(bS[0][0], 0x0004000400040004ULL);
386 if( IS_INTRA(h->top_type) ) 389 if( IS_INTRA(h->top_type) )
387 bSv[1][0] = FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL; 390 AV_WN64A(bS[1][0], FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL);
388 391
389 #define FILTER(hv,dir,edge)\ 392 #define FILTER(hv,dir,edge)\
390 if(bSv[dir][edge]) {\ 393 if(AV_RN64A(bS[dir][edge])) { \
391 filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\ 394 filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\
392 if(!(edge&1)) {\ 395 if(!(edge&1)) {\
393 filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ 396 filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
394 filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\ 397 filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
395 }\ 398 }\
475 478
476 for(j=0; j<2; j++, mbn_xy += s->mb_stride){ 479 for(j=0; j<2; j++, mbn_xy += s->mb_stride){
477 DECLARE_ALIGNED_8(int16_t, bS)[4]; 480 DECLARE_ALIGNED_8(int16_t, bS)[4];
478 int qp; 481 int qp;
479 if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) { 482 if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) {
480 *(uint64_t*)bS= 0x0003000300030003ULL; 483 AV_WN64A(bS, 0x0003000300030003ULL);
481 } else { 484 } else {
482 if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){ 485 if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){
483 bS[0]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+0]); 486 bS[0]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+0]);
484 bS[1]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+1]); 487 bS[1]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+1]);
485 bS[2]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+2]); 488 bS[2]= 1+((h->cbp_table[mbn_xy] & 8)||h->non_zero_count_cache[scan8[0]+2]);
506 }else{ 509 }else{
507 DECLARE_ALIGNED_8(int16_t, bS)[4]; 510 DECLARE_ALIGNED_8(int16_t, bS)[4];
508 int qp; 511 int qp;
509 512
510 if( IS_INTRA(mb_type|mbm_type)) { 513 if( IS_INTRA(mb_type|mbm_type)) {
511 *(uint64_t*)bS= 0x0003000300030003ULL; 514 AV_WN64A(bS, 0x0003000300030003ULL);
512 if ( (!IS_INTERLACED(mb_type|mbm_type)) 515 if ( (!IS_INTERLACED(mb_type|mbm_type))
513 || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0)) 516 || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0))
514 ) 517 )
515 *(uint64_t*)bS= 0x0004000400040004ULL; 518 AV_WN64A(bS, 0x0004000400040004ULL);
516 } else { 519 } else {
517 int i; 520 int i;
518 int mv_done; 521 int mv_done;
519 522
520 if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) { 523 if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) {
521 *(uint64_t*)bS= 0x0001000100010001ULL; 524 AV_WN64A(bS, 0x0001000100010001ULL);
522 mv_done = 1; 525 mv_done = 1;
523 } 526 }
524 else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) { 527 else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
525 int b_idx= 8 + 4; 528 int b_idx= 8 + 4;
526 int bn_idx= b_idx - (dir ? 8:1); 529 int bn_idx= b_idx - (dir ? 8:1);
586 589
587 if( IS_8x8DCT(mb_type & (edge<<24)) ) // (edge&1) && IS_8x8DCT(mb_type) 590 if( IS_8x8DCT(mb_type & (edge<<24)) ) // (edge&1) && IS_8x8DCT(mb_type)
588 continue; 591 continue;
589 592
590 if( IS_INTRA(mb_type)) { 593 if( IS_INTRA(mb_type)) {
591 *(uint64_t*)bS= 0x0003000300030003ULL; 594 AV_WN64A(bS, 0x0003000300030003ULL);
592 } else { 595 } else {
593 int i; 596 int i;
594 int mv_done; 597 int mv_done;
595 598
596 if( edge & mask_edge ) { 599 if( edge & mask_edge ) {
597 *(uint64_t*)bS= 0; 600 AV_ZERO64(bS);
598 mv_done = 1; 601 mv_done = 1;
599 } 602 }
600 else if( mask_par0 ) { 603 else if( mask_par0 ) {
601 int b_idx= 8 + 4 + edge * (dir ? 8:1); 604 int b_idx= 8 + 4 + edge * (dir ? 8:1);
602 int bn_idx= b_idx - (dir ? 8:1); 605 int bn_idx= b_idx - (dir ? 8:1);
672 int rqp[2]; 675 int rqp[2];
673 int mb_qp, mbn0_qp, mbn1_qp; 676 int mb_qp, mbn0_qp, mbn1_qp;
674 int i; 677 int i;
675 first_vertical_edge_done = 1; 678 first_vertical_edge_done = 1;
676 679
677 if( IS_INTRA(mb_type) ) 680 if( IS_INTRA(mb_type) ) {
678 *(uint64_t*)&bS[0]= 681 AV_WN64A(&bS[0], 0x0004000400040004ULL);
679 *(uint64_t*)&bS[4]= 0x0004000400040004ULL; 682 AV_WN64A(&bS[4], 0x0004000400040004ULL);
680 else { 683 } else {
681 static const uint8_t offset[2][2][8]={ 684 static const uint8_t offset[2][2][8]={
682 { 685 {
683 {7+8*0, 7+8*0, 7+8*0, 7+8*0, 7+8*1, 7+8*1, 7+8*1, 7+8*1}, 686 {7+8*0, 7+8*0, 7+8*0, 7+8*0, 7+8*1, 7+8*1, 7+8*1, 7+8*1},
684 {7+8*2, 7+8*2, 7+8*2, 7+8*2, 7+8*3, 7+8*3, 7+8*3, 7+8*3}, 687 {7+8*2, 7+8*2, 7+8*2, 7+8*2, 7+8*3, 7+8*3, 7+8*3, 7+8*3},
685 },{ 688 },{