# HG changeset patch
# User mru
# Date 1266510271 0
# Node ID 10c06a9bd3d90ed0116d86621fd6eb441af4eda1
# Parent  778139a5e058000aac0b75a0ae0f853c0745e2fc
H264: use alias-safe macros

This eliminates all aliasing violation warnings in h264 code.
No measurable speed difference with gcc-4.4.3 on i7.

diff -r 778139a5e058 -r 10c06a9bd3d9 h264.c
--- a/h264.c	Thu Feb 18 12:37:43 2010 +0000
+++ b/h264.c	Thu Feb 18 16:24:31 2010 +0000
@@ -156,11 +156,11 @@
 # if HAVE_FAST_64BIT
 #   define RS 7
     for(i=0; i+7<length; i+=8){
-        if(((~*(const uint64_t*)(src+i) & (*(const uint64_t*)(src+i) - 0x0101010101010101ULL)) & 0x8080808080808080ULL))
+        if(((~AV_RN64A(src+i) & (AV_RN64A(src+i) - 0x0101010101010101ULL)) & 0x8080808080808080ULL))
             break;
     }
 # else
 #   define RS 3
     for(i=0; i+3<length; i+=4){
-        if(((~*(const uint32_t*)(src+i) & (*(const uint32_t*)(src+i) - 0x01010101U)) & 0x80808080U))
+        if(((~AV_RN32A(src+i) & (AV_RN32A(src+i) - 0x01010101U)) & 0x80808080U))
             break;
     }
diff -r 778139a5e058 -r 10c06a9bd3d9 h264.h
--- a/h264.h	Thu Feb 18 12:37:43 2010 +0000
+++ b/h264.h	Thu Feb 18 16:24:31 2010 +0000
@@ -28,6 +28,7 @@
 #ifndef AVCODEC_H264_H
 #define AVCODEC_H264_H
 
+#include "libavutil/intreadwrite.h"
 #include "dsputil.h"
 #include "cabac.h"
 #include "mpegvideo.h"
@@ -921,7 +922,7 @@
      */
 //FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
     if(top_type){
-        *(uint32_t*)&h->non_zero_count_cache[4+8*0]= *(uint32_t*)&h->non_zero_count[top_xy][4+3*8];
+        AV_COPY32(&h->non_zero_count_cache[4+8*0], &h->non_zero_count[top_xy][4+3*8]);
 
         h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][1+1*8];
         h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][2+1*8];
@@ -933,7 +934,7 @@
 
         h->non_zero_count_cache[1+8*3]=
         h->non_zero_count_cache[2+8*3]=
-        *(uint32_t*)&h->non_zero_count_cache[4+8*0]= CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040;
+        AV_WN32A(&h->non_zero_count_cache[4+8*0], CABAC && !IS_INTRA(mb_type) ? 0 : 0x40404040);
     }
 
     for (i=0; i<2; i++) {
@@ -1002,7 +1003,7 @@
                 h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1];
             }else{
                 AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]);
-                *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101;
+                AV_WN32A(&h->ref_cache[list][scan8[0] + 0 - 1*8], ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101);
             }
 
             for(i=0; i<2; i++){
@@ -1010,13 +1011,13 @@
                 if(USES_LIST(left_type[i], list)){
                     const int b_xy= h->mb2b_xy[left_xy[i]] + 3;
                     const int b8_xy= h->mb2b8_xy[left_xy[i]] + 1;
-                    *(uint32_t*)h->mv_cache[list][cache_idx ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0+i*2]];
-                    *(uint32_t*)h->mv_cache[list][cache_idx+8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1+i*2]];
+                    AV_COPY32(h->mv_cache[list][cache_idx ], s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0+i*2]]);
+                    AV_COPY32(h->mv_cache[list][cache_idx+8], s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1+i*2]]);
                     h->ref_cache[list][cache_idx ]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0+i*2]>>1)];
                     h->ref_cache[list][cache_idx+8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[1+i*2]>>1)];
                 }else{
-                    *(uint32_t*)h->mv_cache [list][cache_idx ]=
-                    *(uint32_t*)h->mv_cache [list][cache_idx+8]= 0;
+                    AV_ZERO32(h->mv_cache [list][cache_idx ]);
+                    AV_ZERO32(h->mv_cache [list][cache_idx+8]);
                     h->ref_cache[list][cache_idx ]=
                     h->ref_cache[list][cache_idx+8]= (left_type[i]) ? LIST_NOT_USED : PART_NOT_AVAILABLE;
                }
@@ -1025,20 +1026,20 @@
 
             if(USES_LIST(topleft_type, list)){
                 const int b_xy = h->mb2b_xy [topleft_xy] + 3 + h->b_stride + (h->topleft_partition & 2*h->b_stride);
                 const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + (h->topleft_partition & h->b8_stride);
-                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
+                AV_COPY32(h->mv_cache[list][scan8[0] - 1 - 1*8], s->current_picture.motion_val[list][b_xy]);
                 h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy];
             }else{
-                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0;
+                AV_ZERO32(h->mv_cache[list][scan8[0] - 1 - 1*8]);
                 h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
             }
 
             if(USES_LIST(topright_type, list)){
                 const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride;
                 const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride;
-                *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy];
+                AV_COPY32(h->mv_cache[list][scan8[0] + 4 - 1*8], s->current_picture.motion_val[list][b_xy]);
                 h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy];
             }else{
-                *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0;
+                AV_ZERO32(h->mv_cache [list][scan8[0] + 4 - 1*8]);
                 h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE;
             }
@@ -1051,11 +1052,11 @@
             h->ref_cache[list][scan8[13]+1] =  //FIXME remove past 3 (init somewhere else)
             h->ref_cache[list][scan8[4 ]] =
             h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE;
-            *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]=
-            *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]=
-            *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
-            *(uint32_t*)h->mv_cache [list][scan8[4 ]]=
-            *(uint32_t*)h->mv_cache [list][scan8[12]]= 0;
+            AV_ZERO32(h->mv_cache [list][scan8[5 ]+1]);
+            AV_ZERO32(h->mv_cache [list][scan8[7 ]+1]);
+            AV_ZERO32(h->mv_cache [list][scan8[13]+1]); //FIXME remove past 3 (init somewhere else)
+            AV_ZERO32(h->mv_cache [list][scan8[4 ]]);
+            AV_ZERO32(h->mv_cache [list][scan8[12]]);
 
             if( CABAC ) {
                 /* XXX beurk, Load mvd */
@@ -1067,37 +1068,37 @@
                 }
 
                 if(USES_LIST(left_type[0], list)){
                     const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
-                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]];
-                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]];
+                    AV_COPY32(h->mvd_cache[list][scan8[0] - 1 + 0*8], h->mvd_table[list][b_xy + h->b_stride*left_block[0]]);
+                    AV_COPY32(h->mvd_cache[list][scan8[0] - 1 + 1*8], h->mvd_table[list][b_xy + h->b_stride*left_block[1]]);
                 }else{
-                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]=
-                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0;
+                    AV_ZERO32(h->mvd_cache [list][scan8[0] - 1 + 0*8]);
+                    AV_ZERO32(h->mvd_cache [list][scan8[0] - 1 + 1*8]);
                 }
                 if(USES_LIST(left_type[1], list)){
                     const int b_xy= h->mb2b_xy[left_xy[1]] + 3;
-                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]];
-                    *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]];
+                    AV_COPY32(h->mvd_cache[list][scan8[0] - 1 + 2*8], h->mvd_table[list][b_xy + h->b_stride*left_block[2]]);
+                    AV_COPY32(h->mvd_cache[list][scan8[0] - 1 + 3*8], h->mvd_table[list][b_xy + h->b_stride*left_block[3]]);
                 }else{
-                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]=
-                    *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0;
+                    AV_ZERO32(h->mvd_cache [list][scan8[0] - 1 + 2*8]);
+                    AV_ZERO32(h->mvd_cache [list][scan8[0] - 1 + 3*8]);
                 }
-                *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]=
-                *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]=
-                *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewhere else)
-                *(uint32_t*)h->mvd_cache [list][scan8[4 ]]=
-                *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0;
+                AV_ZERO32(h->mvd_cache [list][scan8[5 ]+1]);
+                AV_ZERO32(h->mvd_cache [list][scan8[7 ]+1]);
+                AV_ZERO32(h->mvd_cache [list][scan8[13]+1]); //FIXME remove past 3 (init somewhere else)
+                AV_ZERO32(h->mvd_cache [list][scan8[4 ]]);
+                AV_ZERO32(h->mvd_cache [list][scan8[12]]);
 
                 if(h->slice_type_nos == FF_B_TYPE){
                     fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, MB_TYPE_16x16>>1, 1);
                     if(IS_DIRECT(top_type)){
-                        *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101*(MB_TYPE_DIRECT2>>1);
+                        AV_WN32A(&h->direct_cache[scan8[0] - 1*8], 0x01010101*(MB_TYPE_DIRECT2>>1));
                     }else if(IS_8X8(top_type)){
                         int b8_xy = h->mb2b8_xy[top_xy] + h->b8_stride;
                         h->direct_cache[scan8[0] + 0 - 1*8]= h->direct_table[b8_xy];
                         h->direct_cache[scan8[0] + 2 - 1*8]= h->direct_table[b8_xy + 1];
                     }else{
-                        *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101*(MB_TYPE_16x16>>1);
+                        AV_WN32A(&h->direct_cache[scan8[0] - 1*8], 0x01010101*(MB_TYPE_16x16>>1));
                     }
 
                     if(IS_DIRECT(left_type[0]))
@@ -1223,8 +1224,8 @@
 
     AV_COPY64(&h->non_zero_count_cache[0+8*1], &h->non_zero_count[mb_xy][ 0]);
     AV_COPY64(&h->non_zero_count_cache[0+8*2], &h->non_zero_count[mb_xy][ 8]);
-    *((uint32_t*)&h->non_zero_count_cache[0+8*5])= *((uint32_t*)&h->non_zero_count[mb_xy][16]);
-    *((uint32_t*)&h->non_zero_count_cache[4+8*3])= *((uint32_t*)&h->non_zero_count[mb_xy][20]);
+    AV_COPY32(&h->non_zero_count_cache[0+8*5], &h->non_zero_count[mb_xy][16]);
+    AV_COPY32(&h->non_zero_count_cache[4+8*3], &h->non_zero_count[mb_xy][20]);
     AV_COPY64(&h->non_zero_count_cache[0+8*4], &h->non_zero_count[mb_xy][24]);
 
     h->cbp= h->cbp_table[mb_xy];
@@ -1239,21 +1240,21 @@
 
         if(!USES_LIST(mb_type, list)){
             fill_rectangle( h->mv_cache[list][scan8[0]], 4, 4, 8, pack16to32(0,0), 4);
-            *(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
-            *(uint32_t*)&h->ref_cache[list][scan8[ 2]] =
-            *(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
-            *(uint32_t*)&h->ref_cache[list][scan8[10]] = ((LIST_NOT_USED)&0xFF)*0x01010101U;
+            AV_WN32A(&h->ref_cache[list][scan8[ 0]], ((LIST_NOT_USED)&0xFF)*0x01010101u);
+            AV_WN32A(&h->ref_cache[list][scan8[ 2]], ((LIST_NOT_USED)&0xFF)*0x01010101u);
+            AV_WN32A(&h->ref_cache[list][scan8[ 8]], ((LIST_NOT_USED)&0xFF)*0x01010101u);
+            AV_WN32A(&h->ref_cache[list][scan8[10]], ((LIST_NOT_USED)&0xFF)*0x01010101u);
             continue;
         }
 
         ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
         {
             int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
-            *(uint32_t*)&h->ref_cache[list][scan8[ 0]] =
-            *(uint32_t*)&h->ref_cache[list][scan8[ 2]] = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
+            AV_WN32A(&h->ref_cache[list][scan8[ 0]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101);
+            AV_WN32A(&h->ref_cache[list][scan8[ 2]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101);
             ref += h->b8_stride;
-            *(uint32_t*)&h->ref_cache[list][scan8[ 8]] =
-            *(uint32_t*)&h->ref_cache[list][scan8[10]] = (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101;
+            AV_WN32A(&h->ref_cache[list][scan8[ 8]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101);
+            AV_WN32A(&h->ref_cache[list][scan8[10]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101);
         }
 
         b_stride = h->b_stride;
@@ -1277,7 +1278,7 @@
      */
 //FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec)
     if(top_type){
-        *(uint32_t*)&h->non_zero_count_cache[4+8*0]= *(uint32_t*)&h->non_zero_count[top_xy][4+3*8];
+        AV_COPY32(&h->non_zero_count_cache[4+8*0], &h->non_zero_count[top_xy][4+3*8]);
     }
 
     if(left_type[0]){
@@ -1333,7 +1334,7 @@
             h->ref_cache[list][scan8[0] + 3 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 1]];
         }else{
             AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]);
-            *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((LIST_NOT_USED)&0xFF)*0x01010101U;
+            AV_WN32A(&h->ref_cache[list][scan8[0] + 0 - 1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u);
         }
 
         if(!IS_INTERLACED(mb_type^left_type[0])){
@@ -1341,19 +1342,19 @@
             if(USES_LIST(left_type[0], list)){
                 const int b_xy= h->mb2b_xy[left_xy[0]] + 3;
                 const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1;
                 int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[0]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2);
-                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*0];
-                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 8 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*1];
-                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 +16 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*2];
-                *(uint32_t*)h->mv_cache[list][scan8[0] - 1 +24 ]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*3];
+                AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 0 ], s->current_picture.motion_val[list][b_xy + h->b_stride*0]);
+                AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 8 ], s->current_picture.motion_val[list][b_xy + h->b_stride*1]);
+                AV_COPY32(h->mv_cache[list][scan8[0] - 1 +16 ], s->current_picture.motion_val[list][b_xy + h->b_stride*2]);
+                AV_COPY32(h->mv_cache[list][scan8[0] - 1 +24 ], s->current_picture.motion_val[list][b_xy + h->b_stride*3]);
                 h->ref_cache[list][scan8[0] - 1 + 0 ]=
                 h->ref_cache[list][scan8[0] - 1 + 8 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + h->b8_stride*0]];
                 h->ref_cache[list][scan8[0] - 1 +16 ]=
                 h->ref_cache[list][scan8[0] - 1 +24 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + h->b8_stride*1]];
             }else{
-                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0 ]=
-                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 8 ]=
-                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 +16 ]=
-                *(uint32_t*)h->mv_cache [list][scan8[0] - 1 +24 ]= 0;
+                AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 0 ]);
+                AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 8 ]);
+                AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +16 ]);
+                AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +24 ]);
                 h->ref_cache[list][scan8[0] - 1 + 0 ]=
                 h->ref_cache[list][scan8[0] - 1 + 8 ]=
                 h->ref_cache[list][scan8[0] - 1 + 16 ]=
@@ -1386,8 +1387,8 @@
 
     AV_COPY64(&h->non_zero_count[mb_xy][ 0], &h->non_zero_count_cache[0+8*1]);
     AV_COPY64(&h->non_zero_count[mb_xy][ 8], &h->non_zero_count_cache[0+8*2]);
-    *((uint32_t*)&h->non_zero_count[mb_xy][16]) = *((uint32_t*)&h->non_zero_count_cache[0+8*5]);
-    *((uint32_t*)&h->non_zero_count[mb_xy][20]) = *((uint32_t*)&h->non_zero_count_cache[4+8*3]);
+    AV_COPY32(&h->non_zero_count[mb_xy][16], &h->non_zero_count_cache[0+8*5]);
+    AV_COPY32(&h->non_zero_count[mb_xy][20], &h->non_zero_count_cache[4+8*3]);
     AV_COPY64(&h->non_zero_count[mb_xy][24], &h->non_zero_count_cache[0+8*4]);
 }
 
@@ -1446,9 +1447,9 @@
 
 static inline int get_dct8x8_allowed(H264Context *h){
     if(h->sps.direct_8x8_inference_flag)
-        return !(*(uint64_t*)h->sub_mb_type & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8 )*0x0001000100010001ULL));
+        return !(AV_RN64A(h->sub_mb_type) & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8 )*0x0001000100010001ULL));
     else
-        return !(*(uint64_t*)h->sub_mb_type & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8|MB_TYPE_DIRECT2)*0x0001000100010001ULL));
+        return !(AV_RN64A(h->sub_mb_type) & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8|MB_TYPE_DIRECT2)*0x0001000100010001ULL));
 }
 
 /**
diff -r 778139a5e058 -r 10c06a9bd3d9 h264_direct.c
--- a/h264_direct.c	Thu Feb 18 12:37:43 2010 +0000
+++ b/h264_direct.c	Thu Feb 18 16:24:31 2010 +0000
@@ -183,11 +183,11 @@
     }else {
         assert(match_count==1);
         if(left_ref==ref[list]){
-            mv[list]= *(uint32_t*)A;
+            mv[list]= AV_RN32A(A);
         }else if(top_ref==ref[list]){
-            mv[list]= *(uint32_t*)B;
+            mv[list]= AV_RN32A(B);
         }else{
-            mv[list]= *(uint32_t*)C;
+            mv[list]= AV_RN32A(C);
         }
     }
 }else{
@@ -362,9 +362,9 @@
                     const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
                     if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
                         if(ref[0] == 0)
-                            *(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0;
+                            AV_ZERO32(h->mv_cache[0][scan8[i8*4+i4]]);
                         if(ref[1] == 0)
-                            *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0;
+                            AV_ZERO32(h->mv_cache[1][scan8[i8*4+i4]]);
                         m++;
                     }
                 }
@@ -571,8 +571,8 @@
                     int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
                     mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                     mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
-                    *(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] =
-                        pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
+                    AV_WN32A(h->mv_cache[1][scan8[i8*4+i4]],
+                             pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]));
                 }
             }
         }
diff -r 778139a5e058 -r 10c06a9bd3d9 h264_loopfilter.c
--- a/h264_loopfilter.c	Thu Feb 18 12:37:43 2010 +0000
+++ b/h264_loopfilter.c	Thu Feb 18 16:24:31 2010 +0000
@@ -25,6 +25,7 @@
  * @author Michael Niedermayer
 */
 
+#include "libavutil/intreadwrite.h"
 #include "internal.h"
 #include "dsputil.h"
 #include "avcodec.h"
@@ -368,11 +369,13 @@
         return;
     } else {
         LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]);
-        uint64_t (*bSv)[4] = (uint64_t(*)[4])bS;
         int edges;
         if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) {
             edges = 4;
-            bSv[0][0] = bSv[0][2] = bSv[1][0] = bSv[1][2] = 0x0002000200020002ULL;
+            AV_WN64A(bS[0][0], 0x0002000200020002ULL);
+            AV_WN64A(bS[0][2], 0x0002000200020002ULL);
+            AV_WN64A(bS[1][0], 0x0002000200020002ULL);
+            AV_WN64A(bS[1][2], 0x0002000200020002ULL);
         } else {
             int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0;
             int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[0] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0;
@@ -382,12 +385,12 @@
                              h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE);
         }
         if( IS_INTRA(left_type) )
-            bSv[0][0] = 0x0004000400040004ULL;
+            AV_WN64A(bS[0][0], 0x0004000400040004ULL);
         if( IS_INTRA(h->top_type) )
-            bSv[1][0] = FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL;
+            AV_WN64A(bS[1][0], FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL);
 
 #define FILTER(hv,dir,edge)\
-        if(bSv[dir][edge]) {\
+        if(AV_RN64A(bS[dir][edge])) { \
             filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\
             if(!(edge&1)) {\
                 filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
@@ -477,7 +480,7 @@
             DECLARE_ALIGNED_8(int16_t, bS)[4];
             int qp;
             if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) {
-                *(uint64_t*)bS= 0x0003000300030003ULL;
+                AV_WN64A(bS, 0x0003000300030003ULL);
             } else {
                 if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){
                     bS[0]= 1+((h->cbp_table[mbn_xy] & 4)||h->non_zero_count_cache[scan8[0]+0]);
@@ -508,17 +511,17 @@
             int qp;
 
             if( IS_INTRA(mb_type|mbm_type)) {
-                *(uint64_t*)bS= 0x0003000300030003ULL;
+                AV_WN64A(bS, 0x0003000300030003ULL);
                 if (   (!IS_INTERLACED(mb_type|mbm_type))
                     || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0))
                 )
-                    *(uint64_t*)bS= 0x0004000400040004ULL;
+                    AV_WN64A(bS, 0x0004000400040004ULL);
             } else {
                 int i;
                 int mv_done;
 
                 if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) {
-                    *(uint64_t*)bS= 0x0001000100010001ULL;
+                    AV_WN64A(bS, 0x0001000100010001ULL);
                     mv_done = 1;
                 }
                 else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
@@ -588,13 +591,13 @@
                 continue;
 
             if( IS_INTRA(mb_type)) {
-                *(uint64_t*)bS= 0x0003000300030003ULL;
+                AV_WN64A(bS, 0x0003000300030003ULL);
            } else {
                 int i;
                 int mv_done;
 
                 if( edge & mask_edge ) {
-                    *(uint64_t*)bS= 0;
+                    AV_ZERO64(bS);
                     mv_done = 1;
                 }
                 else if( mask_par0 ) {
@@ -674,10 +677,10 @@
             int i;
             first_vertical_edge_done = 1;
 
-            if( IS_INTRA(mb_type) )
-                *(uint64_t*)&bS[0]=
-                *(uint64_t*)&bS[4]= 0x0004000400040004ULL;
-            else {
+            if( IS_INTRA(mb_type) ) {
+                AV_WN64A(&bS[0], 0x0004000400040004ULL);
+                AV_WN64A(&bS[4], 0x0004000400040004ULL);
+            } else {
                 static const uint8_t offset[2][2][8]={
                     {
                         {7+8*0, 7+8*0, 7+8*0, 7+8*0, 7+8*1, 7+8*1, 7+8*1, 7+8*1},
diff -r 778139a5e058 -r 10c06a9bd3d9 h264_mvpred.h
--- a/h264_mvpred.h	Thu Feb 18 12:37:43 2010 +0000
+++ b/h264_mvpred.h	Thu Feb 18 16:24:31 2010 +0000
@@ -58,7 +58,7 @@
            && h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
             const uint32_t *mb_types = s->current_picture_ptr->mb_type;
             const int16_t *mv;
-            *(uint32_t*)h->mv_cache[list][scan8[0]-2] = 0;
+            AV_ZERO32(h->mv_cache[list][scan8[0]-2]);
             *C = h->mv_cache[list][scan8[0]-2];
 
             if(!MB_FIELD
@@ -220,8 +220,8 @@
     tprintf(h->s.avctx, "pred_pskip: (%d) (%d) at %2d %2d\n", top_ref, left_ref, h->s.mb_x, h->s.mb_y);
 
     if(top_ref == PART_NOT_AVAILABLE || left_ref == PART_NOT_AVAILABLE
-       || !( top_ref | *(uint32_t*)h->mv_cache[0][ scan8[0] - 8 ])
-       || !(left_ref | *(uint32_t*)h->mv_cache[0][ scan8[0] - 1 ])){
+       || !( top_ref | AV_RN32A(h->mv_cache[0][ scan8[0] - 8 ]))
+       || !(left_ref | AV_RN32A(h->mv_cache[0][ scan8[0] - 1 ]))){
 
         *mx = *my = 0;
         return;
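
Background for reviewers: the casts removed above, e.g.
*(uint32_t*)&h->non_zero_count_cache[...], access uint8_t/int16_t arrays
through an incompatible pointer type, which C99 strict-aliasing rules leave
undefined and which gcc-4.4 warns about.  The AV_RN*A/AV_WN*A (read/write)
and AV_COPY*/AV_ZERO* macros from libavutil/intreadwrite.h perform the same
single load or store through a may_alias union, keeping the access well
defined at no speed cost.  A minimal sketch of the technique follows; it
assumes GCC's may_alias extension, and the MY_* names are illustrative
placeholders, not the real libavutil API:

    #include <stdint.h>
    #include <stdio.h>

    /* A union may alias its member types; GCC's may_alias attribute
     * additionally lets it alias anything, much as libavutil's av_alias32
     * union does. */
    typedef union {
        uint32_t u32;
        uint16_t u16[2];
        uint8_t  u8[4];
    } __attribute__((may_alias)) my_alias32;

    /* Aligned 32-bit read/write through the union instead of a raw cast. */
    #define MY_RN32A(p)     (((const my_alias32 *)(p))->u32)
    #define MY_WN32A(p, v)  (((my_alias32 *)(p))->u32 = (v))
    #define MY_COPY32(d, s) MY_WN32A(d, MY_RN32A(s))
    #define MY_ZERO32(d)    MY_WN32A(d, 0)

    int main(void)
    {
        /* 4-byte aligned, as the *A (aligned) macro variants require. */
        uint8_t cache[8] __attribute__((aligned(4))) = {1, 2, 3, 4, 5, 6, 7, 8};

        MY_COPY32(&cache[0], &cache[4]);   /* one 32-bit copy, no aliasing UB */
        MY_ZERO32(&cache[4]);              /* one 32-bit store of zero */
        printf("%d %d\n", cache[0], cache[4]);   /* prints: 5 0 */
        return 0;
    }

Because MY_WN32A (like AV_WN32A) expands to an assignment expression, it can
also terminate an assignment chain, which is why the @@ -933 hunk above can
leave the chained h->non_zero_count_cache[1+8*3]= ...[2+8*3]= context lines
feeding directly into the macro call.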