# HG changeset patch
# User michaelni
# Date 1042144957 0
# Node ID e162c09efbe7a48c8c2b83fa4f3948e3b581efde
# Parent  ca2a303ea039958db8fddf8566a2df32c452f0b9
qpel fix

diff -r ca2a303ea039 -r e162c09efbe7 avcodec.h
--- a/avcodec.h	Thu Jan 09 11:37:08 2003 +0000
+++ b/avcodec.h	Thu Jan 09 20:42:37 2003 +0000
@@ -5,8 +5,8 @@
 #define LIBAVCODEC_VERSION_INT 0x000406
 #define LIBAVCODEC_VERSION "0.4.6"
-#define LIBAVCODEC_BUILD 4652
-#define LIBAVCODEC_BUILD_STR "4652"
+#define LIBAVCODEC_BUILD 4653
+#define LIBAVCODEC_BUILD_STR "4653"
 
 enum CodecID {
     CODEC_ID_NONE, 
@@ -520,6 +520,7 @@
 #define FF_BUG_NO_PADDING 16
 #define FF_BUG_AC_VLC 32
 #define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
 //#define FF_BUG_FAKE_SCALABILITY 16 //autodetection should work 100%
 
 /**
diff -r ca2a303ea039 -r e162c09efbe7 dsputil.c
--- a/dsputil.c	Thu Jan 09 11:37:08 2003 +0000
+++ b/dsputil.c	Thu Jan 09 20:42:37 2003 +0000
@@ -801,7 +801,8 @@
 }\
 }\
 \
-static void OPNAME ## mpeg4_qpel8_v_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride, int w){\
+static void OPNAME ## mpeg4_qpel8_v_lowpass(UINT8 *dst, UINT8 *src, int dstStride, int srcStride){\
+    const int w=8;\
     UINT8 *cm = cropTbl + MAX_NEG_CROP;\
     int i;\
     for(i=0; i<w; i++)\
diff -r ca2a303ea039 -r e162c09efbe7 h263dec.c
--- a/h263dec.c	Thu Jan 09 11:37:08 2003 +0000
+++ b/h263dec.c	Thu Jan 09 20:42:37 2003 +0000
+    if(ABS(ex - sx) > ABS(ey - sy)){
+        if(sx > ex){
+            t=sx; sx=ex; ex=t;
+            t=sy; sy=ey; ey=t;
+        }
+        buf+= sx + sy*stride;
+        ex-= sx;
+        f= ((ey-sy)<<16)/ex;
+        for(x= 0; x <= ex; x++){
+            y= ((x*f) + (1<<15))>>16;
+            buf[y*stride + x]+= color;
+        }
+    }else{
+        if(sy > ey){
+            t=sx; sx=ex; ex=t;
+            t=sy; sy=ey; ey=t;
+        }
+        buf+= sx + sy*stride;
+        ey-= sy;
+        if(ey) f= ((ex-sx)<<16)/ey;
+        else f= 0;
+        for(y= 0; y <= ey; y++){
+            x= ((y*f) + (1<<15))>>16;
+            buf[y*stride + x]+= color;
+        }
+    }
+}
+
 int ff_h263_decode_frame(AVCodecContext *avctx, 
                              void *data, int *data_size,
                              UINT8 *buf, int buf_size)
@@ -472,6 +508,14 @@
     if(s->xvid_build && s->xvid_build<=1)
         s->workaround_bugs|= FF_BUG_QPEL_CHROMA;
+#define SET_QPEL_FUNC(postfix1, postfix2) \
+    s->dsp.put_ ## postfix1 = ff_put_ ## postfix2;\
+    s->dsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2;\
+    s->dsp.avg_ ## postfix1 = ff_avg_ ## postfix2;
+
+    if(s->lavc_build && s->lavc_build<4653)
+        s->workaround_bugs|= FF_BUG_STD_QPEL;
+
 //printf("padding_bug_score: %d\n", s->padding_bug_score);
 #if 0
     if(s->divx_version==500)
@@ -489,6 +533,21 @@
 #endif
     }
+    if(s->workaround_bugs& FF_BUG_STD_QPEL){
+        SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c)
+
+        SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c)
+        SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c)
+    }
 #if 0 // dump bits per frame / qp / complexity
 {
@@ -645,41 +704,40 @@
     }
     MPV_frame_end(s);
-#if 0 //dirty show MVs, we should export the MV tables and write a filter to show them
-{
-  int mb_y;
-  s->has_b_frames=1;
-  for(mb_y=0; mb_y<s->mb_height; mb_y++){
-    int mb_x;
-    int y= mb_y*16 + 8;
-    for(mb_x=0; mb_x<s->mb_width; mb_x++){
-      int x= mb_x*16 + 8;
-      uint8_t *ptr= s->last_picture.data[0];
-      int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
-      int mx= (s->motion_val[xy][0]>>1) + x;
-      int my= (s->motion_val[xy][1]>>1) + y;
-      int i;
-      int max;
+
+    if((avctx->debug&FF_DEBUG_VIS_MV) && s->last_picture.data[0]){
+        const int shift= 1 + s->quarter_sample;
+        int mb_y;
+        uint8_t *ptr= s->last_picture.data[0];
+        s->low_delay=0; //needed to see the vectors without trashing the buffers
-      if(mx<0) mx=0;
-      if(my<0) my=0;
-      if(mx>=s->width)  mx= s->width -1;
-      if(my>=s->height) my= s->height-1;
-      max= ABS(mx-x);
-      if(ABS(my-y) > max) max= ABS(my-y);
-      /* the ugliest linedrawing routine ... */
-      for(i=0; i<max; i++){
-        ptr[y1*s->linesize + x1]+=100;
-      }
-      ptr[y*s->linesize + x]+=100;
-      s->mbskip_table[mb_x + mb_y*s->mb_width]=0;
+        for(mb_y=0; mb_y<s->mb_height; mb_y++){
+            int mb_x;
+            for(mb_x=0; mb_x<s->mb_width; mb_x++){
+                const int mb_index= mb_x + mb_y*s->mb_width;
+                if(s->co_located_type_table[mb_index] == MV_TYPE_8X8){
+                    int i;
+                    for(i=0; i<4; i++){
+                        int sx= mb_x*16 + 4 + 8*(i&1);
+                        int sy= mb_y*16 + 4 + 8*(i>>1);
+                        int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
+                        int mx= (s->motion_val[xy][0]>>shift) + sx;
+                        int my= (s->motion_val[xy][1]>>shift) + sy;
+                        draw_line(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
+                    }
+                }else{
+                    int sx= mb_x*16 + 8;
+                    int sy= mb_y*16 + 8;
+                    int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
+                    int mx= (s->motion_val[xy][0]>>shift) + sx;
+                    int my= (s->motion_val[xy][1]>>shift) + sy;
+                    draw_line(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
+                }
+                s->mbskip_table[mb_index]=0;
+            }
+        }
     }
-  }
-}
-#endif
 
     if(s->pict_type==B_TYPE || s->low_delay){
         *pict= *(AVFrame*)&s->current_picture;
diff -r ca2a303ea039 -r e162c09efbe7 i386/dsputil_mmx.c
--- a/i386/dsputil_mmx.c	Thu Jan 09 11:37:08 2003 +0000
+++ b/i386/dsputil_mmx.c	Thu Jan 09 20:42:37 2003 +0000
@@ -1085,7 +1085,7 @@
 }\
 \
 static void OPNAME ## qpel8_mc10_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t temp[32];\
+    uint64_t temp[8];\
     uint8_t * const half= (uint8_t*)temp;\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
     OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
@@ -1096,14 +1096,14 @@
 }\
 \
 static void OPNAME ## qpel8_mc30_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t temp[32];\
+    uint64_t temp[8];\
     uint8_t * const half= (uint8_t*)temp;\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
     OPNAME ## pixels8_l2_mmx(dst, src+1, half, stride, stride, 8);\
 }\
 \
 static void OPNAME ## qpel8_mc01_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t temp[32];\
+    uint64_t temp[8];\
     uint8_t * const half= (uint8_t*)temp;\
     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
     OPNAME ## pixels8_l2_mmx(dst, src, half, stride, stride, 8);\
@@ -1114,53 +1114,49 @@
 }\
 \
 static void OPNAME ## qpel8_mc03_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t temp[32];\
+    uint64_t temp[8];\
     uint8_t * const half= (uint8_t*)temp;\
     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
     OPNAME ## pixels8_l2_mmx(dst, src+stride, half, stride, stride, 8);\
 }\
 static void OPNAME ## qpel8_mc11_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[8*2 + 8*2 + 18*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*64 + 8;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 64;\
+    uint64_t half[8 + 9];\
+    uint8_t * const halfH= ((uint8_t*)half) + 64;\
+    uint8_t * const halfHV= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
-    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfV, src, 8, stride);\
+    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
-    OPNAME ## pixels8_l4_mmx(dst, src, (uint8_t*)half, stride, 8);\
+    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
 }\
 static void OPNAME ## qpel8_mc31_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[8*2 + 8*2 + 18*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*64 + 8;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 64;\
+    uint64_t half[8 + 9];\
+    uint8_t * const halfH= ((uint8_t*)half) + 64;\
+    uint8_t * const halfHV= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
-    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfV, src+1, 8, stride);\
+    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
-    OPNAME ## pixels8_l4_mmx(dst, src+1, (uint8_t*)half, stride, 8);\
+    OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
 }\
 static void OPNAME ## qpel8_mc13_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[8*2 + 8*2 + 9*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*64;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 64;\
+    uint64_t half[8 + 9];\
+    uint8_t * const halfH= ((uint8_t*)half) + 64;\
+    uint8_t * const halfHV= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
-    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfV, src, 8, stride);\
+    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
-    OPNAME ## pixels8_l4_mmx(dst, src+stride, (uint8_t*)half, stride, 8);\
+    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
 }\
 static void OPNAME ## qpel8_mc33_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[8*2 + 8*2 + 9*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*64;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 64;\
-    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src , 8, stride, 9);\
-    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfV, src+1, 8, stride);\
+    uint64_t half[8 + 9];\
+    uint8_t * const halfH= ((uint8_t*)half) + 64;\
+    uint8_t * const halfHV= ((uint8_t*)half);\
+    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
+    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
-    OPNAME ## pixels8_l4_mmx(dst, src+stride+1, (uint8_t*)half, stride, 8);\
+    OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
 }\
 static void OPNAME ## qpel8_mc21_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[8*2 + 9*2];\
+    uint64_t half[8 + 9];\
     uint8_t * const halfH= ((uint8_t*)half) + 64;\
     uint8_t * const halfHV= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
@@ -1168,7 +1164,7 @@
     OPNAME ## pixels8_l2_mmx(dst, halfH, halfHV, stride, 8, 8);\
 }\
 static void OPNAME ## qpel8_mc23_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[8*2 + 9*2];\
+    uint64_t half[8 + 9];\
     uint8_t * const halfH= ((uint8_t*)half) + 64;\
     uint8_t * const halfHV= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
@@ -1176,27 +1172,21 @@
     OPNAME ## pixels8_l2_mmx(dst, halfH+8, halfHV, stride, 8, 8);\
 }\
 static void OPNAME ## qpel8_mc12_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[8*2 + 8*2 + 9*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*64;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 64;\
+    uint64_t half[8 + 9];\
+    uint8_t * const halfH= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
-    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfV, src, 8, stride);\
-    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
-    OPNAME ## pixels8_l2_mmx(dst, halfV, halfHV, stride, 8, 8);\
+    put ## RND ## pixels8_l2_mmx(halfH, src, halfH, 8, stride, 9);\
+    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
 }\
 static void OPNAME ## qpel8_mc32_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[8*2 + 8*2 + 9*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*64;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 64;\
+    uint64_t half[8 + 9];\
+    uint8_t * const halfH= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
-    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfV, src+1, 8, stride);\
-    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
-    OPNAME ## pixels8_l2_mmx(dst, halfV, halfHV, stride, 8, 8);\
+    put ## RND ## pixels8_l2_mmx(halfH, src+1, halfH, 8, stride, 9);\
+    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
 }\
 static void OPNAME ## qpel8_mc22_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[9*2];\
+    uint64_t half[9];\
     uint8_t * const halfH= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
     OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
@@ -1241,44 +1231,40 @@
     OPNAME ## pixels16_l2_mmx(dst, src+stride, half, stride, stride, 16);\
 }\
 static void OPNAME ## qpel16_mc11_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[16*2 + 16*2 + 18*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*256 + 16;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 256;\
+    uint64_t half[16*2 + 17*2];\
+    uint8_t * const halfH= ((uint8_t*)half) + 256;\
+    uint8_t * const halfHV= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
-    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfV, src, 16, stride);\
+    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
-    OPNAME ## pixels16_l4_mmx(dst, src, (uint8_t*)half, stride, 16);\
+    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
 }\
 static void OPNAME ## qpel16_mc31_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[16*2 + 16*2 + 18*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*256 + 16;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 256;\
+    uint64_t half[16*2 + 17*2];\
+    uint8_t * const halfH= ((uint8_t*)half) + 256;\
+    uint8_t * const halfHV= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
-    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfV, src+1, 16, stride);\
+    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
-    OPNAME ## pixels16_l4_mmx(dst, src+1, (uint8_t*)half, stride, 16);\
+    OPNAME ## pixels16_l2_mmx(dst, halfH, halfHV, stride, 16, 16);\
 }\
 static void OPNAME ## qpel16_mc13_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[16*2 + 16*2 + 17*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*256;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 256;\
+    uint64_t half[16*2 + 17*2];\
+    uint8_t * const halfH= ((uint8_t*)half) + 256;\
+    uint8_t * const halfHV= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
-    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfV, src, 16, stride);\
+    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
-    OPNAME ## pixels16_l4_mmx(dst, src+stride, (uint8_t*)half, stride, 16);\
+    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
 }\
 static void OPNAME ## qpel16_mc33_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[16*2 + 16*2 + 17*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*256;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 256;\
-    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src , 16, stride, 17);\
-    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfV, src+1, 16, stride);\
+    uint64_t half[16*2 + 17*2];\
+    uint8_t * const halfH= ((uint8_t*)half) + 256;\
+    uint8_t * const halfHV= ((uint8_t*)half);\
+    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
+    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
-    OPNAME ## pixels16_l4_mmx(dst, src+stride+1, (uint8_t*)half, stride, 16);\
+    OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
 }\
 static void OPNAME ## qpel16_mc21_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
     uint64_t half[16*2 + 17*2];\
@@ -1297,24 +1283,18 @@
     OPNAME ## pixels16_l2_mmx(dst, halfH+16, halfHV, stride, 16, 16);\
 }\
 static void OPNAME ## qpel16_mc12_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[16*2 + 16*2 + 17*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*256;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 256;\
+    uint64_t half[17*2];\
+    uint8_t * const halfH= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
-    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfV, src, 16, stride);\
-    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
-    OPNAME ## pixels16_l2_mmx(dst, halfV, halfHV, stride, 16, 16);\
+    put ## RND ## pixels16_l2_mmx(halfH, src, halfH, 16, stride, 17);\
+    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
 }\
 static void OPNAME ## qpel16_mc32_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
-    uint64_t half[16*2 + 16*2 + 17*2];\
-    uint8_t * const halfH= ((uint8_t*)half) + 2*256;\
-    uint8_t * const halfV= ((uint8_t*)half);\
-    uint8_t * const halfHV= ((uint8_t*)half) + 256;\
+    uint64_t half[17*2];\
+    uint8_t * const halfH= ((uint8_t*)half);\
     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
-    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfV, src+1, 16, stride);\
-    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
-    OPNAME ## pixels16_l2_mmx(dst, halfV, halfHV, stride, 16, 16);\
+    put ## RND ## pixels16_l2_mmx(halfH, src+1, halfH, 16, stride, 17);\
+    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
 }\
 static void OPNAME ## qpel16_mc22_ ## MMX(UINT8 *dst, UINT8 *src, int stride){\
     uint64_t half[17*2];\
@@ -1525,7 +1505,7 @@
         c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
         c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
         c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
-
+        
         SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
         SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
         SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
diff -r ca2a303ea039 -r e162c09efbe7 i386/dsputil_mmx_rnd.h
--- a/i386/dsputil_mmx_rnd.h	Thu Jan 09 11:37:08 2003 +0000
+++ b/i386/dsputil_mmx_rnd.h	Thu Jan 09 20:42:37 2003 +0000
@@ -58,6 +58,16 @@
 {
     MOVQ_BFE(mm6);
     __asm __volatile(
+        "test $1, %0 \n\t"
+        " jz 1f \n\t"
+        "movq (%1), %%mm0 \n\t"
+        "movq (%2), %%mm1 \n\t"
+        "addl %4, %1 \n\t"
+        "addl $8, %2 \n\t"
+        PAVGB(%%mm0, %%mm1, %%mm4, %%mm6)
+        "movq %%mm4, (%3) \n\t"
+        "addl %5, %3 \n\t"
+        "decl %0 \n\t"
         ".balign 8 \n\t"
         "1: \n\t"
         "movq (%1), %%mm0 \n\t"
@@ -144,6 +154,19 @@
 {
     MOVQ_BFE(mm6);
     __asm __volatile(
+        "test $1, %0 \n\t"
+        " jz 1f \n\t"
+        "movq (%1), %%mm0 \n\t"
+        "movq (%2), %%mm1 \n\t"
+        "movq 8(%1), %%mm2 \n\t"
+        "movq 8(%2), %%mm3 \n\t"
+        "addl %4, %1 \n\t"
+        "addl $16, %2 \n\t"
+        PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+        "movq %%mm4, (%3) \n\t"
+        "movq %%mm5, 8(%3) \n\t"
+        "addl %5, %3 \n\t"
+        "decl %0 \n\t"
         ".balign 8 \n\t"
         "1: \n\t"
         "movq (%1), %%mm0 \n\t"
@@ -271,124 +294,6 @@
         :"eax", "memory");
 }
-static void DEF(put, pixels8_l4)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int stride, int h)
-{
-    MOVQ_ZERO(mm7);
-    SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
-    __asm __volatile(
-        ".balign 8 \n\t"
-        "1: \n\t"
-        "movq (%1), %%mm0 \n\t"
-        "movq (%2), %%mm1 \n\t"
-        "movq 64(%2), %%mm2 \n\t"
-        "movq 136(%2), %%mm3 \n\t"
-        "punpcklbw %%mm7, %%mm0 \n\t"
-        "punpcklbw %%mm7, %%mm1 \n\t"
-        "punpcklbw %%mm7, %%mm2 \n\t"
-        "punpcklbw %%mm7, %%mm3 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm3 \n\t"
-        "paddusw %%mm1, %%mm3 \n\t"
-        "psrlw $2, %%mm3 \n\t"
-        "movq (%1), %%mm0 \n\t"
-        "movq (%2), %%mm1 \n\t"
-        "movq 64(%2), %%mm2 \n\t"
-        "movq 136(%2), %%mm4 \n\t"
-        "punpckhbw %%mm7, %%mm0 \n\t"
-        "punpckhbw %%mm7, %%mm1 \n\t"
-        "punpckhbw %%mm7, %%mm2 \n\t"
-        "punpckhbw %%mm7, %%mm4 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm4 \n\t"
-        "paddusw %%mm1, %%mm4 \n\t"
-        "psrlw $2, %%mm4 \n\t"
-        "packuswb %%mm4, %%mm3 \n\t"
-        "movq %%mm3, (%0) \n\t"
-        "addl %4, %0 \n\t"
-        "addl %4, %1 \n\t"
-        "addl $8, %2 \n\t"
-        "decl %3 \n\t"
-        "jnz 1b \n\t"
-        :"+r"(dst), "+r"(src1), "+r"(src2), "+r"(h)
-        :"r"(stride)
-        :"memory");
-}
-
-static void DEF(put, pixels16_l4)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int stride, int h)
-{
-    MOVQ_ZERO(mm7);
-    SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
-    __asm __volatile(
-        ".balign 8 \n\t"
-        "1: \n\t"
-        "movq (%1), %%mm0 \n\t"
-        "movq (%2), %%mm1 \n\t"
-        "movq 256(%2), %%mm2 \n\t"
-        "movq 528(%2), %%mm3 \n\t"
-        "punpcklbw %%mm7, %%mm0 \n\t"
-        "punpcklbw %%mm7, %%mm1 \n\t"
-        "punpcklbw %%mm7, %%mm2 \n\t"
-        "punpcklbw %%mm7, %%mm3 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm3 \n\t"
-        "paddusw %%mm1, %%mm3 \n\t"
-        "psrlw $2, %%mm3 \n\t"
-        "movq (%1), %%mm0 \n\t"
-        "movq (%2), %%mm1 \n\t"
-        "movq 256(%2), %%mm2 \n\t"
-        "movq 528(%2), %%mm4 \n\t"
-        "punpckhbw %%mm7, %%mm0 \n\t"
-        "punpckhbw %%mm7, %%mm1 \n\t"
-        "punpckhbw %%mm7, %%mm2 \n\t"
-        "punpckhbw %%mm7, %%mm4 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm4 \n\t"
-        "paddusw %%mm1, %%mm4 \n\t"
-        "psrlw $2, %%mm4 \n\t"
-        "packuswb %%mm4, %%mm3 \n\t"
-        "movq %%mm3, (%0) \n\t"
-        "movq 8(%1), %%mm0 \n\t"
-        "movq 8(%2), %%mm1 \n\t"
-        "movq 264(%2), %%mm2 \n\t"
-        "movq 536(%2), %%mm3 \n\t"
-        "punpcklbw %%mm7, %%mm0 \n\t"
-        "punpcklbw %%mm7, %%mm1 \n\t"
-        "punpcklbw %%mm7, %%mm2 \n\t"
-        "punpcklbw %%mm7, %%mm3 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm3 \n\t"
-        "paddusw %%mm1, %%mm3 \n\t"
-        "psrlw $2, %%mm3 \n\t"
-        "movq 8(%1), %%mm0 \n\t"
-        "movq 8(%2), %%mm1 \n\t"
-        "movq 264(%2), %%mm2 \n\t"
-        "movq 536(%2), %%mm4 \n\t"
-        "punpckhbw %%mm7, %%mm0 \n\t"
-        "punpckhbw %%mm7, %%mm1 \n\t"
-        "punpckhbw %%mm7, %%mm2 \n\t"
-        "punpckhbw %%mm7, %%mm4 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm4 \n\t"
-        "paddusw %%mm1, %%mm4 \n\t"
-        "psrlw $2, %%mm4 \n\t"
-        "packuswb %%mm4, %%mm3 \n\t"
-        "movq %%mm3, 8(%0) \n\t"
-        "addl %4, %0 \n\t"
-        "addl %4, %1 \n\t"
-        "addl $16, %2 \n\t"
-        "decl %3 \n\t"
-        "jnz 1b \n\t"
-        :"+r"(dst), "+r"(src1), "+r"(src2), "+r"(h)
-        :"r"(stride)
-        :"memory");
-}
-
 // avg_pixels
 // in case more speed is needed - unroling would certainly help
 static void DEF(avg, pixels8)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
@@ -641,133 +546,6 @@
         :"eax", "memory");
 }
-static void DEF(avg, pixels8_l4)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int stride, int h)
-{
-    MOVQ_ZERO(mm7);
-    SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
-    MOVQ_BFE(mm5);
-    __asm __volatile(
-        ".balign 8 \n\t"
-        "1: \n\t"
-        "movq (%1), %%mm0 \n\t"
-        "movq (%2), %%mm1 \n\t"
-        "movq 64(%2), %%mm2 \n\t"
-        "movq 136(%2), %%mm3 \n\t"
-        "punpcklbw %%mm7, %%mm0 \n\t"
-        "punpcklbw %%mm7, %%mm1 \n\t"
-        "punpcklbw %%mm7, %%mm2 \n\t"
-        "punpcklbw %%mm7, %%mm3 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm3 \n\t"
-        "paddusw %%mm1, %%mm3 \n\t"
-        "psrlw $2, %%mm3 \n\t"
-        "movq (%1), %%mm0 \n\t"
-        "movq (%2), %%mm1 \n\t"
-        "movq 64(%2), %%mm2 \n\t"
-        "movq 136(%2), %%mm4 \n\t"
-        "punpckhbw %%mm7, %%mm0 \n\t"
-        "punpckhbw %%mm7, %%mm1 \n\t"
-        "punpckhbw %%mm7, %%mm2 \n\t"
-        "punpckhbw %%mm7, %%mm4 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm4 \n\t"
-        "paddusw %%mm1, %%mm4 \n\t"
-        "psrlw $2, %%mm4 \n\t"
-        "packuswb %%mm4, %%mm3 \n\t"
-        "movq (%0), %%mm4 \n\t"
-        PAVGB(%%mm3, %%mm4, %%mm0, %%mm5)
-        "movq %%mm0, (%0) \n\t"
-        "addl %4, %0 \n\t"
-        "addl %4, %1 \n\t"
-        "addl $8, %2 \n\t"
-        "decl %3 \n\t"
-        "jnz 1b \n\t"
-        :"+r"(dst), "+r"(src1), "+r"(src2), "+r"(h)
-        :"r"(stride)
-        :"memory");
-}
-
-static void DEF(avg, pixels16_l4)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int stride, int h)
-{
-    MOVQ_ZERO(mm7);
-    SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
-    MOVQ_BFE(mm5);
-    __asm __volatile(
-        ".balign 8 \n\t"
-        "1: \n\t"
-        "movq (%1), %%mm0 \n\t"
-        "movq (%2), %%mm1 \n\t"
-        "movq 256(%2), %%mm2 \n\t"
-        "movq 528(%2), %%mm3 \n\t"
-        "punpcklbw %%mm7, %%mm0 \n\t"
-        "punpcklbw %%mm7, %%mm1 \n\t"
-        "punpcklbw %%mm7, %%mm2 \n\t"
-        "punpcklbw %%mm7, %%mm3 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm3 \n\t"
-        "paddusw %%mm1, %%mm3 \n\t"
-        "psrlw $2, %%mm3 \n\t"
-        "movq (%1), %%mm0 \n\t"
-        "movq (%2), %%mm1 \n\t"
-        "movq 256(%2), %%mm2 \n\t"
-        "movq 528(%2), %%mm4 \n\t"
-        "punpckhbw %%mm7, %%mm0 \n\t"
-        "punpckhbw %%mm7, %%mm1 \n\t"
-        "punpckhbw %%mm7, %%mm2 \n\t"
-        "punpckhbw %%mm7, %%mm4 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm4 \n\t"
-        "paddusw %%mm1, %%mm4 \n\t"
-        "psrlw $2, %%mm4 \n\t"
-        "packuswb %%mm4, %%mm3 \n\t"
-        "movq (%0), %%mm4 \n\t"
-        PAVGB(%%mm3, %%mm4, %%mm0, %%mm5)
-        "movq %%mm0, (%0) \n\t"
-        "movq 8(%1), %%mm0 \n\t"
-        "movq 8(%2), %%mm1 \n\t"
-        "movq 264(%2), %%mm2 \n\t"
-        "movq 536(%2), %%mm3 \n\t"
-        "punpcklbw %%mm7, %%mm0 \n\t"
-        "punpcklbw %%mm7, %%mm1 \n\t"
-        "punpcklbw %%mm7, %%mm2 \n\t"
-        "punpcklbw %%mm7, %%mm3 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm3 \n\t"
-        "paddusw %%mm1, %%mm3 \n\t"
-        "psrlw $2, %%mm3 \n\t"
-        "movq 8(%1), %%mm0 \n\t"
-        "movq 8(%2), %%mm1 \n\t"
-        "movq 264(%2), %%mm2 \n\t"
-        "movq 536(%2), %%mm4 \n\t"
-        "punpckhbw %%mm7, %%mm0 \n\t"
-        "punpckhbw %%mm7, %%mm1 \n\t"
-        "punpckhbw %%mm7, %%mm2 \n\t"
-        "punpckhbw %%mm7, %%mm4 \n\t"
-        "paddusw %%mm6, %%mm0 \n\t"
-        "paddusw %%mm0, %%mm1 \n\t"
-        "paddusw %%mm2, %%mm4 \n\t"
-        "paddusw %%mm1, %%mm4 \n\t"
-        "psrlw $2, %%mm4 \n\t"
-        "packuswb %%mm4, %%mm3 \n\t"
-        "movq 8(%0), %%mm4 \n\t"
-        PAVGB(%%mm3, %%mm4, %%mm0, %%mm5)
-        "movq %%mm0, 8(%0) \n\t"
-        "addl %4, %0 \n\t"
-        "addl %4, %1 \n\t"
-        "addl $16, %2 \n\t"
-        "decl %3 \n\t"
-        "jnz 1b \n\t"
-        :"+r"(dst), "+r"(src1), "+r"(src2), "+r"(h)
-        :"r"(stride)
-        :"memory");
-}
-
-
 //FIXME optimize
 static void DEF(put, pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
     DEF(put, pixels8_y2)(block , pixels , line_size, h);