Mercurial > libavcodec.hg
changeset 12241:c7f6ddcc5c01 libavcodec
VP8: optimize DC-only chroma case in the same way as luma.
Add MMX idct_dc_add4uv function for this case.
~40% faster chroma idct.
author | darkshikari |
---|---|
date | Fri, 23 Jul 2010 06:02:52 +0000 |
parents | e6ade5e849c9 |
children | a2f6d8c61b9c |
files | vp8.c vp8dsp.c vp8dsp.h x86/vp8dsp-init.c x86/vp8dsp.asm |
diffstat | 5 files changed, 89 insertions(+), 43 deletions(-) [+] |
line wrap: on
line diff
--- a/vp8.c Fri Jul 23 03:44:37 2010 +0000 +++ b/vp8.c Fri Jul 23 06:02:52 2010 +0000 @@ -1206,7 +1206,7 @@ } } } else { - s->vp8dsp.vp8_idct_dc_add4(y_dst, s->block[y], s->linesize); + s->vp8dsp.vp8_idct_dc_add4y(y_dst, s->block[y], s->linesize); } } y_dst += 4*s->linesize; @@ -1214,19 +1214,24 @@ } for (ch = 0; ch < 2; ch++) { - if (AV_RN32A(s->non_zero_count_cache[4+ch])) { + uint32_t nnz4 = AV_RN32A(s->non_zero_count_cache[4+ch]); + if (nnz4) { uint8_t *ch_dst = dst[1+ch]; - for (y = 0; y < 2; y++) { - for (x = 0; x < 2; x++) { - int nnz = s->non_zero_count_cache[4+ch][(y<<1)+x]; - if (nnz) { - if (nnz == 1) - s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize); - else - s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize); + if (nnz4&~0x01010101) { + for (y = 0; y < 2; y++) { + for (x = 0; x < 2; x++) { + int nnz = s->non_zero_count_cache[4+ch][(y<<1)+x]; + if (nnz) { + if (nnz == 1) + s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize); + else + s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize); + } } + ch_dst += 4*s->uvlinesize; } - ch_dst += 4*s->uvlinesize; + } else { + s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, s->block[4+ch], s->uvlinesize); } } }
--- a/vp8dsp.c Fri Jul 23 03:44:37 2010 +0000 +++ b/vp8dsp.c Fri Jul 23 06:02:52 2010 +0000 @@ -109,24 +109,20 @@ } } -static void vp8_idct_dc_add4_c(uint8_t *dst, DCTELEM block[4][16], int stride) +static void vp8_idct_dc_add4uv_c(uint8_t *dst, DCTELEM block[4][16], int stride) { - int i, j; - for (j = 0; j < 4; j++) { - uint8_t *pix = dst+j*4; - int dc = (block[j][0] + 4) >> 3; - uint8_t *cm = ff_cropTbl + MAX_NEG_CROP + dc; - block[j][0] = 0; - if (!dc) - continue; - for (i = 0; i < 4; i++) { - pix[0] = cm[pix[0]]; - pix[1] = cm[pix[1]]; - pix[2] = cm[pix[2]]; - pix[3] = cm[pix[3]]; - pix += stride; - } - } + vp8_idct_dc_add_c(dst+stride*0+0, block[0], stride); + vp8_idct_dc_add_c(dst+stride*0+4, block[1], stride); + vp8_idct_dc_add_c(dst+stride*4+0, block[2], stride); + vp8_idct_dc_add_c(dst+stride*4+4, block[3], stride); +} + +static void vp8_idct_dc_add4y_c(uint8_t *dst, DCTELEM block[4][16], int stride) +{ + vp8_idct_dc_add_c(dst+ 0, block[0], stride); + vp8_idct_dc_add_c(dst+ 4, block[1], stride); + vp8_idct_dc_add_c(dst+ 8, block[2], stride); + vp8_idct_dc_add_c(dst+12, block[3], stride); } // because I like only having two parameters to pass functions... @@ -479,10 +475,11 @@ av_cold void ff_vp8dsp_init(VP8DSPContext *dsp) { - dsp->vp8_luma_dc_wht = vp8_luma_dc_wht_c; - dsp->vp8_idct_add = vp8_idct_add_c; - dsp->vp8_idct_dc_add = vp8_idct_dc_add_c; - dsp->vp8_idct_dc_add4 = vp8_idct_dc_add4_c; + dsp->vp8_luma_dc_wht = vp8_luma_dc_wht_c; + dsp->vp8_idct_add = vp8_idct_add_c; + dsp->vp8_idct_dc_add = vp8_idct_dc_add_c; + dsp->vp8_idct_dc_add4y = vp8_idct_dc_add4y_c; + dsp->vp8_idct_dc_add4uv = vp8_idct_dc_add4uv_c; dsp->vp8_v_loop_filter16y = vp8_v_loop_filter16_c; dsp->vp8_h_loop_filter16y = vp8_h_loop_filter16_c;
--- a/vp8dsp.h Fri Jul 23 03:44:37 2010 +0000 +++ b/vp8dsp.h Fri Jul 23 06:02:52 2010 +0000 @@ -33,7 +33,8 @@ void (*vp8_luma_dc_wht)(DCTELEM block[4][4][16], DCTELEM dc[16]); void (*vp8_idct_add)(uint8_t *dst, DCTELEM block[16], int stride); void (*vp8_idct_dc_add)(uint8_t *dst, DCTELEM block[16], int stride); - void (*vp8_idct_dc_add4)(uint8_t *dst, DCTELEM block[4][16], int stride); + void (*vp8_idct_dc_add4y)(uint8_t *dst, DCTELEM block[4][16], int stride); + void (*vp8_idct_dc_add4uv)(uint8_t *dst, DCTELEM block[4][16], int stride); // loop filter applied to edges between macroblocks void (*vp8_v_loop_filter16y)(uint8_t *dst, int stride,
--- a/x86/vp8dsp-init.c Fri Jul 23 03:44:37 2010 +0000 +++ b/x86/vp8dsp-init.c Fri Jul 23 06:02:52 2010 +0000 @@ -220,8 +220,9 @@ extern void ff_vp8_idct_dc_add_mmx(uint8_t *dst, DCTELEM block[16], int stride); extern void ff_vp8_idct_dc_add_sse4(uint8_t *dst, DCTELEM block[16], int stride); -extern void ff_vp8_idct_dc_add4_mmx(uint8_t *dst, DCTELEM block[4][16], int stride); -extern void ff_vp8_idct_dc_add4_sse2(uint8_t *dst, DCTELEM block[4][16], int stride); +extern void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, DCTELEM block[4][16], int stride); +extern void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, DCTELEM block[4][16], int stride); +extern void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, DCTELEM block[4][16], int stride); extern void ff_vp8_luma_dc_wht_mmx(DCTELEM block[4][4][16], DCTELEM dc[16]); extern void ff_vp8_idct_add_mmx(uint8_t *dst, DCTELEM block[16], int stride); extern void ff_vp8_idct_add_sse(uint8_t *dst, DCTELEM block[16], int stride); @@ -284,10 +285,11 @@ #if HAVE_YASM if (mm_flags & FF_MM_MMX) { - c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx; - c->vp8_idct_dc_add4 = ff_vp8_idct_dc_add4_mmx; - c->vp8_idct_add = ff_vp8_idct_add_mmx; - c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx; + c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx; + c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx; + c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx; + c->vp8_idct_add = ff_vp8_idct_add_mmx; + c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx; c->put_vp8_epel_pixels_tab[0][0][0] = c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx; c->put_vp8_epel_pixels_tab[1][0][0] = @@ -354,7 +356,7 @@ } if (mm_flags & FF_MM_SSE2) { - c->vp8_idct_dc_add4 = ff_vp8_idct_dc_add4_sse2; + c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;
--- a/x86/vp8dsp.asm Fri Jul 23 03:44:37 2010 +0000 +++ b/x86/vp8dsp.asm Fri Jul 23 06:02:52 2010 +0000 @@ -976,11 +976,11 @@ RET ;----------------------------------------------------------------------------- -; void vp8_idct_dc_add4_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride); +; void vp8_idct_dc_add4y_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride); ;----------------------------------------------------------------------------- INIT_MMX -cglobal vp8_idct_dc_add4_mmx, 3, 3 +cglobal vp8_idct_dc_add4y_mmx, 3, 3 ; load data movd m0, [r1+32*0] ; A movd m1, [r1+32*2] ; C @@ -1015,7 +1015,7 @@ RET INIT_XMM -cglobal vp8_idct_dc_add4_sse2, 3, 3 +cglobal vp8_idct_dc_add4y_sse2, 3, 3, 6 ; load data movd m0, [r1+32*0] ; A movd m1, [r1+32*2] ; C @@ -1045,6 +1045,47 @@ RET ;----------------------------------------------------------------------------- +; void vp8_idct_dc_add4uv_<opt>(uint8_t *dst, DCTELEM block[4][16], int stride); +;----------------------------------------------------------------------------- + +INIT_MMX +cglobal vp8_idct_dc_add4uv_mmx, 3, 3 + ; load data + movd m0, [r1+32*0] ; A + movd m1, [r1+32*2] ; C + punpcklwd m0, [r1+32*1] ; A B + punpcklwd m1, [r1+32*3] ; C D + punpckldq m0, m1 ; A B C D + pxor m6, m6 + + ; calculate DC + paddw m0, [pw_4] + movd [r1+32*0], m6 + movd [r1+32*1], m6 + movd [r1+32*2], m6 + movd [r1+32*3], m6 + psraw m0, 3 + psubw m6, m0 + packuswb m0, m0 + packuswb m6, m6 + punpcklbw m0, m0 ; AABBCCDD + punpcklbw m6, m6 ; AABBCCDD + movq m1, m0 + movq m7, m6 + punpcklbw m0, m0 ; AAAABBBB + punpckhbw m1, m1 ; CCCCDDDD + punpcklbw m6, m6 ; AAAABBBB + punpckhbw m7, m7 ; CCCCDDDD + + ; add DC + lea r1, [r0+r2*2] + ADD_DC m0, m6, 0, mova + lea r0, [r0+r2*4] + lea r1, [r1+r2*4] + ADD_DC m1, m7, 0, mova + RET + +;----------------------------------------------------------------------------- ; void vp8_idct_add_<opt>(uint8_t *dst, DCTELEM block[16], int stride); 
;-----------------------------------------------------------------------------