changeset 9441:e14cd3ac3806 libavcodec
VC1: extend MMX qpel MC to include MMX2 avg qpel
| author   | conrad |
|----------|--------|
| date     | Wed, 15 Apr 2009 02:25:42 +0000 |
| parents  | daee921fb6bb |
| children | a91f60938763 |
| files    | x86/dsputil_mmx.c x86/vc1dsp_mmx.c |
| diffstat | 2 files changed, 187 insertions(+), 133 deletions(-) |
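What the patch does, mechanically: every `put`-only inline-asm filter in vc1dsp_mmx.c is wrapped in a macro that takes an `OP` and an `OPNAME` parameter and is instantiated twice. `OP_PUT` splices nothing into the asm string, so the `put_` variant stores the filtered pixels exactly as before; `OP_AVG` splices a single MMX2 `pavgb` that first blends them with the pixels already in `dst`. The sketch below shows the same one-body/two-instantiations trick in plain C rather than inline asm; `VC1_COPY8`, `put_copy8` and `avg_copy8` are illustrative names, not code from the changeset.

```c
#include <stdint.h>

/* Value-returning stand-ins for the patch's asm-splicing OP macros:
 * OP_PUT keeps the computed pixel, OP_AVG blends it with the old dst
 * byte using pavgb's rounded average. */
#define OP_PUT(dst, val) (val)
#define OP_AVG(dst, val) ((uint8_t)(((dst) + (val) + 1) >> 1))

/* One function body, two instantiations -- the same pattern the patch
 * applies to each VC-1 filter via its OP/OPNAME macro parameters. */
#define VC1_COPY8(OP, OPNAME)                                   \
static void OPNAME ## copy8(uint8_t *dst, const uint8_t *src,   \
                            int stride, int h)                  \
{                                                               \
    for (int y = 0; y < h; y++) {                               \
        for (int x = 0; x < 8; x++)                             \
            dst[x] = OP(dst[x], src[x]);                        \
        src += stride;                                          \
        dst += stride;                                          \
    }                                                           \
}

VC1_COPY8(OP_PUT, put_) /* generates put_copy8(): plain store      */
VC1_COPY8(OP_AVG, avg_) /* generates avg_copy8(): blend, then store */
```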
--- a/x86/dsputil_mmx.c	Tue Apr 14 23:56:10 2009 +0000
+++ b/x86/dsputil_mmx.c	Wed Apr 15 02:25:42 2009 +0000
@@ -1813,6 +1813,9 @@
 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
     put_pixels8_mmx(dst, src, stride, 8);
 }
+void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
+    avg_pixels8_mmx2(dst, src, stride, 8);
+}
 
 /* external functions, from idct_mmx.c */
 void ff_mmx_idct(DCTELEM *block);
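For the pixel-aligned mc00 case the patch simply reuses dsputil's existing `avg_pixels8_mmx2`, whose inner operation is `pavgb`: a per-byte average rounded upward, `(a + b + 1) >> 1`. A scalar reference of that semantics (our illustration, not code from the patch; `avg_pixels8_c_ref` is a made-up name):

```c
#include <stdint.h>

/* Scalar model of what the MMX2 avg path computes for each byte:
 * pavgb's rounding average of the new prediction and the old dst. */
static void avg_pixels8_c_ref(uint8_t *dst, const uint8_t *src,
                              int stride, int h)
{
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (uint8_t)((dst[j] + src[j] + 1) >> 1);
        dst += stride;
        src += stride;
    }
}
```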
--- a/x86/vc1dsp_mmx.c	Tue Apr 14 23:56:10 2009 +0000
+++ b/x86/vc1dsp_mmx.c	Wed Apr 15 02:25:42 2009 +0000
@@ -28,6 +28,9 @@
 #include "libavcodec/dsputil.h"
 #include "dsputil_mmx.h"
 
+#define OP_PUT(S,D)
+#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"
+
 /** Add rounder from mm7 to mm3 and pack result at destination */
 #define NORMALIZE_MMX(SHIFT)                                    \
      "paddw     %%mm7, %%mm3       \n\t" /* +bias-r */          \
@@ -35,11 +38,14 @@
      "psraw     "SHIFT", %%mm3     \n\t"                        \
      "psraw     "SHIFT", %%mm4     \n\t"
 
-#define TRANSFER_DO_PACK                        \
+#define TRANSFER_DO_PACK(OP)                    \
      "packuswb  %%mm4, %%mm3      \n\t"         \
+     OP((%2), %%mm3)                            \
      "movq      %%mm3, (%2)       \n\t"
 
-#define TRANSFER_DONT_PACK                      \
+#define TRANSFER_DONT_PACK(OP)                  \
+     OP(0(%2), %%mm3)                           \
+     OP(8(%2), %%mm4)                           \
      "movq      %%mm3, 0(%2)      \n\t"         \
      "movq      %%mm4, 8(%2)      \n\t"
 
@@ -107,98 +113,107 @@
  * Data is already unpacked, so some operations can directly be made from
  * memory.
  */
-static void vc1_put_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,
-                                       const int16_t *src, int rnd)
-{
-    int h = 8;
+#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
+static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
+                                             const int16_t *src, int rnd)\
+{\
+    int h = 8;\
+\
+    src -= 1;\
+    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
+    __asm__ volatile(\
+        LOAD_ROUNDER_MMX("%4")\
+        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"\
+        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"\
+        "1:                            \n\t"\
+        "movq      2*0+0(%1), %%mm1    \n\t"\
+        "movq      2*0+8(%1), %%mm2    \n\t"\
+        "movq      2*1+0(%1), %%mm3    \n\t"\
+        "movq      2*1+8(%1), %%mm4    \n\t"\
+        "paddw     2*3+0(%1), %%mm1    \n\t"\
+        "paddw     2*3+8(%1), %%mm2    \n\t"\
+        "paddw     2*2+0(%1), %%mm3    \n\t"\
+        "paddw     2*2+8(%1), %%mm4    \n\t"\
+        "pmullw    %%mm5, %%mm3        \n\t"\
+        "pmullw    %%mm5, %%mm4        \n\t"\
+        "psubw     %%mm1, %%mm3        \n\t"\
+        "psubw     %%mm2, %%mm4        \n\t"\
+        NORMALIZE_MMX("$7")\
+        /* Remove bias */\
+        "paddw     %%mm6, %%mm3        \n\t"\
+        "paddw     %%mm6, %%mm4        \n\t"\
+        TRANSFER_DO_PACK(OP)\
+        "add       $24, %1             \n\t"\
+        "add       %3, %2              \n\t"\
+        "decl      %0                  \n\t"\
+        "jnz 1b                        \n\t"\
+        : "+r"(h), "+r" (src),  "+r" (dst)\
+        : "r"(stride), "m"(rnd)\
+        : "memory"\
+    );\
+}
 
-    src -= 1;
-    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */
-    __asm__ volatile(
-        LOAD_ROUNDER_MMX("%4")
-        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"
-        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"
-        "1:                            \n\t"
-        "movq      2*0+0(%1), %%mm1    \n\t"
-        "movq      2*0+8(%1), %%mm2    \n\t"
-        "movq      2*1+0(%1), %%mm3    \n\t"
-        "movq      2*1+8(%1), %%mm4    \n\t"
-        "paddw     2*3+0(%1), %%mm1    \n\t"
-        "paddw     2*3+8(%1), %%mm2    \n\t"
-        "paddw     2*2+0(%1), %%mm3    \n\t"
-        "paddw     2*2+8(%1), %%mm4    \n\t"
-        "pmullw    %%mm5, %%mm3        \n\t"
-        "pmullw    %%mm5, %%mm4        \n\t"
-        "psubw     %%mm1, %%mm3        \n\t"
-        "psubw     %%mm2, %%mm4        \n\t"
-        NORMALIZE_MMX("$7")
-        /* Remove bias */
-        "paddw     %%mm6, %%mm3        \n\t"
-        "paddw     %%mm6, %%mm4        \n\t"
-        TRANSFER_DO_PACK
-        "add       $24, %1             \n\t"
-        "add       %3, %2              \n\t"
-        "decl      %0                  \n\t"
-        "jnz 1b                        \n\t"
-        : "+r"(h), "+r" (src),  "+r" (dst)
-        : "r"(stride), "m"(rnd)
-        : "memory"
-    );
-}
+VC1_HOR_16b_SHIFT2(OP_PUT, put_)
+VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
 
 /**
  * Purely vertical or horizontal 1/2 shift interpolation.
  * Sacrify mm6 for *9 factor.
  */
-static void vc1_put_shift2_mmx(uint8_t *dst, const uint8_t *src,
-                               x86_reg stride, int rnd, x86_reg offset)
-{
-    rnd = 8-rnd;
-    __asm__ volatile(
-        "mov       $8, %%"REG_c"           \n\t"
-        LOAD_ROUNDER_MMX("%5")
-        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"
-        "1:                                \n\t"
-        "movd      0(%0   ), %%mm3         \n\t"
-        "movd      4(%0   ), %%mm4         \n\t"
-        "movd      0(%0,%2), %%mm1         \n\t"
-        "movd      4(%0,%2), %%mm2         \n\t"
-        "add       %2, %0                  \n\t"
-        "punpcklbw %%mm0, %%mm3            \n\t"
-        "punpcklbw %%mm0, %%mm4            \n\t"
-        "punpcklbw %%mm0, %%mm1            \n\t"
-        "punpcklbw %%mm0, %%mm2            \n\t"
-        "paddw     %%mm1, %%mm3            \n\t"
-        "paddw     %%mm2, %%mm4            \n\t"
-        "movd      0(%0,%3), %%mm1         \n\t"
-        "movd      4(%0,%3), %%mm2         \n\t"
-        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/
-        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/
-        "punpcklbw %%mm0, %%mm1            \n\t"
-        "punpcklbw %%mm0, %%mm2            \n\t"
-        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/
-        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/
-        "movd      0(%0,%2), %%mm1         \n\t"
-        "movd      4(%0,%2), %%mm2         \n\t"
-        "punpcklbw %%mm0, %%mm1            \n\t"
-        "punpcklbw %%mm0, %%mm2            \n\t"
-        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/
-        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/
-        NORMALIZE_MMX("$4")
-        "packuswb  %%mm4, %%mm3            \n\t"
-        "movq      %%mm3, (%1)             \n\t"
-        "add       %6, %0                  \n\t"
-        "add       %4, %1                  \n\t"
-        "dec       %%"REG_c"               \n\t"
-        "jnz 1b                            \n\t"
-        : "+r"(src),  "+r"(dst)
-        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),
-          "g"(stride-offset)
-        : "%"REG_c, "memory"
-    );
+#define VC1_SHIFT2(OP, OPNAME)\
+static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
+                                     x86_reg stride, int rnd, x86_reg offset)\
+{\
+    rnd = 8-rnd;\
+    __asm__ volatile(\
+        "mov       $8, %%"REG_c"           \n\t"\
+        LOAD_ROUNDER_MMX("%5")\
+        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
+        "1:                                \n\t"\
+        "movd      0(%0   ), %%mm3         \n\t"\
+        "movd      4(%0   ), %%mm4         \n\t"\
+        "movd      0(%0,%2), %%mm1         \n\t"\
+        "movd      4(%0,%2), %%mm2         \n\t"\
+        "add       %2, %0                  \n\t"\
+        "punpcklbw %%mm0, %%mm3            \n\t"\
+        "punpcklbw %%mm0, %%mm4            \n\t"\
+        "punpcklbw %%mm0, %%mm1            \n\t"\
+        "punpcklbw %%mm0, %%mm2            \n\t"\
+        "paddw     %%mm1, %%mm3            \n\t"\
+        "paddw     %%mm2, %%mm4            \n\t"\
+        "movd      0(%0,%3), %%mm1         \n\t"\
+        "movd      4(%0,%3), %%mm2         \n\t"\
+        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/\
+        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/\
+        "punpcklbw %%mm0, %%mm1            \n\t"\
+        "punpcklbw %%mm0, %%mm2            \n\t"\
+        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/\
+        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/\
+        "movd      0(%0,%2), %%mm1         \n\t"\
+        "movd      4(%0,%2), %%mm2         \n\t"\
+        "punpcklbw %%mm0, %%mm1            \n\t"\
+        "punpcklbw %%mm0, %%mm2            \n\t"\
+        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/\
+        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/\
+        NORMALIZE_MMX("$4")\
+        "packuswb  %%mm4, %%mm3            \n\t"\
+        OP((%1), %%mm3)\
+        "movq      %%mm3, (%1)             \n\t"\
+        "add       %6, %0                  \n\t"\
+        "add       %4, %1                  \n\t"\
+        "dec       %%"REG_c"               \n\t"\
+        "jnz 1b                            \n\t"\
+        : "+r"(src),  "+r"(dst)\
+        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
+          "g"(stride-offset)\
+        : "%"REG_c, "memory"\
+    );\
 }
 
+VC1_SHIFT2(OP_PUT, put_)
+VC1_SHIFT2(OP_AVG, avg_)
+
 /**
  * Filter coefficients made global to allow access by all 1 or 3 quarter shift
  * interpolation functions.
@@ -272,7 +287,7 @@
     "1:                        \n\t"                               \
     MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)      \
     NORMALIZE_MMX("%6")                                            \
-    TRANSFER_DONT_PACK                                             \
+    TRANSFER_DONT_PACK(OP_PUT)                                     \
     /* Last 3 (in fact 4) bytes on the line */                     \
     "movd      8+"A1", %%mm1   \n\t"                               \
     DO_UNPACK("%%mm1")                                             \
@@ -312,9 +327,9 @@
  * @param NAME Either 1 or 3
  * @see MSPEL_FILTER13_CORE for information on A1->A4
  */
-#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4)                    \
+#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)        \
 static void                                                             \
-vc1_put_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,          \
+OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,    \
                                  const int16_t *src, int rnd)           \
 {                                                                       \
     int h = 8;                                                          \
@@ -331,7 +346,7 @@
     /* Remove bias */                                                   \
     "paddw     "MANGLE(ff_pw_128)", %%mm3      \n\t"                    \
     "paddw     "MANGLE(ff_pw_128)", %%mm4      \n\t"                    \
-    TRANSFER_DO_PACK                                                    \
+    TRANSFER_DO_PACK(OP)                                                \
     "add       $24, %1         \n\t"                                    \
     "add       %3, %2          \n\t"                                    \
     "decl      %0              \n\t"                                    \
@@ -350,9 +365,9 @@
  * @param NAME Either 1 or 3
  * @see MSPEL_FILTER13_CORE for information on A1->A4
  */
-#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4)                         \
+#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)             \
 static void                                                             \
-vc1_put_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,               \
+OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,         \
                         x86_reg stride, int rnd, x86_reg offset)        \
 {                                                                       \
     int h = 8;                                                          \
@@ -366,7 +381,7 @@
     "1:                        \n\t"                                    \
     MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)           \
     NORMALIZE_MMX("$6")                                                 \
-    TRANSFER_DO_PACK                                                    \
+    TRANSFER_DO_PACK(OP)                                                \
     "add       %5, %1          \n\t"                                    \
     "add       %5, %2          \n\t"                                    \
     "decl      %0              \n\t"                                    \
@@ -378,14 +393,18 @@
 }
 
 /** 1/4 shift bicubic interpolation */
-MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
+MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
+MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
 MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
-MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)")
+MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
+MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)
 
 /** 3/4 shift bicubic interpolation */
-MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
+MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
+MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
 MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
-MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)")
+MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
+MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)
 
 typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
 typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
@@ -402,50 +421,58 @@
  * @param hmode Vertical filter.
  * @param rnd   Rounding bias.
  */
-static void vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,
-                         int hmode, int vmode, int rnd)
-{
-    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =
-         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };
-    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =
-         { NULL, vc1_put_hor_16b_shift1_mmx, vc1_put_hor_16b_shift2_mmx, vc1_put_hor_16b_shift3_mmx };
-    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =
-         { NULL, vc1_put_shift1_mmx, vc1_put_shift2_mmx, vc1_put_shift3_mmx };
-
-    __asm__ volatile(
-        "pxor %%mm0, %%mm0         \n\t"
-        ::: "memory"
-    );
-
-    if (vmode) { /* Vertical filter to apply */
-        if (hmode) { /* Horizontal filter to apply, output to tmp */
-            static const int shift_value[] = { 0, 5, 1, 5 };
-            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;
-            int              r;
-            DECLARE_ALIGNED_16(int16_t, tmp[12*8]);
-
-            r = (1<<(shift-1)) + rnd-1;
-            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);
-
-            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);
-            return;
-        }
-        else { /* No horizontal filter, output 8 lines to dst */
-            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);
-            return;
-        }
-    }
-
-    /* Horizontal mode with no vertical mode */
-    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);
+#define VC1_MSPEL_MC(OP)\
+static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
+                               int hmode, int vmode, int rnd)\
+{\
+    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
+         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
+    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
+         { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
+    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
+         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
+\
+    __asm__ volatile(\
+        "pxor %%mm0, %%mm0         \n\t"\
+        ::: "memory"\
+    );\
+\
+    if (vmode) { /* Vertical filter to apply */\
+        if (hmode) { /* Horizontal filter to apply, output to tmp */\
+            static const int shift_value[] = { 0, 5, 1, 5 };\
+            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;\
+            int              r;\
+            DECLARE_ALIGNED_16(int16_t, tmp[12*8]);\
+\
+            r = (1<<(shift-1)) + rnd-1;\
+            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
+\
+            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
+            return;\
+        }\
+        else { /* No horizontal filter, output 8 lines to dst */\
+            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
+            return;\
+        }\
+    }\
+\
+    /* Horizontal mode with no vertical mode */\
+    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
 }
 
+VC1_MSPEL_MC(put_)
+VC1_MSPEL_MC(avg_)
+
 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd);
+void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd);
 
 /** Macro to ease bicubic filter interpolation functions declarations */
 #define DECLARE_FUNCTION(a, b) \
 static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
-     vc1_mspel_mc(dst, src, stride, a, b, rnd);                         \
+     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
+}\
+static void avg_vc1_mspel_mc ## a ## b ## _mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
+     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd);                     \
 }
 
 DECLARE_FUNCTION(0, 1)
@@ -468,6 +495,8 @@
 DECLARE_FUNCTION(3, 3)
 
 void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
+    mm_flags = mm_support();
+
     dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
     dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
     dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
@@ -487,4 +516,26 @@
     dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
     dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
     dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
+
+    if (mm_flags & FF_MM_MMX2){
+        dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmx2;
+
+        dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmx2;
+
+        dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmx2;
+
+        dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmx2;
+        dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmx2;
+    }
 }
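The sixteen `*_vc1_mspel_pixels_tab` slots cover every quarter-pel position; from the assignments above, the index packs the two mode components as `hmode + 4*vmode` (mc10 at slot 1, mc01 at slot 4, and so on). Note also that the `avg` table is filled only when `mm_support()` reports MMX2, since `pavgb` does not exist on plain MMX, so a caller must not take the avg path on MMX-only machines. A hypothetical caller-side sketch of the dispatch; `mc_block` and its `avg` flag are our illustration, not FFmpeg API:

```c
#include "libavcodec/dsputil.h"  /* DSPContext and the mspel tables */

/* Illustrative dispatch helper (not part of the changeset): pick the
 * put or avg entry for a given quarter-pel (hmode, vmode) pair. */
static void mc_block(DSPContext *dsp, uint8_t *dst, const uint8_t *src,
                     int stride, int hmode, int vmode, int rnd, int avg)
{
    const int idx = hmode + 4 * vmode;  /* 0..15, matching the table above */
    if (avg)   /* blend with existing dst, e.g. for a second reference */
        dsp->avg_vc1_mspel_pixels_tab[idx](dst, src, stride, rnd);
    else       /* overwrite dst */
        dsp->put_vc1_mspel_pixels_tab[idx](dst, src, stride, rnd);
}
```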