Mercurial > libavcodec.hg
comparison x86/h264dsp_mmx.c @ 8590:7a463923ecd1 libavcodec
Change semantic of CONFIG_*, HAVE_* and ARCH_*.
They are now always defined to either 0 or 1.
| author | aurel |
|---|---|
| date | Tue, 13 Jan 2009 23:44:16 +0000 |
| parents | cc64e1343397 |
| children | 93980b03673e |
comparison
equal
deleted
inserted
replaced
| 8589:a29b5b5c3c9d | 8590:7a463923ecd1 |
|---|---|
470 else if(block[i*16]) | 470 else if(block[i*16]) |
471 ff_h264_idct_dc_add_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride); | 471 ff_h264_idct_dc_add_mmx2(dest[(i&4)>>2] + block_offset[i], block + i*16, stride); |
472 } | 472 } |
473 } | 473 } |
474 | 474 |
475 #if defined(CONFIG_GPL) && defined(HAVE_YASM) | 475 #if CONFIG_GPL && HAVE_YASM |
476 static void ff_h264_idct_dc_add8_mmx2(uint8_t *dst, int16_t *block, int stride) | 476 static void ff_h264_idct_dc_add8_mmx2(uint8_t *dst, int16_t *block, int stride) |
477 { | 477 { |
478 __asm__ volatile( | 478 __asm__ volatile( |
479 "movd %0, %%mm0 \n\t" // 0 0 X D | 479 "movd %0, %%mm0 \n\t" // 0 0 X D |
480 "punpcklwd %1, %%mm0 \n\t" // x X d D | 480 "punpcklwd %1, %%mm0 \n\t" // x X d D |
1486 OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\ | 1486 OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\ |
1487 OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\ | 1487 OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\ |
1488 }\ | 1488 }\ |
1489 | 1489 |
1490 | 1490 |
1491 #ifdef ARCH_X86_64 | 1491 #if ARCH_X86_64 |
1492 #define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ | 1492 #define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\ |
1493 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ | 1493 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ |
1494 int h=16;\ | 1494 int h=16;\ |
1495 __asm__ volatile(\ | 1495 __asm__ volatile(\ |
1496 "pxor %%xmm15, %%xmm15 \n\t"\ | 1496 "pxor %%xmm15, %%xmm15 \n\t"\ |
2063 QPEL_H264(avg_, AVG_MMX2_OP, mmx2) | 2063 QPEL_H264(avg_, AVG_MMX2_OP, mmx2) |
2064 QPEL_H264_V_XMM(put_, PUT_OP, sse2) | 2064 QPEL_H264_V_XMM(put_, PUT_OP, sse2) |
2065 QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2) | 2065 QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2) |
2066 QPEL_H264_HV_XMM(put_, PUT_OP, sse2) | 2066 QPEL_H264_HV_XMM(put_, PUT_OP, sse2) |
2067 QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2) | 2067 QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2) |
2068 #ifdef HAVE_SSSE3 | 2068 #if HAVE_SSSE3 |
2069 QPEL_H264_H_XMM(put_, PUT_OP, ssse3) | 2069 QPEL_H264_H_XMM(put_, PUT_OP, ssse3) |
2070 QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3) | 2070 QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3) |
2071 QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3) | 2071 QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3) |
2072 QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3) | 2072 QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3) |
2073 QPEL_H264_HV_XMM(put_, PUT_OP, ssse3) | 2073 QPEL_H264_HV_XMM(put_, PUT_OP, ssse3) |
2077 | 2077 |
2078 H264_MC_4816(3dnow) | 2078 H264_MC_4816(3dnow) |
2079 H264_MC_4816(mmx2) | 2079 H264_MC_4816(mmx2) |
2080 H264_MC_816(H264_MC_V, sse2) | 2080 H264_MC_816(H264_MC_V, sse2) |
2081 H264_MC_816(H264_MC_HV, sse2) | 2081 H264_MC_816(H264_MC_HV, sse2) |
2082 #ifdef HAVE_SSSE3 | 2082 #if HAVE_SSSE3 |
2083 H264_MC_816(H264_MC_H, ssse3) | 2083 H264_MC_816(H264_MC_H, ssse3) |
2084 H264_MC_816(H264_MC_HV, ssse3) | 2084 H264_MC_816(H264_MC_HV, ssse3) |
2085 #endif | 2085 #endif |
2086 | 2086 |
2087 /* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */ | 2087 /* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */ |
2159 #undef H264_CHROMA_OP4 | 2159 #undef H264_CHROMA_OP4 |
2160 #undef H264_CHROMA_MC8_TMPL | 2160 #undef H264_CHROMA_MC8_TMPL |
2161 #undef H264_CHROMA_MC4_TMPL | 2161 #undef H264_CHROMA_MC4_TMPL |
2162 #undef H264_CHROMA_MC8_MV0 | 2162 #undef H264_CHROMA_MC8_MV0 |
2163 | 2163 |
2164 #ifdef HAVE_SSSE3 | 2164 #if HAVE_SSSE3 |
2165 #define AVG_OP(X) | 2165 #define AVG_OP(X) |
2166 #undef H264_CHROMA_MC8_TMPL | 2166 #undef H264_CHROMA_MC8_TMPL |
2167 #undef H264_CHROMA_MC4_TMPL | 2167 #undef H264_CHROMA_MC4_TMPL |
2168 #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3 | 2168 #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3 |
2169 #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3 | 2169 #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3 |