changeset 5737:efa3c1f9259a libavcodec
SSE2 version of compute_autocorr().
4x faster than C (somehow, even though doubles only allow 2x SIMD).
Overall FLAC encoding: 15-50% faster on Core 2, 4-11% on K8, 3-13% on P4.
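
For orientation: the routine being vectorized produces the lag+1 Welch-windowed autocorrelation values autoc[0]..autoc[lag] that the LPC analysis consumes. Below is a minimal scalar sketch of that computation, not code from this changeset; the function and variable names are illustrative, it assumes flacenc.c's existing apply_welch_window() helper, and the 1.0 accumulator bias mirrors the ff_pd_1 constant the SSE2 code loads.

```c
/* Illustrative scalar reference (hypothetical names, not part of the patch):
 * what flac_compute_autocorr_sse2() computes, two lags per pass in SSE2. */
static void autocorr_ref(const int32_t *data, int len, int lag, double *autoc)
{
    double w_data[len];                     /* Welch-windowed copy of the samples */
    int i, j;

    apply_welch_window(data, len, w_data);  /* existing scalar helper in flacenc.c */

    for (j = 0; j <= lag; j++) {            /* one dot product per lag 0..lag */
        double sum = 1.0;                   /* accumulators start at 1.0 (ff_pd_1) */
        for (i = j; i < len; i++)
            sum += w_data[i] * w_data[i - j];
        autoc[j] = sum;
    }
}
```

The SSE2 version reaches the same result by zero-padding `lag` entries in front of the windowed buffer, so the inner loop can run over a fixed range for every lag, and by summing the two halves of each xmm accumulator at the end.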
| author   | lorenm |
|----------|--------|
| date     | Sat, 29 Sep 2007 22:31:18 +0000 |
| parents  | 810067f2c33d |
| children | c1a4aae5adb4 |
| files    | dsputil.c dsputil.h flacenc.c i386/dsputil_mmx.c |
| diffstat | 4 files changed, 141 insertions(+), 5 deletions(-) |
--- a/dsputil.c    Sat Sep 29 15:20:22 2007 +0000
+++ b/dsputil.c    Sat Sep 29 22:31:18 2007 +0000
@@ -41,6 +41,9 @@
 /* vorbis.c */
 void vorbis_inverse_coupling(float *mag, float *ang, int blocksize);
 
+/* flacenc.c */
+void ff_flac_compute_autocorr(const int32_t *data, int len, int lag, double *autoc);
+
 uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
 uint32_t ff_squareTbl[512] = {0, };
 
@@ -4132,6 +4135,9 @@
 #ifdef CONFIG_VORBIS_DECODER
     c->vorbis_inverse_coupling = vorbis_inverse_coupling;
 #endif
+#ifdef CONFIG_FLAC_ENCODER
+    c->flac_compute_autocorr = ff_flac_compute_autocorr;
+#endif
     c->vector_fmul = vector_fmul_c;
     c->vector_fmul_reverse = vector_fmul_reverse_c;
     c->vector_fmul_add_add = ff_vector_fmul_add_add_c;
--- a/dsputil.h    Sat Sep 29 15:20:22 2007 +0000
+++ b/dsputil.h    Sat Sep 29 22:31:18 2007 +0000
@@ -328,6 +328,8 @@
     /* assume len is a multiple of 4, and arrays are 16-byte aligned */
     void (*vorbis_inverse_coupling)(float *mag, float *ang, int blocksize);
+    /* no alignment needed */
+    void (*flac_compute_autocorr)(const int32_t *data, int len, int lag, double *autoc);
     /* assume len is a multiple of 8, and arrays are 16-byte aligned */
     void (*vector_fmul)(float *dst, const float *src, int len);
     void (*vector_fmul_reverse)(float *dst, const float *src0, const float *src1, int len);
--- a/flacenc.c    Sat Sep 29 15:20:22 2007 +0000
+++ b/flacenc.c    Sat Sep 29 22:31:18 2007 +0000
@@ -22,6 +22,7 @@
 #include "avcodec.h"
 #include "bitstream.h"
 #include "crc.h"
+#include "dsputil.h"
 #include "golomb.h"
 #include "lls.h"
 
@@ -107,6 +108,7 @@
     FlacFrame frame;
     CompressionOptions options;
     AVCodecContext *avctx;
+    DSPContext dsp;
 } FlacEncodeContext;
 
 static const int flac_samplerates[16] = {
@@ -177,6 +179,8 @@
 
     s->avctx = avctx;
 
+    dsputil_init(&s->dsp, avctx);
+
     if(avctx->sample_fmt != SAMPLE_FMT_S16) {
         return -1;
     }
@@ -604,8 +608,8 @@
  * Calculates autocorrelation data from audio samples
  * A Welch window function is applied before calculation.
  */
-static void compute_autocorr(const int32_t *data, int len, int lag,
-                             double *autoc)
+void ff_flac_compute_autocorr(const int32_t *data, int len, int lag,
+                              double *autoc)
 {
     int i, j;
     double tmp[len + lag + 1];
@@ -747,7 +751,8 @@
 /**
  * Calculate LPC coefficients for multiple orders
  */
-static int lpc_calc_coefs(const int32_t *samples, int blocksize, int max_order,
+static int lpc_calc_coefs(FlacEncodeContext *s,
+                          const int32_t *samples, int blocksize, int max_order,
                           int precision, int32_t coefs[][MAX_LPC_ORDER], int *shift,
                           int use_lpc, int omethod)
 {
@@ -760,7 +765,7 @@
     assert(max_order >= MIN_LPC_ORDER && max_order <= MAX_LPC_ORDER);
 
     if(use_lpc == 1){
-        compute_autocorr(samples, blocksize, max_order, autoc);
+        s->dsp.flac_compute_autocorr(samples, blocksize, max_order, autoc);
 
         compute_lpc_coefs(autoc, max_order, lpc, ref);
     }else{
@@ -1017,7 +1022,7 @@
     }
 
     /* LPC */
-    opt_order = lpc_calc_coefs(smp, n, max_order, precision, coefs, shift, ctx->options.use_lpc, omethod);
+    opt_order = lpc_calc_coefs(ctx, smp, n, max_order, precision, coefs, shift, ctx->options.use_lpc, omethod);
 
     if(omethod == ORDER_METHOD_2LEVEL ||
        omethod == ORDER_METHOD_4LEVEL ||
--- a/i386/dsputil_mmx.c    Sat Sep 29 15:20:22 2007 +0000
+++ b/i386/dsputil_mmx.c    Sat Sep 29 22:31:18 2007 +0000
@@ -65,6 +65,9 @@
 static const uint64_t ff_pb_5F attribute_used __attribute__ ((aligned(8))) = 0x5F5F5F5F5F5F5F5FULL;
 static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
 
+static const double ff_pd_1[2] attribute_used __attribute__ ((aligned(16))) = { 1.0, 1.0 };
+static const double ff_pd_2[2] attribute_used __attribute__ ((aligned(16))) = { 2.0, 2.0 };
+
 #define JUMPALIGN() __asm __volatile (ASMALIGN(3)::)
 #define MOVQ_ZERO(regd)  __asm __volatile ("pxor %%" #regd ", %%" #regd ::)
 
@@ -2958,6 +2961,125 @@
     }
 }
 
+#ifdef CONFIG_ENCODERS
+static void apply_welch_window_sse2(const int32_t *data, int len, double *w_data)
+{
+    double c = 2.0 / (len-1.0);
+    int n2 = len>>1;
+    long i = -n2*sizeof(int32_t);
+    long j =  n2*sizeof(int32_t);
+    asm volatile(
+        "movsd   %0,     %%xmm7 \n\t"
+        "movapd  %1,     %%xmm6 \n\t"
+        "movapd  %2,     %%xmm5 \n\t"
+        "movlhps %%xmm7, %%xmm7 \n\t"
+        "subpd   %%xmm5, %%xmm7 \n\t"
+        "addsd   %%xmm6, %%xmm7 \n\t"
+        ::"m"(c), "m"(*ff_pd_1), "m"(*ff_pd_2)
+    );
+#define WELCH(MOVPD)\
+    asm volatile(\
+        "1:                         \n\t"\
+        "movapd   %%xmm7,  %%xmm1   \n\t"\
+        "mulpd    %%xmm1,  %%xmm1   \n\t"\
+        "movapd   %%xmm6,  %%xmm0   \n\t"\
+        "subpd    %%xmm1,  %%xmm0   \n\t"\
+        "pshufd   $0x4e,   %%xmm0, %%xmm1 \n\t"\
+        "cvtpi2pd (%4,%0), %%xmm2   \n\t"\
+        "cvtpi2pd (%5,%1), %%xmm3   \n\t"\
+        "mulpd    %%xmm0,  %%xmm2   \n\t"\
+        "mulpd    %%xmm1,  %%xmm3   \n\t"\
+        "movapd   %%xmm2, (%2,%0,2) \n\t"\
+        MOVPD"    %%xmm3, (%3,%1,2) \n\t"\
+        "subpd    %%xmm5,  %%xmm7   \n\t"\
+        "sub      $8,      %1       \n\t"\
+        "add      $8,      %0       \n\t"\
+        "jl 1b                      \n\t"\
+        :"+&r"(i), "+&r"(j)\
+        :"r"(w_data+n2), "r"(w_data+len-2-n2),\
+         "r"(data+n2), "r"(data+len-2-n2)\
+    );
+    if(len&1)
+        WELCH("movupd")
+    else
+        WELCH("movapd")
+#undef WELCH
+}
+
+static void flac_compute_autocorr_sse2(const int32_t *data, int len, int lag,
+                                       double *autoc)
+{
+    double tmp[len + lag + 2];
+    double *data1 = tmp + lag;
+    int j;
+
+    if((long)data1 & 15)
+        data1++;
+
+    apply_welch_window_sse2(data, len, data1);
+
+    for(j=0; j<lag; j++)
+        data1[j-lag]= 0.0;
+    data1[len] = 0.0;
+
+    for(j=0; j<lag; j+=2){
+        long i = -len*sizeof(double);
+        if(j == lag-2) {
+            asm volatile(
+                "movsd     %6,         %%xmm0 \n\t"
+                "movsd     %6,         %%xmm1 \n\t"
+                "movsd     %6,         %%xmm2 \n\t"
+                "1:                           \n\t"
+                "movapd    (%4,%0),    %%xmm3 \n\t"
+                "movupd  -8(%5,%0),    %%xmm4 \n\t"
+                "movapd    (%5,%0),    %%xmm5 \n\t"
+                "mulpd     %%xmm3,     %%xmm4 \n\t"
+                "mulpd     %%xmm3,     %%xmm5 \n\t"
+                "mulpd   -16(%5,%0),   %%xmm3 \n\t"
+                "addpd     %%xmm4,     %%xmm1 \n\t"
+                "addpd     %%xmm5,     %%xmm0 \n\t"
+                "addpd     %%xmm3,     %%xmm2 \n\t"
+                "add       $16,        %0     \n\t"
+                "jl 1b                        \n\t"
+                "movhlps   %%xmm0,     %%xmm3 \n\t"
+                "movhlps   %%xmm1,     %%xmm4 \n\t"
+                "movhlps   %%xmm2,     %%xmm5 \n\t"
+                "addsd     %%xmm3,     %%xmm0 \n\t"
+                "addsd     %%xmm4,     %%xmm1 \n\t"
+                "addsd     %%xmm5,     %%xmm2 \n\t"
+                "movsd     %%xmm0,     %1     \n\t"
+                "movsd     %%xmm1,     %2     \n\t"
+                "movsd     %%xmm2,     %3     \n\t"
+                :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1]), "=m"(autoc[j+2])
+                :"r"(data1+len), "r"(data1+len-j), "m"(*ff_pd_1)
+            );
+        } else {
+            asm volatile(
+                "movsd     %5,         %%xmm0 \n\t"
+                "movsd     %5,         %%xmm1 \n\t"
+                "1:                           \n\t"
+                "movapd    (%3,%0),    %%xmm3 \n\t"
+                "movupd  -8(%4,%0),    %%xmm4 \n\t"
+                "mulpd     %%xmm3,     %%xmm4 \n\t"
+                "mulpd     (%4,%0),    %%xmm3 \n\t"
+                "addpd     %%xmm4,     %%xmm1 \n\t"
+                "addpd     %%xmm3,     %%xmm0 \n\t"
+                "add       $16,        %0     \n\t"
+                "jl 1b                        \n\t"
+                "movhlps   %%xmm0,     %%xmm3 \n\t"
+                "movhlps   %%xmm1,     %%xmm4 \n\t"
+                "addsd     %%xmm3,     %%xmm0 \n\t"
+                "addsd     %%xmm4,     %%xmm1 \n\t"
+                "movsd     %%xmm0,     %1     \n\t"
+                "movsd     %%xmm1,     %2     \n\t"
+                :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
+                :"r"(data1+len), "r"(data1+len-j), "m"(*ff_pd_1)
+            );
+        }
+    }
+}
+#endif // CONFIG_ENCODERS
+
 static void vector_fmul_3dnow(float *dst, const float *src, int len){
     long i = (len-4)*4;
     asm volatile(
@@ -3605,6 +3727,7 @@
             c->sum_abs_dctelem= sum_abs_dctelem_sse2;
             c->hadamard8_diff[0]= hadamard8_diff16_sse2;
             c->hadamard8_diff[1]= hadamard8_diff_sse2;
+            c->flac_compute_autocorr = flac_compute_autocorr_sse2;
         }
 
 #ifdef HAVE_SSSE3