changeset 10301:02798c603744 libavcodec
cosmetics: fix indentation after previous commit
author | mru
---|---
date | Sun, 27 Sep 2009 16:52:00 +0000
parents | 4d1b9ca628fc
children | 6db89678b326
files | ppc/float_altivec.c x86/dsputil_mmx.c
diffstat | 2 files changed, 45 insertions(+), 45 deletions(-)
```diff
--- a/ppc/float_altivec.c	Sun Sep 27 16:51:54 2009 +0000
+++ b/ppc/float_altivec.c	Sun Sep 27 16:52:00 2009 +0000
@@ -75,19 +75,19 @@
     vector unsigned char align = vec_lvsr(0,dst),
                          mask = vec_lvsl(0, dst);
-        for (i=0; i<len-3; i+=4) {
-            t0 = vec_ld(0, dst+i);
-            t1 = vec_ld(15, dst+i);
-            s0 = vec_ld(0, src0+i);
-            s1 = vec_ld(0, src1+i);
-            s2 = vec_ld(0, src2+i);
-            edges = vec_perm(t1 ,t0, mask);
-            d = vec_madd(s0,s1,s2);
-            t1 = vec_perm(d, edges, align);
-            t0 = vec_perm(edges, d, align);
-            vec_st(t1, 15, dst+i);
-            vec_st(t0, 0, dst+i);
-        }
+    for (i=0; i<len-3; i+=4) {
+        t0 = vec_ld(0, dst+i);
+        t1 = vec_ld(15, dst+i);
+        s0 = vec_ld(0, src0+i);
+        s1 = vec_ld(0, src1+i);
+        s2 = vec_ld(0, src2+i);
+        edges = vec_perm(t1 ,t0, mask);
+        d = vec_madd(s0,s1,s2);
+        t1 = vec_perm(d, edges, align);
+        t0 = vec_perm(edges, d, align);
+        vec_st(t1, 15, dst+i);
+        vec_st(t0, 0, dst+i);
+    }
 }
 
 static void vector_fmul_window_altivec(float *dst, const float *src0, const float *src1,
                                        const float *win, float add_bias, int len)
```
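For context, the loop re-indented above is the AltiVec version of vector_fmul_add: vec_madd(s0,s1,s2) carries the whole computation, dst[i] = src0[i]*src1[i] + src2[i], four floats per pass, while the vec_ld/vec_perm/vec_st pairs merely merge the results into a possibly unaligned dst. A minimal scalar sketch of the same operation (the function name is illustrative, not taken from this changeset):

```c
/* Illustrative scalar reference for the AltiVec loop above:
 * the arithmetic is dst[i] = src0[i] * src1[i] + src2[i];
 * vec_madd performs it four floats at a time, and the edge
 * loads/permutes only realign the stores when dst is not
 * 16-byte aligned. */
static void vector_fmul_add_scalar(float *dst, const float *src0,
                                   const float *src1, const float *src2,
                                   int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i] + src2[i];
}
```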
```diff
--- a/x86/dsputil_mmx.c	Sun Sep 27 16:51:54 2009 +0000
+++ b/x86/dsputil_mmx.c	Sun Sep 27 16:52:00 2009 +0000
@@ -2128,43 +2128,43 @@
 static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
                                   const float *src2, int len){
     x86_reg i = (len-4)*4;
-        __asm__ volatile(
-            "1: \n\t"
-            "movq    (%2,%0), %%mm0 \n\t"
-            "movq   8(%2,%0), %%mm1 \n\t"
-            "pfmul   (%3,%0), %%mm0 \n\t"
-            "pfmul  8(%3,%0), %%mm1 \n\t"
-            "pfadd   (%4,%0), %%mm0 \n\t"
-            "pfadd  8(%4,%0), %%mm1 \n\t"
-            "movq  %%mm0,   (%1,%0) \n\t"
-            "movq  %%mm1,  8(%1,%0) \n\t"
-            "sub  $16, %0 \n\t"
-            "jge  1b \n\t"
-            :"+r"(i)
-            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
-            :"memory"
-        );
+    __asm__ volatile(
+        "1: \n\t"
+        "movq    (%2,%0), %%mm0 \n\t"
+        "movq   8(%2,%0), %%mm1 \n\t"
+        "pfmul   (%3,%0), %%mm0 \n\t"
+        "pfmul  8(%3,%0), %%mm1 \n\t"
+        "pfadd   (%4,%0), %%mm0 \n\t"
+        "pfadd  8(%4,%0), %%mm1 \n\t"
+        "movq  %%mm0,   (%1,%0) \n\t"
+        "movq  %%mm1,  8(%1,%0) \n\t"
+        "sub  $16, %0 \n\t"
+        "jge  1b \n\t"
+        :"+r"(i)
+        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+        :"memory"
+    );
     __asm__ volatile("femms");
 }
 
 static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                                 const float *src2, int len){
     x86_reg i = (len-8)*4;
-        __asm__ volatile(
-            "1: \n\t"
-            "movaps    (%2,%0), %%xmm0 \n\t"
-            "movaps  16(%2,%0), %%xmm1 \n\t"
-            "mulps     (%3,%0), %%xmm0 \n\t"
-            "mulps   16(%3,%0), %%xmm1 \n\t"
-            "addps     (%4,%0), %%xmm0 \n\t"
-            "addps   16(%4,%0), %%xmm1 \n\t"
-            "movaps  %%xmm0,   (%1,%0) \n\t"
-            "movaps  %%xmm1, 16(%1,%0) \n\t"
-            "sub  $32, %0 \n\t"
-            "jge  1b \n\t"
-            :"+r"(i)
-            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
-            :"memory"
-        );
+    __asm__ volatile(
+        "1: \n\t"
+        "movaps    (%2,%0), %%xmm0 \n\t"
+        "movaps  16(%2,%0), %%xmm1 \n\t"
+        "mulps     (%3,%0), %%xmm0 \n\t"
+        "mulps   16(%3,%0), %%xmm1 \n\t"
+        "addps     (%4,%0), %%xmm0 \n\t"
+        "addps   16(%4,%0), %%xmm1 \n\t"
+        "movaps  %%xmm0,   (%1,%0) \n\t"
+        "movaps  %%xmm1, 16(%1,%0) \n\t"
+        "sub  $32, %0 \n\t"
+        "jge  1b \n\t"
+        :"+r"(i)
+        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+        :"memory"
+    );
 }
 
 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
```
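Both x86 loops above share one idiom: i is a byte offset preset to the start of the last block ((len-4)*4 for 3DNow!, which handles 4 floats per pass; (len-8)*4 for SSE, which handles 8), the (%2,%0)-style operands are base-plus-offset addressing with that same i, and the loop counts down until "sub" drives i negative. A C sketch of that control flow, under the same assumption the asm makes that len is a multiple of the block's float count:

```c
/* Hypothetical sketch of the countdown structure shared by
 * vector_fmul_add_3dnow and vector_fmul_add_sse; the constants
 * here are the 3DNow! variant (4 floats = 16 bytes per pass). */
static void vector_fmul_add_countdown(float *dst, const float *src0,
                                      const float *src1, const float *src2,
                                      int len)
{
    long i = (len - 4) * 4;      /* byte offset of the last 4-float block */
    int  j;
    do {
        const float *s0 = (const float *)((const char *)src0 + i);
        const float *s1 = (const float *)((const char *)src1 + i);
        const float *s2 = (const float *)((const char *)src2 + i);
        float       *d  = (float *)((char *)dst + i);
        for (j = 0; j < 4; j++)  /* one movq/pfmul/pfadd pair covers 4 floats */
            d[j] = s0[j] * s1[j] + s2[j];
        i -= 16;                 /* sub  $16, %0 */
    } while (i >= 0);            /* jge  1b */
}
```

Counting down lets the single "sub" set the flags that "jge" tests, so the loop needs no separate compare instruction.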