changeset 5586:f065fc609145 libavcodec
whitespace/indentation cosmetics
author | diego
---|---
date | Fri, 24 Aug 2007 23:49:11 +0000
parents | f644e7c90380
children | 3ae03eacbe9f
files | ppc/h264_altivec.c
diffstat | 1 files changed, 104 insertions(+), 104 deletions(-)
--- a/ppc/h264_altivec.c	Fri Aug 24 23:01:50 2007 +0000
+++ b/ppc/h264_altivec.c	Fri Aug 24 23:49:11 2007 +0000
@@ -182,9 +182,9 @@
 void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
     DECLARE_ALIGNED_16(signed int, ABCD[4]) = {((8 - x) * (8 - y)),
-                                                  ((x) * (8 - y)),
-                                                  ((8 - x) * (y)),
-                                                  ((x) * (y))};
+                                                ((x) * (8 - y)),
+                                                ((8 - x) * (y)),
+                                                ((x) * (y))};
     register int i;
     vec_u8_t fperm;
     const vec_s32_t vABCD = vec_ld(0, ABCD);
@@ -195,7 +195,7 @@
     LOAD_ZERO;
     const vec_s16_t v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
     const vec_u16_t v6us = vec_splat_u16(6);
-    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
     register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
 
     vec_u8_t vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
@@ -206,98 +206,98 @@
     vec_u8_t vdst, ppsum, fsum;
 
     if (((unsigned long)dst) % 16 == 0) {
-      fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
-                            0x14, 0x15, 0x16, 0x17,
-                            0x08, 0x09, 0x0A, 0x0B,
-                            0x0C, 0x0D, 0x0E, 0x0F);
+        fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
+                              0x14, 0x15, 0x16, 0x17,
+                              0x08, 0x09, 0x0A, 0x0B,
+                              0x0C, 0x0D, 0x0E, 0x0F);
     } else {
-      fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
-                            0x04, 0x05, 0x06, 0x07,
-                            0x18, 0x19, 0x1A, 0x1B,
-                            0x1C, 0x1D, 0x1E, 0x1F);
+        fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
+                              0x04, 0x05, 0x06, 0x07,
+                              0x18, 0x19, 0x1A, 0x1B,
+                              0x1C, 0x1D, 0x1E, 0x1F);
     }
 
     vsrcAuc = vec_ld(0, src);
 
     if (loadSecond)
-      vsrcBuc = vec_ld(16, src);
+        vsrcBuc = vec_ld(16, src);
     vsrcperm0 = vec_lvsl(0, src);
     vsrcperm1 = vec_lvsl(1, src);
 
     vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
     if (reallyBadAlign)
-      vsrc1uc = vsrcBuc;
+        vsrc1uc = vsrcBuc;
     else
-      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
 
     vsrc0ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc0uc);
     vsrc1ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc1uc);
 
     if (!loadSecond) {// -> !reallyBadAlign
-      for (i = 0 ; i < h ; i++) {
+        for (i = 0 ; i < h ; i++) {
 
-        vsrcCuc = vec_ld(stride + 0, src);
+            vsrcCuc = vec_ld(stride + 0, src);
 
-        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
 
-        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
-        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);
+            vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
+            vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);
 
-        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
-        psum = vec_mladd(vB, vsrc1ssH, psum);
-        psum = vec_mladd(vC, vsrc2ssH, psum);
-        psum = vec_mladd(vD, vsrc3ssH, psum);
-        psum = vec_add(v28ss, psum);
-        psum = vec_sra(psum, v6us);
+            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+            psum = vec_mladd(vB, vsrc1ssH, psum);
+            psum = vec_mladd(vC, vsrc2ssH, psum);
+            psum = vec_mladd(vD, vsrc3ssH, psum);
+            psum = vec_add(v28ss, psum);
+            psum = vec_sra(psum, v6us);
 
-        vdst = vec_ld(0, dst);
-        ppsum = (vec_u8_t)vec_packsu(psum, psum);
-        fsum = vec_perm(vdst, ppsum, fperm);
+            vdst = vec_ld(0, dst);
+            ppsum = (vec_u8_t)vec_packsu(psum, psum);
+            fsum = vec_perm(vdst, ppsum, fperm);
 
-        vec_st(fsum, 0, dst);
+            vec_st(fsum, 0, dst);
 
-        vsrc0ssH = vsrc2ssH;
-        vsrc1ssH = vsrc3ssH;
+            vsrc0ssH = vsrc2ssH;
+            vsrc1ssH = vsrc3ssH;
 
-        dst += stride;
-        src += stride;
-      }
+            dst += stride;
+            src += stride;
+        }
     } else {
         vec_u8_t vsrcDuc;
-      for (i = 0 ; i < h ; i++) {
-        vsrcCuc = vec_ld(stride + 0, src);
-        vsrcDuc = vec_ld(stride + 16, src);
+        for (i = 0 ; i < h ; i++) {
+            vsrcCuc = vec_ld(stride + 0, src);
+            vsrcDuc = vec_ld(stride + 16, src);
 
-        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-        if (reallyBadAlign)
-          vsrc3uc = vsrcDuc;
-        else
-          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+            if (reallyBadAlign)
+                vsrc3uc = vsrcDuc;
+            else
+                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
 
-        vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
-        vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);
+            vsrc2ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc2uc);
+            vsrc3ssH = (vec_s16_t)vec_mergeh(zero_u8v, (vec_u8_t)vsrc3uc);
 
-        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
-        psum = vec_mladd(vB, vsrc1ssH, psum);
-        psum = vec_mladd(vC, vsrc2ssH, psum);
-        psum = vec_mladd(vD, vsrc3ssH, psum);
-        psum = vec_add(v28ss, psum);
-        psum = vec_sr(psum, v6us);
+            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+            psum = vec_mladd(vB, vsrc1ssH, psum);
+            psum = vec_mladd(vC, vsrc2ssH, psum);
+            psum = vec_mladd(vD, vsrc3ssH, psum);
+            psum = vec_add(v28ss, psum);
+            psum = vec_sr(psum, v6us);
 
-        vdst = vec_ld(0, dst);
-        ppsum = (vec_u8_t)vec_pack(psum, psum);
-        fsum = vec_perm(vdst, ppsum, fperm);
+            vdst = vec_ld(0, dst);
+            ppsum = (vec_u8_t)vec_pack(psum, psum);
+            fsum = vec_perm(vdst, ppsum, fperm);
 
-        vec_st(fsum, 0, dst);
+            vec_st(fsum, 0, dst);
 
-        vsrc0ssH = vsrc2ssH;
-        vsrc1ssH = vsrc3ssH;
+            vsrc0ssH = vsrc2ssH;
+            vsrc1ssH = vsrc3ssH;
 
-        dst += stride;
-        src += stride;
-      }
+            dst += stride;
+            src += stride;
+        }
     }
 }
@@ -398,19 +398,19 @@
  * IDCT transform:
  ****************************************************************************/
 
-#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3) \
-    /* 1st stage */ \
-    vz0 = vec_add(vb0,vb2); /* temp[0] = Y[0] + Y[2] */ \
-    vz1 = vec_sub(vb0,vb2); /* temp[1] = Y[0] - Y[2] */ \
-    vz2 = vec_sra(vb1,vec_splat_u16(1)); \
-    vz2 = vec_sub(vz2,vb3); /* temp[2] = Y[1].1/2 - Y[3] */ \
-    vz3 = vec_sra(vb3,vec_splat_u16(1)); \
-    vz3 = vec_add(vb1,vz3); /* temp[3] = Y[1] + Y[3].1/2 */ \
-    /* 2nd stage: output */ \
-    va0 = vec_add(vz0,vz3); /* x[0] = temp[0] + temp[3] */ \
-    va1 = vec_add(vz1,vz2); /* x[1] = temp[1] + temp[2] */ \
-    va2 = vec_sub(vz1,vz2); /* x[2] = temp[1] - temp[2] */ \
-    va3 = vec_sub(vz0,vz3) /* x[3] = temp[0] - temp[3] */
+#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
+    /* 1st stage */                                               \
+    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
+    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
+    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
+    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
+    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
+    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
+    /* 2nd stage: output */                                       \
+    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
+    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
+    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
+    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */
 
 #define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
     b0 = vec_mergeh( a0, a0 ); \
@@ -820,7 +820,7 @@
     finaltc0 = vec_and((vec_u8_t)tc0vec, mask);    /* tc = tc0 */                 \
                                                                                   \
     p1mask = diff_lt_altivec(p2, p0, betavec);                                    \
-    p1mask = vec_and(p1mask, mask);                /* if( |p2 - p0| < beta) */    \
+    p1mask = vec_and(p1mask, mask);                /* if ( |p2 - p0| < beta) */   \
     tc0masked = vec_and(p1mask, (vec_u8_t)tc0vec);                                \
    finaltc0 = vec_sub(finaltc0, p1mask);          /* tc++ */                     \
     newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                           \
@@ -840,7 +840,7 @@
 
 static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
 
-    if((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
+    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
         register vec_u8_t p2 = vec_ld(-3*stride, pix);
         register vec_u8_t p1 = vec_ld(-2*stride, pix);
         register vec_u8_t p0 = vec_ld(-1*stride, pix);
@@ -858,7 +858,7 @@
 
 static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
     register vec_u8_t line0, line1, line2, line3, line4, line5;
-    if((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
+    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
         return;
     readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
@@ -868,35 +868,35 @@
 
 void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
 
-  if (has_altivec()) {
-    c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
-    c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
-    c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
-    c->h264_idct_add = ff_h264_idct_add_altivec;
-    c->h264_idct8_add = ff_h264_idct8_add_altivec;
-    c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
-    c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;
+    if (has_altivec()) {
+        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
+        c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
+        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
+        c->h264_idct_add = ff_h264_idct_add_altivec;
+        c->h264_idct8_add = ff_h264_idct8_add_altivec;
+        c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
+        c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;
 
 #define dspfunc(PFX, IDX, NUM) \
-  c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
-  c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
-  c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
-  c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
-  c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
-  c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
-  c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
-  c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
-  c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
-  c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
-  c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
-  c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
-  c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
-  c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
-  c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
-  c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
+    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
+    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
+    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
+    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
+    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
+    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
+    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
 
-  dspfunc(put_h264_qpel, 0, 16);
-  dspfunc(avg_h264_qpel, 0, 16);
+        dspfunc(put_h264_qpel, 0, 16);
+        dspfunc(avg_h264_qpel, 0, 16);
 #undef dspfunc
-  }
+    }
 }
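
Every hunk in this changeset is whitespace-only, but the first three touch the AltiVec chroma motion compensation, which is easiest to review with the underlying arithmetic in mind. Below is a minimal scalar sketch of what put_no_rnd_h264_chroma_mc8_altivec computes, assuming only what the diff itself shows: the ABCD bilinear weights, the bias of 28 built in v28ss as (1 << 5) - 4, and the final shift by 6 from v6us. The scalar function name is hypothetical and not part of libavcodec.

```c
#include <stdint.h>

/* Hypothetical scalar reference for the AltiVec code above: 8-pixel-wide
 * bilinear chroma interpolation with the no-rounding bias of 28. */
static void put_no_rnd_chroma_mc8_scalar(uint8_t *dst, const uint8_t *src,
                                         int stride, int h, int x, int y)
{
    /* Same weights as the ABCD[4] initializer in the first hunk. */
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++) {
            /* The four taps the vector code keeps in vsrc0ssH..vsrc3ssH. */
            int sum = A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1];
            dst[j] = (uint8_t)((sum + 28) >> 6);  /* v28ss bias, v6us shift */
        }
        dst += stride;
        src += stride;
    }
}
```

Each vec_mladd in the diff accumulates one of these four weighted taps; the two loop variants differ only in how unaligned source rows are loaded and permuted.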
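The VEC_1D_DCT macro in the -398 hunk is one 1-D stage of the H.264 4x4 inverse transform, applied once across rows and once down columns with VEC_TRANSPOSE_4 in between. A scalar transcription of the butterfly described by the macro's own comments (the helper name is hypothetical):

```c
#include <stdint.h>

/* Hypothetical scalar form of VEC_1D_DCT: one 1-D stage of the H.264
 * 4x4 inverse transform, transcribed from the macro's comments. */
static void idct4_1d_stage(const int16_t b[4], int16_t a[4])
{
    /* 1st stage */
    const int z0 = b[0] + b[2];        /* temp[0] = Y[0] + Y[2]   */
    const int z1 = b[0] - b[2];        /* temp[1] = Y[0] - Y[2]   */
    const int z2 = (b[1] >> 1) - b[3]; /* temp[2] = Y[1]/2 - Y[3] */
    const int z3 = b[1] + (b[3] >> 1); /* temp[3] = Y[1] + Y[3]/2 */

    /* 2nd stage: output */
    a[0] = (int16_t)(z0 + z3);         /* x[0] = temp[0] + temp[3] */
    a[1] = (int16_t)(z1 + z2);         /* x[1] = temp[1] + temp[2] */
    a[2] = (int16_t)(z1 - z2);         /* x[2] = temp[1] - temp[2] */
    a[3] = (int16_t)(z0 - z3);         /* x[3] = temp[0] - temp[3] */
}
```

The vec_sra calls on signed 16-bit lanes correspond to the arithmetic right shifts here.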
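The loop-filter hunks only insert a space after `if`, but the guarded expression relies on a sign trick: tc0 holds four int8_t values, and their bitwise AND is negative exactly when all four sign bits are set, i.e. when filtering is disabled for every edge. A one-line sketch of the test (helper name hypothetical):

```c
#include <stdint.h>

/* The AND of four int8_t values is negative iff all four are negative,
 * which is how the loop filters above skip fully-disabled edges. */
static int all_tc0_negative(const int8_t tc0[4])
{
    return (tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0;
}
```

For example, {-1, -1, 3, -1} ANDs to 3, which is non-negative, so the vertical filter proceeds and the horizontal one does not return early.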