comparison: ppc/gmc_altivec.c @ 7333:a8a79f5385f6 (libavcodec)
cosmetics: Reformat PPC code in libavcodec according to style guidelines.
This includes indentation changes, comment reformatting, consistent brace
placement and some prettyprinting.
author:   diego
date:     Sun, 20 Jul 2008 18:58:30 +0000
parents:  f7cbb7733146
children: 7cee7292d5cc
7332:b1003e468c3d | 7333:a8a79f5385f6 |
34 #define GMC1_PERF_COND (h==8) | 34 #define GMC1_PERF_COND (h==8) |
35 void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder) | 35 void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */, int stride, int h, int x16, int y16, int rounder) |
36 { | 36 { |
37 POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND); | 37 POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND); |
38 const DECLARE_ALIGNED_16(unsigned short, rounder_a[8]) = | 38 const DECLARE_ALIGNED_16(unsigned short, rounder_a[8]) = |
39 {rounder, rounder, rounder, rounder, | 39 {rounder, rounder, rounder, rounder, |
40 rounder, rounder, rounder, rounder}; | 40 rounder, rounder, rounder, rounder}; |
41 const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) = | 41 const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) = |
42 { | 42 { |
43 (16-x16)*(16-y16), /* A */ | 43 (16-x16)*(16-y16), /* A */ |
44 ( x16)*(16-y16), /* B */ | 44 ( x16)*(16-y16), /* B */ |
45 (16-x16)*( y16), /* C */ | 45 (16-x16)*( y16), /* C */ |
46 ( x16)*( y16), /* D */ | 46 ( x16)*( y16), /* D */ |
47 0, 0, 0, 0 /* padding */ | 47 0, 0, 0, 0 /* padding */ |
48 }; | 48 }; |
49 register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0); | 49 register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0); |
50 register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8); | 50 register const vector unsigned short vcsr8 = (const vector unsigned short)vec_splat_u16(8); |
51 register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD; | 51 register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD; |
52 register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD; | 52 register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD; |
53 int i; | 53 int i; |
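
The ABCD table is the heart of the routine: the four entries are bilinear weights in sixteenth-pel steps, they always sum to 16*16 = 256, and the four zeros merely pad the array to a full 16-byte vector. Because the weights sum to 256, the weighted sum can later be brought back to pixel range with a plain right shift by 8 (the vcsr8 constant). For reference, a scalar sketch of what the routine computes, modeled on the plain C fallback elsewhere in libavcodec (the function name is illustrative):

    #include <stdint.h>

    static void gmc1_scalar(uint8_t *dst, const uint8_t *src,
                            int stride, int h, int x16, int y16, int rounder)
    {
        const int A = (16 - x16) * (16 - y16);
        const int B = (     x16) * (16 - y16);
        const int C = (16 - x16) * (     y16);
        const int D = (     x16) * (     y16);
        int i, j;

        for (i = 0; i < h; i++) {
            /* eight output pixels per row, each a blend of 4 neighbors */
            for (j = 0; j < 8; j++)
                dst[j] = (A * src[j]          + B * src[j + 1]          +
                          C * src[j + stride] + D * src[j + stride + 1] +
                          rounder) >> 8;
            dst += stride;
            src += stride;
        }
    }

Eight pixels times four products per row is where the "32 int muls & 32 int adds" mentioned in a comment further down comes from.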
72 // as the 'src' of the next. | 72 // as the 'src' of the next. |
73 src_0 = vec_ld(0, src); | 73 src_0 = vec_ld(0, src); |
74 src_1 = vec_ld(16, src); | 74 src_1 = vec_ld(16, src); |
75 srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src)); | 75 srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src)); |
76 | 76 |
77 if (src_really_odd != 0x0000000F) | 77 if (src_really_odd != 0x0000000F) { |
78 { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector. | 78 // if src & 0xF == 0xF, then (src+1) is properly aligned |
79 srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src)); | 79 // on the second vector. |
80 } | 80 srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src)); |
81 else | 81 } else { |
82 { | 82 srcvB = src_1; |
83 srcvB = src_1; | |
84 } | 83 } |
85 srcvA = vec_mergeh(vczero, srcvA); | 84 srcvA = vec_mergeh(vczero, srcvA); |
86 srcvB = vec_mergeh(vczero, srcvB); | 85 srcvB = vec_mergeh(vczero, srcvB); |
87 | 86 |
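
The two vec_ld()s plus vec_lvsl()/vec_perm() above are the standard AltiVec idiom for reading 16 bytes from an unaligned address: vec_ld() silently truncates its effective address to a 16-byte boundary, so the code fetches the two aligned quadwords straddling src and permutes the wanted bytes into a single register. The 0x0F special case exists because both loads are made relative to src: when src + 1 is itself aligned, vec_lvsl(1, src) yields the identity mask and vec_perm() would hand back src_0 instead of the desired src_1. A self-contained sketch of the idiom, with a hypothetical helper name:

    #include <altivec.h>
    #include <stdint.h>

    /* Unaligned 16-byte load: fetch the two aligned quadwords around p
     * and shift the wanted bytes into place.  Loading at offset 15
     * rather than 16 avoids touching the next quadword when p is
     * already aligned. */
    static inline vector unsigned char load16_unaligned(const uint8_t *p)
    {
        vector unsigned char hi   = vec_ld(0,  p);
        vector unsigned char lo   = vec_ld(15, p);
        vector unsigned char mask = vec_lvsl(0, p);
        return vec_perm(hi, lo, mask);
    }

The loop cannot call such a helper once for src and again for src + 1 without issuing its loads twice; reusing one pair of loads for both vectors is what forces the explicit branch.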
88 for(i=0; i<h; i++) | 87 for(i=0; i<h; i++) { |
89 { | 88 dst_odd = (unsigned long)dst & 0x0000000F; |
90 dst_odd = (unsigned long)dst & 0x0000000F; | 89 src_really_odd = (((unsigned long)src) + stride) & 0x0000000F; |
91 src_really_odd = (((unsigned long)src) + stride) & 0x0000000F; | |
92 | 90 |
93 dstv = vec_ld(0, dst); | 91 dstv = vec_ld(0, dst); |
94 | 92 |
95 // we'll be able to pick up our 9 char elements | 93 // we'll be able to pick up our 9 char elements |
96 // at src + stride from those 32 bytes | 94 // at src + stride from those 32 bytes |
97 // then reuse the resulting 2 vectors srcvC and srcvD | 95 // then reuse the resulting 2 vectors srcvC and srcvD |
98 // as the next srcvA and srcvB | 96 // as the next srcvA and srcvB |
99 src_0 = vec_ld(stride + 0, src); | 97 src_0 = vec_ld(stride + 0, src); |
100 src_1 = vec_ld(stride + 16, src); | 98 src_1 = vec_ld(stride + 16, src); |
101 srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src)); | 99 srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src)); |
102 | 100 |
103 if (src_really_odd != 0x0000000F) | 101 if (src_really_odd != 0x0000000F) { |
104 { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector. | 102 // if src & 0xF == 0xF, then (src+1) is properly aligned |
105 srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src)); | 103 // on the second vector. |
106 } | 104 srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src)); |
107 else | 105 } else { |
108 { | 106 srcvD = src_1; |
109 srcvD = src_1; | 107 } |
110 } | |
111 | 108 |
112 srcvC = vec_mergeh(vczero, srcvC); | 109 srcvC = vec_mergeh(vczero, srcvC); |
113 srcvD = vec_mergeh(vczero, srcvD); | 110 srcvD = vec_mergeh(vczero, srcvD); |
114 | 111 |
115 | 112 |
116 // OK, now we (finally) do the math :-) | 113 // OK, now we (finally) do the math :-) |
117 // those four instructions replace 32 int muls & 32 int adds. | 114 // those four instructions replace 32 int muls & 32 int adds. |
118 // isn't AltiVec nice? | 115 // isn't AltiVec nice? |
119 tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV); | 116 tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV); |
120 tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA); | 117 tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA); |
121 tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB); | 118 tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB); |
122 tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC); | 119 tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC); |
123 | 120 |
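
vec_mladd(a, b, c) computes a*b + c independently in each of the eight 16-bit lanes, keeping only the low 16 bits of each product-sum. Because the earlier vec_mergeh(vczero, ...) zero-extended every pixel byte into its own 16-bit lane, chaining the accumulator operand through the four calls yields the complete weighted sum for eight pixels at once. The modular arithmetic loses nothing here: the weights sum to 256 and pixels are at most 255, so the true sum stays below 65536 for any rounder under 256. A scalar model of a single lane (hypothetical function name):

    #include <stdint.h>

    /* One 16-bit lane of the four chained vec_mladd() calls. */
    static uint16_t mladd_lane(uint16_t sA, uint16_t sB,
                               uint16_t sC, uint16_t sD,
                               uint16_t A, uint16_t B,
                               uint16_t C, uint16_t D,
                               uint16_t rounder)
    {
        uint16_t t = (uint16_t)(sA * A + rounder); /* tempA */
        t = (uint16_t)(sB * B + t);                /* tempB */
        t = (uint16_t)(sC * C + t);                /* tempC */
        t = (uint16_t)(sD * D + t);                /* tempD */
        return t;
    }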
124 srcvA = srcvC; | 121 srcvA = srcvC; |
125 srcvB = srcvD; | 122 srcvB = srcvD; |
126 | 123 |
127 tempD = vec_sr(tempD, vcsr8); | 124 tempD = vec_sr(tempD, vcsr8); |
128 | 125 |
129 dstv2 = vec_pack(tempD, (vector unsigned short)vczero); | 126 dstv2 = vec_pack(tempD, (vector unsigned short)vczero); |
130 | 127 |
131 if (dst_odd) | 128 if (dst_odd) { |
132 { | 129 dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1)); |
133 dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1)); | 130 } else { |
134 } | 131 dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3)); |
135 else | 132 } |
136 { | |
137 dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3)); | |
138 } | |
139 | 133 |
140 vec_st(dstv2, 0, dst); | 134 vec_st(dstv2, 0, dst); |
141 | 135 |
142 dst += stride; | 136 dst += stride; |
143 src += stride; | 137 src += stride; |
144 } | 138 } |
145 | 139 |
146 POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND); | 140 POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND); |
147 } | 141 } |
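
A final detail: vec_st() can only store a full 16-byte-aligned quadword, while each iteration produces just eight result bytes, which vec_pack() leaves in the low-address half of dstv2. vcprm(), FFmpeg's word-permute helper from util_altivec.h, therefore merges those bytes into whichever half of the destination quadword dst occupies, preserving the other half that was loaded into dstv; since dst is 8-byte aligned, only the two half-quadword cases arise. The same read-modify-write store can be sketched with generic AltiVec operations and no FFmpeg helpers (hypothetical function name):

    #include <altivec.h>
    #include <stdint.h>

    /* Merge 8 result bytes (sitting in the low-address half of res)
     * into the aligned quadword containing dst, leaving the other
     * 8 bytes of that quadword untouched. */
    static void store8_rmw(uint8_t *dst, vector unsigned char res)
    {
        const vector unsigned char zero = vec_splat_u8(0);
        const vector unsigned char ones = vec_splat_u8(-1);
        /* 0x00 in the first half, 0xFF in the second half */
        const vector unsigned char hi_half = vec_sld(zero, ones, 8);
        vector unsigned char old = vec_ld(0, dst);  /* RMW load */
        vector unsigned char merged;

        if ((uintptr_t)dst & 0x0F)
            /* dst is the second half: rotate res down by 8 bytes */
            merged = vec_sel(old, vec_sld(res, res, 8), hi_half);
        else
            /* dst is the first half: keep old's second half */
            merged = vec_sel(res, old, hi_half);

        vec_st(merged, 0, dst);
    }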