/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avutil.h"

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do { \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
        tempA1 = vec_mergeh (src_a, src_e); \
        tempB1 = vec_mergel (src_a, src_e); \
        tempC1 = vec_mergeh (src_b, src_f); \
        tempD1 = vec_mergel (src_b, src_f); \
        tempE1 = vec_mergeh (src_c, src_g); \
        tempF1 = vec_mergel (src_c, src_g); \
        tempG1 = vec_mergeh (src_d, src_h); \
        tempH1 = vec_mergel (src_d, src_h); \
        tempA2 = vec_mergeh (tempA1, tempE1); \
        tempB2 = vec_mergel (tempA1, tempE1); \
        tempC2 = vec_mergeh (tempB1, tempF1); \
        tempD2 = vec_mergel (tempB1, tempF1); \
        tempE2 = vec_mergeh (tempC1, tempG1); \
        tempF2 = vec_mergel (tempC1, tempG1); \
        tempG2 = vec_mergeh (tempD1, tempH1); \
        tempH2 = vec_mergel (tempD1, tempH1); \
        src_a = vec_mergeh (tempA2, tempE2); \
        src_b = vec_mergel (tempA2, tempE2); \
        src_c = vec_mergeh (tempB2, tempF2); \
        src_d = vec_mergel (tempB2, tempF2); \
        src_e = vec_mergeh (tempC2, tempG2); \
        src_f = vec_mergel (tempC2, tempG2); \
        src_g = vec_mergeh (tempD2, tempH2); \
        src_h = vec_mergel (tempD2, tempH2); \
    } while (0)
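
/* The macro above is a three-stage merge network: vec_mergeh/vec_mergel
   interleave rows at distance 4, then 2, then 1 (log2(8) rounds).
   Usage sketch (illustrative only): given eight "vector signed short"
   rows r0..r7 already loaded,
       ALTIVEC_TRANSPOSE_8x8_SHORT(r0, r1, r2, r3, r4, r5, r6, r7);
   transposes them in place. */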


static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, but unfortunately
      this is not always true.
    */
    DECLARE_ALIGNED(16, short, data[8]);
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);

    data[0] = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    data[1] = data[0] * 2 + 1;
    data[2] = c->QP * 2;
    data[3] = c->QP * 4;
    vector signed short v_data = vec_ld(0, data);
    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);
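    /* AltiVec has no direct scalar-to-vector move, so the thresholds are
       written to a 16-byte-aligned scratch array, loaded once with vec_ld
       and broadcast to all lanes with vec_splat. */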

    src2 += stride * 4;

    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3, v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;

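/* vec_ld only loads from 16-byte-aligned addresses, so LOAD_LINE below
   fetches a possibly misaligned line as two aligned vectors (the second
   only when two_vectors says it is needed) and realigns them with a
   vec_lvsl-generated permute; LOAD_LINE_ALIGNED skips all of that. */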
#define LOAD_LINE(i) \
    register int j##i = i * stride; \
    vector unsigned char perm##i = vec_lvsl(j##i, src2); \
    const vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \
    vector unsigned char v_srcA2##i; \
    if (two_vectors) \
        v_srcA2##i = vec_ld(j##i + 16, src2); \
    const vector unsigned char v_srcA##i = \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \
    v_srcAss##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i)

#define LOAD_LINE_ALIGNED(i) \
    register int j##i = i * stride; \
    const vector unsigned char v_srcA##i = vec_ld(j##i, src2); \
    v_srcAss##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i)

    // Special-casing the aligned case is worthwhile, as all calls from
    // the (transposed) horizontal deblocks will be aligned, in addition
    // to the naturally aligned vertical deblocks.
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

#define ITER(i, j) \
    const vector signed short v_diff##i = \
        vec_sub(v_srcAss##i, v_srcAss##j); \
    const vector signed short v_sum##i = \
        vec_add(v_diff##i, v_dcOffset); \
    const vector signed short v_comp##i = \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold); \
    const vector signed short v_part##i = vec_and(mask, v_comp##i); \
    v_numEq = vec_sum4s(v_part##i, v_numEq);

    ITER(0, 1);
    ITER(1, 2);
    ITER(2, 3);
    ITER(3, 4);
    ITER(4, 5);
    ITER(5, 6);
    ITER(6, 7);
#undef ITER
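
    /* Each ITER flags the pixels whose difference to the same pixel one
       line below is within the DC threshold: adding v_dcOffset turns the
       two-sided |diff| test into a single unsigned compare, and vec_sum4s
       accumulates the 0/1 flags so v_numEq ends up counting "flat" pixels. */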

    v_numEq = vec_sums(v_numEq, zero);

    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold)
    {
        const vector unsigned char mmoP1 = (const vector unsigned char)
            AVV(0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
                0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B);
        const vector unsigned char mmoP2 = (const vector unsigned char)
            AVV(0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
                0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);

        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}

static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, but unfortunately
      this is not always true. Quite a lot of loads/stores
      could be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    DECLARE_ALIGNED(16, short, qp[8]);
    qp[0] = c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

    src2 += stride*3;

    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, vbA1, vbA2, vbA3, vbA4, vbA5, vbA6, vbA7, vbA8, vbA9;
    vector unsigned char vbB0, vbB1, vbB2, vbB3, vbB4, vbB5, vbB6, vbB7, vbB8, vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;

#define LOAD_LINE(i) \
    const vector unsigned char perml##i = \
        vec_lvsl(i * stride, src2); \
    vbA##i = vec_ld(i * stride, src2); \
    vbB##i = vec_ld(i * stride + 16, src2); \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \
    vb##i = \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i) \
    register int j##i = i * stride; \
    vbT##i = vec_ld(j##i, src2); \
    vb##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)vbT##i)

    // Special-casing the aligned case is worthwhile, as all calls from
    // the (transposed) horizontal deblocks will be aligned, in addition
    // to the naturally aligned vertical deblocks.
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

    const vector unsigned short v_2 = vec_splat_u16(2);
    const vector unsigned short v_4 = vec_splat_u16(4);

    const vector signed short v_diff01 = vec_sub(vb0, vb1);
    const vector unsigned short v_cmp01 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
    const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
    const vector signed short v_diff89 = vec_sub(vb8, vb9);
    const vector unsigned short v_cmp89 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
    const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

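    /* v_sumsB0..v_sumsB9 below maintain a sliding 9-tap window sum: each
       step subtracts the sample that leaves the window and adds the one
       that enters it instead of re-summing all nine taps; v_first/v_last
       extend the block edges (the outer line is used only when it is
       close enough to the edge line). */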
    const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
    const vector signed short temp02 = vec_add(vb2, vb3);
    const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
    const vector signed short v_sumsB0 = vec_add(temp02, temp03);

    const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
    const vector signed short v_sumsB1 = vec_add(temp11, vb4);

    const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
    const vector signed short v_sumsB2 = vec_add(temp21, vb5);

    const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
    const vector signed short v_sumsB3 = vec_add(temp31, vb6);

    const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
    const vector signed short v_sumsB4 = vec_add(temp41, vb7);

    const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
    const vector signed short v_sumsB5 = vec_add(temp51, vb8);

    const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
    const vector signed short v_sumsB6 = vec_add(temp61, v_last);

    const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
    const vector signed short v_sumsB7 = vec_add(temp71, v_last);

    const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
    const vector signed short v_sumsB8 = vec_add(temp81, v_last);

    const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
    const vector signed short v_sumsB9 = vec_add(temp91, v_last);

#define COMPUTE_VR(i, j, k) \
    const vector signed short temps1##i = \
        vec_add(v_sumsB##i, v_sumsB##k); \
    const vector signed short temps2##i = \
        vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \
    const vector signed short vr##j = vec_sra(temps2##i, v_4)
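
    /* COMPUTE_VR(i, j, k) yields vr[j] = (sums[i] + sums[k] + 2*vb[j]) >> 4,
       a 16-sample weighted average centred on line j (the rounding bias was
       already folded into v_sumsB0 above). */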

    COMPUTE_VR(0, 1, 2);
    COMPUTE_VR(1, 2, 3);
    COMPUTE_VR(2, 3, 4);
    COMPUTE_VR(3, 4, 5);
    COMPUTE_VR(4, 5, 6);
    COMPUTE_VR(5, 6, 7);
    COMPUTE_VR(6, 7, 8);
    COMPUTE_VR(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

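/* Storing to a possibly unaligned destination is a read-modify-write:
   the packed result is rotated into place with a vec_lvsr permute, a 0/-1
   byte mask built from the same permute selects which bytes to replace,
   and vec_sel merges the new bytes into the two previously loaded vectors
   before the two aligned vec_st writes. */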
#define PACK_AND_STORE(i) \
    const vector unsigned char perms##i = \
        vec_lvsr(i * stride, src2); \
    const vector unsigned char vf##i = \
        vec_packsu(vr##i, (vector signed short)zero); \
    const vector unsigned char vg##i = \
        vec_perm(vf##i, vbT##i, permHH); \
    const vector unsigned char mask##i = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i = \
        vec_perm(vg##i, vg##i, perms##i); \
    const vector unsigned char svA##i = \
        vec_sel(vbA##i, vg2##i, mask##i); \
    const vector unsigned char svB##i = \
        vec_sel(vg2##i, vbB##i, mask##i); \
    vec_st(svA##i, i * stride, src2); \
    vec_st(svB##i, i * stride + 16, src2)

#define PACK_AND_STORE_ALIGNED(i) \
    const vector unsigned char vf##i = \
        vec_packsu(vr##i, (vector signed short)zero); \
    const vector unsigned char vg##i = \
        vec_perm(vf##i, vbT##i, permHH); \
    vec_st(vg##i, i * stride, src2)

    // Special-casing the aligned case is worthwhile, as all calls from
    // the (transposed) horizontal deblocks will be aligned, in addition
    // to the naturally aligned vertical deblocks.
    if (properStride && srcAlign) {
        PACK_AND_STORE_ALIGNED(1);
        PACK_AND_STORE_ALIGNED(2);
        PACK_AND_STORE_ALIGNED(3);
        PACK_AND_STORE_ALIGNED(4);
        PACK_AND_STORE_ALIGNED(5);
        PACK_AND_STORE_ALIGNED(6);
        PACK_AND_STORE_ALIGNED(7);
        PACK_AND_STORE_ALIGNED(8);
    } else {
        PACK_AND_STORE(1);
        PACK_AND_STORE(2);
        PACK_AND_STORE(3);
        PACK_AND_STORE(4);
        PACK_AND_STORE(5);
        PACK_AND_STORE(6);
        PACK_AND_STORE(7);
        PACK_AND_STORE(8);
    }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
}



static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, but unfortunately
      this is not always true. Quite a lot of loads/stores
      could be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    DECLARE_ALIGNED(16, short, qp[8]);
    qp[0] = 8*c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

#define LOAD_LINE(i) \
    const vector unsigned char perm##i = \
        vec_lvsl(i * stride, src2); \
    const vector unsigned char vbA##i = \
        vec_ld(i * stride, src2); \
    const vector unsigned char vbB##i = \
        vec_ld(i * stride + 16, src2); \
    const vector unsigned char vbT##i = \
        vec_perm(vbA##i, vbB##i, perm##i); \
    const vector signed short vb##i = \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

    src2 += stride*3;

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE

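    /* Roughly, this is the MPEG-style default deblocking filter: the middle
       "energy" mE = 2*(l3-l6) + 5*(l5-l4) is compared with the left and
       right energies, d = (5*max(|mE| - min(|lE|,|rE|), 0) + 32) >> 6 takes
       the sign opposing mE, is clamped by q = (l4-l5)/2, and is applied to
       l4 and l5 only where |mE| < 8*QP. */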
    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);
    /* middle energy */
    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
    /* d */
    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));
    /* q */
    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* for a shift right to behave like /2, we need to add one
       to all negative integers */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
    /* clamp */
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);

    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define STORE(i) \
    const vector unsigned char perms##i = \
        vec_lvsr(i * stride, src2); \
    const vector unsigned char vg##i = \
        vec_perm(st##i, vbT##i, permHH); \
    const vector unsigned char mask##i = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i = \
        vec_perm(vg##i, vg##i, perms##i); \
    const vector unsigned char svA##i = \
        vec_sel(vbA##i, vg2##i, mask##i); \
    const vector unsigned char svB##i = \
        vec_sel(vg2##i, vbB##i, mask##i); \
    vec_st(svA##i, i * stride, src2); \
    vec_st(svB##i, i * stride + 16, src2)

    STORE(4);
    STORE(5);
}

static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      This code makes no assumptions about src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, but unfortunately
      this is not always true. Quite a lot of loads/stores
      could be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *srcCopy = src;
    DECLARE_ALIGNED(16, uint8_t, dt[16]);
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt;
    dt[0] = deringThreshold;
    v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i) \
    const vector unsigned char perm##i = \
        vec_lvsl(i * stride, srcCopy); \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy); \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy); \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE

    vector unsigned char v_avg;
    {
        const vector unsigned char trunc_perm = (vector unsigned char)
            AVV(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
                0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18);
        const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
        const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
        const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
        const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

#define EXTRACT(op) do { \
    const vector unsigned char s##op##_1 = vec_##op(trunc_src12, trunc_src34); \
    const vector unsigned char s##op##_2 = vec_##op(trunc_src56, trunc_src78); \
    const vector unsigned char s##op##_6 = vec_##op(s##op##_1, s##op##_2); \
    const vector unsigned char s##op##_8h = vec_mergeh(s##op##_6, s##op##_6); \
    const vector unsigned char s##op##_8l = vec_mergel(s##op##_6, s##op##_6); \
    const vector unsigned char s##op##_9 = vec_##op(s##op##_8h, s##op##_8l); \
    const vector unsigned char s##op##_9h = vec_mergeh(s##op##_9, s##op##_9); \
    const vector unsigned char s##op##_9l = vec_mergel(s##op##_9, s##op##_9); \
    const vector unsigned char s##op##_10 = vec_##op(s##op##_9h, s##op##_9l); \
    const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_11 = vec_##op(s##op##_10h, s##op##_10l); \
    const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \
    const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \
    v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)

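        /* EXTRACT reduces the 8x8 block (bytes 1..8 of lines 1..8, gathered
           by trunc_perm) to a single value splatted across the vector: each
           round op-combines the vector with a mergeh/mergel-shuffled copy
           of itself, halving the number of distinct candidates log2-style. */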
        vector unsigned char v_min;
        vector unsigned char v_max;
        EXTRACT(min);
        EXTRACT(max);
#undef EXTRACT

        if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
            return;

        v_avg = vec_avg(v_min, v_max);
    }

    DECLARE_ALIGNED(16, signed int, S[8]);
    {
        const vector unsigned short mask1 = (vector unsigned short)
            AVV(0x0001, 0x0002, 0x0004, 0x0008,
                0x0010, 0x0020, 0x0040, 0x0080);
        const vector unsigned short mask2 = (vector unsigned short)
            AVV(0x0100, 0x0200, 0x0000, 0x0000,
                0x0000, 0x0000, 0x0000, 0x0000);

        const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
        const vector unsigned int vuint32_1 = vec_splat_u32(1);

#define COMPARE(i) \
    vector signed int sum##i; \
    do { \
        const vector unsigned char cmp##i = \
            (vector unsigned char)vec_cmpgt(src##i, v_avg); \
        const vector unsigned short cmpHi##i = \
            (vector unsigned short)vec_mergeh(cmp##i, cmp##i); \
        const vector unsigned short cmpLi##i = \
            (vector unsigned short)vec_mergel(cmp##i, cmp##i); \
        const vector signed short cmpHf##i = \
            (vector signed short)vec_and(cmpHi##i, mask1); \
        const vector signed short cmpLf##i = \
            (vector signed short)vec_and(cmpLi##i, mask2); \
        const vector signed int sump##i = vec_sum4s(cmpHf##i, zero); \
        const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \
        sum##i = vec_sums(sumq##i, zero); } while (0)

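        /* COMPARE builds a per-line bitmask of "brighter than average"
           pixels: the byte compare result is widened to shorts, ANDed with
           per-lane powers of two (mask1/mask2) and summed across the vector,
           so bit k of sum##i corresponds to pixel k of line i. */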
        COMPARE(0);
        COMPARE(1);
        COMPARE(2);
        COMPARE(3);
        COMPARE(4);
        COMPARE(5);
        COMPARE(6);
        COMPARE(7);
        COMPARE(8);
        COMPARE(9);
#undef COMPARE

        vector signed int sumA2;
        vector signed int sumB2;
        {
            const vector signed int sump02 = vec_mergel(sum0, sum2);
            const vector signed int sump13 = vec_mergel(sum1, sum3);
            const vector signed int sumA = vec_mergel(sump02, sump13);

            const vector signed int sump46 = vec_mergel(sum4, sum6);
            const vector signed int sump57 = vec_mergel(sum5, sum7);
            const vector signed int sumB = vec_mergel(sump46, sump57);

            const vector signed int sump8A = vec_mergel(sum8, zero);
            const vector signed int sump9B = vec_mergel(sum9, zero);
            const vector signed int sumC = vec_mergel(sump8A, sump9B);

            const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
            const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
            const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
            const vector signed int t2A = vec_or(sumA, tA);
            const vector signed int t2B = vec_or(sumB, tB);
            const vector signed int t2C = vec_or(sumC, tC);
            const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                                  vec_sl(t2A, vuint32_1));
            const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                                  vec_sl(t2B, vuint32_1));
            const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                                  vec_sl(t2C, vuint32_1));
            const vector signed int yA = vec_and(t2A, t3A);
            const vector signed int yB = vec_and(t2B, t3B);
            const vector signed int yC = vec_and(t2C, t3C);

            const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
            const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
            const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
            const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
            const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
            const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
            const vector signed int sumAp = vec_and(yA,
                                                    vec_and(sumAd4, sumAd8));
            const vector signed int sumBp = vec_and(yB,
                                                    vec_and(sumBd4, sumBd8));
            sumA2 = vec_or(sumAp,
                           vec_sra(sumAp,
                                   vuint32_16));
            sumB2 = vec_or(sumBp,
                           vec_sra(sumBp,
                                   vuint32_16));
        }
        vec_st(sumA2, 0, S);
        vec_st(sumB2, 16, S);
    }

    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */

    DECLARE_ALIGNED(16, int, tQP2[4]);
    tQP2[0]= c->QP/2 + 1;
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);

    const vector unsigned char permA1 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
            0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA2 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
            0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA1inc = (vector unsigned char)
        AVV(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char permA2inc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char magic = (vector unsigned char)
        AVV(0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char extractPerm = (vector unsigned char)
        AVV(0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
            0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01);
    const vector unsigned char extractPermInc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
            0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01);
    const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        AVV(0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char eightLeft = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08);


#define F_INIT(i) \
    vector unsigned char tenRightM##i = tenRight; \
    vector unsigned char permA1M##i = permA1; \
    vector unsigned char permA2M##i = permA2; \
    vector unsigned char extractPermM##i = extractPerm

#define F2(i, j, k, l) \
    if (S[i] & (1 << (l+1))) { \
        const vector unsigned char a_##j##_A##l = \
            vec_perm(src##i, src##j, permA1M##i); \
        const vector unsigned char a_##j##_B##l = \
            vec_perm(a_##j##_A##l, src##k, permA2M##i); \
        const vector signed int a_##j##_sump##l = \
            (vector signed int)vec_msum(a_##j##_B##l, magic, \
                                        (vector unsigned int)zero); \
        vector signed int F_##j##_##l = \
            vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4); \
        F_##j##_##l = vec_splat(F_##j##_##l, 3); \
        const vector signed int p_##j##_##l = \
            (vector signed int)vec_perm(src##j, \
                                        (vector unsigned char)zero, \
                                        extractPermM##i); \
        const vector signed int sum_##j##_##l = vec_add( p_##j##_##l, vQP2); \
        const vector signed int diff_##j##_##l = vec_sub( p_##j##_##l, vQP2); \
        vector signed int newpm_##j##_##l; \
        if (vec_all_lt(sum_##j##_##l, F_##j##_##l)) \
            newpm_##j##_##l = sum_##j##_##l; \
        else if (vec_all_gt(diff_##j##_##l, F_##j##_##l)) \
            newpm_##j##_##l = diff_##j##_##l; \
        else newpm_##j##_##l = F_##j##_##l; \
        const vector unsigned char newpm2_##j##_##l = \
            vec_splat((vector unsigned char)newpm_##j##_##l, 15); \
        const vector unsigned char mask##j##l = vec_add(identity, \
                                                        tenRightM##i); \
        src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l); \
    } \
    permA1M##i = vec_add(permA1M##i, permA1inc); \
    permA2M##i = vec_add(permA2M##i, permA2inc); \
    tenRightM##i = vec_sro(tenRightM##i, eightLeft); \
    extractPermM##i = vec_add(extractPermM##i, extractPermInc)

#define ITER(i, j, k) \
    F_INIT(i); \
    F2(i, j, k, 0); \
    F2(i, j, k, 1); \
    F2(i, j, k, 2); \
    F2(i, j, k, 3); \
    F2(i, j, k, 4); \
    F2(i, j, k, 5); \
    F2(i, j, k, 6); \
    F2(i, j, k, 7)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);

#define STORE_LINE(i) \
    const vector unsigned char permST##i = \
        vec_lvsr(i * stride, srcCopy); \
    const vector unsigned char maskST##i = \
        vec_perm((vector unsigned char)zero, \
                 (vector unsigned char)neg1, permST##i); \
    src##i = vec_perm(src##i, src##i, permST##i); \
    sA##i = vec_sel(sA##i, src##i, maskST##i); \
    sB##i = vec_sel(src##i, sB##i, maskST##i); \
    vec_st(sA##i, i * stride, srcCopy); \
    vec_st(sB##i, i * stride + 16, srcCopy)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);

#undef STORE_LINE
#undef ITER
#undef F2
}

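/* The horizontal variants below simply alias the scalar C implementations;
   the transpose helpers at the end of this file make it possible to run
   the vertical AltiVec filters on transposed data instead. */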
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)

static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
                                            uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
{
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

    tempBluredPast[127]= maxNoise[0];
    tempBluredPast[128]= maxNoise[1];
    tempBluredPast[129]= maxNoise[2];

#define LOAD_LINE(src, i) \
    register int j##src##i = i * stride; \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i = \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \
    vector signed short v_##src##Ass##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlured, 0);
    LOAD_LINE(tempBlured, 1);
    LOAD_LINE(tempBlured, 2);
    LOAD_LINE(tempBlured, 3);
    LOAD_LINE(tempBlured, 4);
    LOAD_LINE(tempBlured, 5);
    LOAD_LINE(tempBlured, 6);
    LOAD_LINE(tempBlured, 7);
#undef LOAD_LINE

#define ACCUMULATE_DIFFS(i) \
    vector signed short v_d##i = vec_sub(v_tempBluredAss##i, \
                                         v_srcAss##i); \
    v_dp = vec_msums(v_d##i, v_d##i, v_dp); \
    v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS
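
    /* v_dp accumulates the sum of squared differences between the block
       and its temporally blurred history (vec_msums of d*d), v_sysdp the
       plain signed sum of differences; both are reduced to the scalars
       d and sysd below, and d drives the noise decision. */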

    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    d = (4*d
         +(*(tempBluredPast-256))
         +(*(tempBluredPast-1))+ (*(tempBluredPast+1))
         +(*(tempBluredPast+256))
         +4)>>3;

    *tempBluredPast=i;

    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBluredAss##i = vec_avg(v_tempBluredAss##i, v_srcAss##i);

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBluredAss##i = v_srcAss##i;

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i) \
            const vector signed short v_temp##i = \
                vec_mladd(v_tempBluredAss##i, \
                          vsint16_7, v_srcAss##i); \
            const vector signed short v_temp2##i = \
                vec_add(v_temp##i, vsint16_4); \
            v_tempBluredAss##i = vec_sr(v_temp2##i, vuint16_3)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i) \
            const vector signed short v_temp##i = \
                vec_mladd(v_tempBluredAss##i, \
                          vsint16_3, v_srcAss##i); \
            const vector signed short v_temp2##i = \
                vec_add(v_temp##i, vsint16_2); \
            v_tempBluredAss##i = vec_sr(v_temp2##i, (vector unsigned short)vsint16_2)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define PACK_AND_STORE(src, i) \
    const vector unsigned char perms##src##i = \
        vec_lvsr(i * stride, src); \
    const vector unsigned char vf##src##i = \
        vec_packsu(v_tempBluredAss##i, (vector signed short)zero); \
    const vector unsigned char vg##src##i = \
        vec_perm(vf##src##i, v_##src##A##i, permHH); \
    const vector unsigned char mask##src##i = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i); \
    const vector unsigned char vg2##src##i = \
        vec_perm(vg##src##i, vg##src##i, perms##src##i); \
    const vector unsigned char svA##src##i = \
        vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i); \
    const vector unsigned char svB##src##i = \
        vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i); \
    vec_st(svA##src##i, i * stride, src); \
    vec_st(svB##src##i, i * stride + 16, src)

    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlured, 0);
    PACK_AND_STORE(tempBlured, 1);
    PACK_AND_STORE(tempBlured, 2);
    PACK_AND_STORE(tempBlured, 3);
    PACK_AND_STORE(tempBlured, 4);
    PACK_AND_STORE(tempBlured, 5);
    PACK_AND_STORE(tempBlured, 6);
    PACK_AND_STORE(tempBlured, 7);
#undef PACK_AND_STORE
}

static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j) \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src); \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src); \
    vector unsigned char srcA##i = vec_ld(i * stride, src); \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \
    vector unsigned char srcC##i = vec_ld(j * stride, src); \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src); \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE

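    /* Each source byte is first interleaved with zero (mergeh/mergel against
       the zero vector), then three more merge rounds complete the transpose;
       each 16-byte dst row ends up holding one source column in its first
       8 bytes, zero-padded to the packed 16-byte layout. */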
    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2 = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3 = vec_mergel(tempB, tempJ);
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6 = vec_mergeh(tempD, tempL);
    vector unsigned char temp7 = vec_mergel(tempD, tempL);
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);
    temp8 = vec_mergeh(tempE, tempM);
    temp9 = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);

    vec_st(temp0, 0, dst);
    vec_st(temp1, 16, dst);
    vec_st(temp2, 32, dst);
    vec_st(temp3, 48, dst);
    vec_st(temp4, 64, dst);
    vec_st(temp5, 80, dst);
    vec_st(temp6, 96, dst);
    vec_st(temp7, 112, dst);
    vec_st(temp8, 128, dst);
    vec_st(temp9, 144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}

static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j) \
    vector unsigned char src##i = vec_ld(i * 16, src); \
    vector unsigned char src##j = vec_ld(j * 16, src)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);


    const vector signed char neg1 = vec_splat_s8(-1);
#define STORE_DOUBLE_LINE(i, j) \
    vector unsigned char dstA##i = vec_ld(i * stride, dst); \
    vector unsigned char dstB##i = vec_ld(i * stride + 16, dst); \
    vector unsigned char dstA##j = vec_ld(j * stride, dst); \
    vector unsigned char dstB##j = vec_ld(j * stride + 16, dst); \
    vector unsigned char align##i = vec_lvsr(i * stride, dst); \
    vector unsigned char align##j = vec_lvsr(j * stride, dst); \
    vector unsigned char mask##i = vec_perm(zero, (vector unsigned char)neg1, align##i); \
    vector unsigned char mask##j = vec_perm(zero, (vector unsigned char)neg1, align##j); \
    vector unsigned char dstR##i = vec_perm(temp##i, temp##i, align##i); \
    vector unsigned char dstR##j = vec_perm(temp##j, temp##j, align##j); \
    vector unsigned char dstAF##i = vec_sel(dstA##i, dstR##i, mask##i); \
    vector unsigned char dstBF##i = vec_sel(dstR##i, dstB##i, mask##i); \
    vector unsigned char dstAF##j = vec_sel(dstA##j, dstR##j, mask##j); \
    vector unsigned char dstBF##j = vec_sel(dstR##j, dstB##j, mask##j); \
    vec_st(dstAF##i, i * stride, dst); \
    vec_st(dstBF##i, i * stride + 16, dst); \
    vec_st(dstAF##j, j * stride, dst); \
    vec_st(dstBF##j, j * stride + 16, dst)

    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
}