Mercurial > libavcodec.hg
comparison: ppc/dsputil_snow_altivec.c @ 3222:38ee2f1d386d (libavcodec)

altivec support for snow

author:   lu_zero
date:     Mon, 27 Mar 2006 12:45:27 +0000
parents:
children: 037570194464
3221:ed485fd3f984 → 3222:38ee2f1d386d
/*
 * Altivec optimized snow DSP utils
 * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "../dsputil.h"

#include "gcc_fixes.h"
#include "dsputil_altivec.h"
#include "../snow.h"

#undef NDEBUG
#include <assert.h>


//FIXME remove this replication
#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num)))
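/* A slice_buffer caches decoded wavelet lines: the macro above returns a
 * cached line when one exists, otherwise it pulls a fresh buffer off the
 * internal data stack via slice_buffer_load_line() below. */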

static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line)
{
    int offset;
    DWTELEM * buffer;

//  av_log(NULL, AV_LOG_DEBUG, "Cache hit: %d\n", line);

    assert(buf->data_stack_top >= 0);
//  assert(!buf->line[line]);
    if (buf->line[line])
        return buf->line[line];

    offset = buf->line_width * line;
    buffer = buf->data_stack[buf->data_stack_top];
    buf->data_stack_top--;
    buf->line[line] = buffer;

//  av_log(NULL, AV_LOG_DEBUG, "slice_buffer_load_line: line: %d remaining: %d\n", line, buf->data_stack_top + 1);

    return buffer;
}


//altivec code

void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width)
{
    const int w2= (width+1)>>1;
    DECLARE_ALIGNED_16(DWTELEM, temp[(width>>1)]);
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;
    vector signed int t1, t2, x, y, tmp1, tmp2;
    vector signed int *vbuf, *vtmp;
    vector unsigned char align;

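    /* Unaligned access pattern used throughout this file: vec_ld() only
     * performs 16-byte-aligned loads, so each unaligned vector is fetched
     * with two loads (offsets 0 and 15), a vec_lvsl() permute mask, and a
     * vec_perm() that splices the wanted 16 bytes into one register. */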
    { // Lift 0
        DWTELEM * const ref = b + w2 - 1;
        DWTELEM b_0 = b[0];
        vbuf = (vector signed int *)b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1= vec_perm(tmp1, tmp2, align);

        i = 0;

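        /* Vectorised form of b[i] -= (3*(ref[i] + ref[i+1]) + 4) >> 3 (the
         * scalar reference is kept in the #if 0 branch), four coefficients
         * per vector, unrolled to 16 elements per pass. */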
        for (i=0; i<w_l-15; i+=16) {
#if 0
            b[i+0] = b[i+0] - ((3 * (ref[i+0] + ref[i+1]) + 4) >> 3);
            b[i+1] = b[i+1] - ((3 * (ref[i+1] + ref[i+2]) + 4) >> 3);
            b[i+2] = b[i+2] - ((3 * (ref[i+2] + ref[i+3]) + 4) >> 3);
            b[i+3] = b[i+3] - ((3 * (ref[i+3] + ref[i+4]) + 4) >> 3);
#else

            tmp1 = vec_ld (0, ref+4+i);
            tmp2 = vec_ld (15, ref+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            tmp1 = vec_ld (0, ref+8+i);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));

            tmp2 = vec_ld (15, ref+8+i);

            *vbuf = vec_sub(*vbuf, y);

            t1=t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            tmp1 = vec_ld (0, ref+12+i);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));

            tmp2 = vec_ld (15, ref+12+i);

            *vbuf = vec_sub(*vbuf, y);

            t1=t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            tmp1 = vec_ld (0, ref+16+i);

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));

            tmp2 = vec_ld (15, ref+16+i);

            *vbuf = vec_sub(*vbuf, y);

            t1=t2;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_add(vec_add(y,y),y);

            vbuf++;

            y = vec_add(y, vec_splat_s32(4));
            y = vec_sra(y, vec_splat_u32(3));
            *vbuf = vec_sub(*vbuf, y);

            t1=t2;

            vbuf++;

#endif
        }

        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
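        /* Lift 1: dst[i] -= b[i] + b[i+1].  Run scalar until dst is
         * 16-byte aligned, then process four coefficients per vector. */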
        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }

        align = vec_lvsl(0, b+i);
        tmp1 = vec_ld(0, b+i);
        vbuf = (vector signed int*) (dst + i);
        tmp2 = vec_ld(15, b+i);

        t1 = vec_perm(tmp1, tmp2, align);

        for (; i<w_r-3; i+=4) {

#if 0
            dst[i]   = dst[i]   - (b[i]   + b[i + 1]);
            dst[i+1] = dst[i+1] - (b[i+1] + b[i + 2]);
            dst[i+2] = dst[i+2] - (b[i+2] + b[i + 3]);
            dst[i+3] = dst[i+3] - (b[i+3] + b[i + 4]);
#else

            tmp1 = vec_ld(0, b+4+i);
            tmp2 = vec_ld(15, b+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1, vec_sld(t1,t2,4));
            *vbuf = vec_sub (*vbuf, y);

            vbuf++;

            t1 = t2;

#endif

        }

        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;
        DWTELEM b_0 = b[0];
        vbuf= (vector signed int *) b;

        tmp1 = vec_ld (0, ref);
        align = vec_lvsl (0, ref);
        tmp2 = vec_ld (15, ref);
        t1= vec_perm(tmp1, tmp2, align);

        i = 0;
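        /* Lift 2: b[i] -= ((8 - (ref[i] + ref[i+1])) - (b[i] << 2)) >> 4,
         * again unrolled to 16 elements per pass. */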
        for (; i<w_l-15; i+=16) {
#if 0
            b[i+0] = b[i+0] - (((8 - (ref[i+0] + ref[i+1])) - (b[i+0] << 2)) >> 4);
            b[i+1] = b[i+1] - (((8 - (ref[i+1] + ref[i+2])) - (b[i+1] << 2)) >> 4);
            b[i+2] = b[i+2] - (((8 - (ref[i+2] + ref[i+3])) - (b[i+2] << 2)) >> 4);
            b[i+3] = b[i+3] - (((8 - (ref[i+3] + ref[i+4])) - (b[i+3] << 2)) >> 4);
#else
            tmp1 = vec_ld (0, ref+4+i);
            tmp2 = vec_ld (15, ref+4+i);

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+8+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+8+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+12+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+12+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            tmp1 = vec_ld (0, ref+16+i);

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));

            tmp2 = vec_ld (15, ref+16+i);

            *vbuf = vec_sub( *vbuf, y);

            t1 = t2;

            vbuf++;

            t2 = vec_perm(tmp1, tmp2, align);

            y = vec_add(t1,vec_sld(t1,t2,4));
            y = vec_sub(vec_splat_s32(8),y);

            t1 = t2;

            x = vec_sl(*vbuf,vec_splat_u32(2));
            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
            *vbuf = vec_sub( *vbuf, y);

            vbuf++;

#endif
        }

        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
    }

    { // Lift 3
        DWTELEM * const src = b+w2;

        vbuf = (vector signed int *)b;
        vtmp = (vector signed int *)temp;

        i = 0;
        align = vec_lvsl(0, src);

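        /* Lift 3: temp[i] = src[i] - ((-3*(b[i] + b[i+1])) >> 1).  The
         * multiply by -3 is a negate (vec_sub from zero, hence the "bad!"
         * remark) followed by x + (x + x). */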
        for (; i<w_r-3; i+=4) {
#if 0
            temp[i]   = src[i]   - ((-3*(b[i]   + b[i+1]))>>1);
            temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1);
            temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1);
            temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1);
#else
            tmp1 = vec_ld(0,src+i);
            t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4));
            tmp2 = vec_ld(15,src+i);
            t1 = vec_sub(vec_splat_s32(0),t1); //bad!
            t1 = vec_add(t1,vec_add(t1,t1));
            t2 = vec_perm(tmp1 ,tmp2 ,align);
            t1 = vec_sra(t1,vec_splat_u32(1));
            vbuf++;
            *vtmp = vec_sub(t2,t1);
            vtmp++;

#endif

        }

        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1);
    }

    {
        //Interleave
        int a;
        vector signed int *t = (vector signed int *)temp,
                          *v = (vector signed int *)b;

        snow_interleave_line_header(&i, width, b, temp);

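        /* Interleave low-pass (front half of b) and high-pass (temp)
         * coefficients back into b, walking backwards so nothing is
         * overwritten before it is read: a scalar loop until i lands on a
         * 16-element boundary, then vec_mergeh/vec_mergel four at a time. */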
        for (; (i & 0xE) != 0xE; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=14; i>=0; i-=16){
            a=i/4;

            v[a+3]=vec_mergel(v[(a>>1)+1],t[(a>>1)+1]);
            v[a+2]=vec_mergeh(v[(a>>1)+1],t[(a>>1)+1]);
            v[a+1]=vec_mergel(v[a>>1],t[a>>1]);
            v[a]=vec_mergeh(v[a>>1],t[a>>1]);

        }

    }
}

void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width)
{
    int i, w4 = width/4;
    vector signed int *v0, *v1, *v2, *v3, *v4, *v5;
    vector signed int t1, t2;

    v0=(vector signed int *)b0;
    v1=(vector signed int *)b1;
    v2=(vector signed int *)b2;
    v3=(vector signed int *)b3;
    v4=(vector signed int *)b4;
    v5=(vector signed int *)b5;

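    /* The four vertical lifting steps, four coefficients per vector; the
     * scalar forms are kept in the #if 0 branch and in the lead-out loop
     * that handles the last width%4 columns. */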
    for (i=0; i< w4;i++)
    {

#if 0
        b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
        b3[i] -= ((b2[i] + b4[i]));
        b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4;
        b1[i] += (3*(b0[i] + b2[i]))>>1;
#else
        t1 = vec_add(v3[i], v5[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        t1 = vec_add(t2, vec_splat_s32(4));
        v4[i] = vec_sub(v4[i], vec_sra(t1,vec_splat_u32(3)));

        v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i]));

        t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i]));
        t2 = vec_sl(v2[i], vec_splat_u32(2));
        v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2),vec_splat_u32(4)));
        t1 = vec_add(v0[i], v2[i]);
        t2 = vec_add(t1, vec_add(t1,t1));
        v1[i] = vec_add(v1[i], vec_sra(t2,vec_splat_u32(1)));

#endif
    }

    for(i*=4; i < width; i++)
    {
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
}


static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
                                                  const int obmc_stride,
                                                  uint8_t * * block, int b_w,
                                                  int b_h, int src_x, int src_y,
                                                  int src_stride, slice_buffer * sb,
                                                  int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector bool int mask;
    vector signed int vs;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
#if 1
        vector unsigned char ob1;
        vector unsigned char ob2;
        vector unsigned char ob3;
        vector unsigned char ob4;

#endif
        DECLARE_ALIGNED_16(int, vbuf[16]);
        vector signed int *v = (vector signed int *)vbuf, *d;

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

#if 0
        for(x=0; x<b_w; x++){
            vbuf[x] = obmc1[x] * block[3][x + y*src_stride]
                     +obmc2[x] * block[2][x + y*src_stride]
                     +obmc3[x] * block[1][x + y*src_stride]
                     +obmc4[x] * block[0][x + y*src_stride];
        }
#else

        // load blocks
        //FIXME i could avoid some loads!
        tmp1 = vec_ld(0, &block[3][y*src_stride]);
        align = vec_lvsl(0, &block[3][y*src_stride]);
        tmp2 = vec_ld(15, &block[3][y*src_stride]);

        b3 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, &block[2][y*src_stride]);
        align = vec_lvsl(0, &block[2][y*src_stride]);
        tmp2 = vec_ld(15, &block[2][y*src_stride]);

        b2 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, &block[1][y*src_stride]);
        align = vec_lvsl(0, &block[1][y*src_stride]);
        tmp2 = vec_ld(15, &block[1][y*src_stride]);

        b1 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, &block[0][y*src_stride]);
        align = vec_lvsl(0, &block[0][y*src_stride]);
        tmp2 = vec_ld(15, &block[0][y*src_stride]);

        b0 = vec_perm(tmp1,tmp2,align);

        // load obmcs

        tmp1 = vec_ld(0, obmc1);
        align = vec_lvsl(0, obmc1);
        tmp2 = vec_ld(15, obmc1);

        ob1 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, obmc2);
        align = vec_lvsl(0, obmc2);
        tmp2 = vec_ld(15, obmc2);

        ob2 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, obmc3);
        align = vec_lvsl(0, obmc3);
        tmp2 = vec_ld(15, obmc3);

        ob3 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, obmc4);
        align = vec_lvsl(0, obmc4);
        tmp2 = vec_ld(15, obmc4);

        ob4 = vec_perm(tmp1,tmp2,align);
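        /* Each output needs obmc1[x]*b3 + obmc2[x]*b2 + obmc3[x]*b1 +
         * obmc4[x]*b0 (scalar reference in the #if 0 branch above).
         * Interleaving the four obmc bytes with the four block bytes lets
         * a single vec_msum() accumulate four such dot products at once. */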
        h1 = vec_mergeh(ob1, ob2); /* h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ] */
        h2 = vec_mergeh(ob3, ob4); /* h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ] */

        ih = vec_mergeh(h1,h2);    /* ih <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ] */

        l1 = vec_mergeh(b3, b2);

        l2 = vec_mergeh(b1, b0);

        il = vec_mergeh(l1,l2);

        v[0] = vec_msum(ih, il, vec_splat_u32(0));
        //step1

        h1 = vec_mergeh(ob1, ob2);

        h2 = vec_mergeh(ob3, ob4);

        ih = vec_mergel(h1,h2);

        l1 = vec_mergeh(b3, b2);

        l2 = vec_mergeh(b1, b0);

        il = vec_mergel(l1,l2);

        v[1] = vec_msum(ih, il, vec_splat_u32(0));


#endif
        if(add)
        {
            for(x=0; x<b_w/4; x++)
            {
                v[x] = vec_add(v[x], d[x]);
                v[x] = vec_sra(vec_add(v[x],
                                       vec_sl( vec_splat_s32(1),
                                               vec_splat_u32(7))),
                               vec_splat_u32(8));

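                /* Clamp to 0..255: the vector analogue of the scalar
                 * if (vbuf[x] & ~255) vbuf[x] = ~(vbuf[x] >> 31); */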
                mask = vec_sl((vector signed int) vec_cmpeq(v[x],v[x]),
                              vec_splat_u32(8));
                mask = vec_and(v[x],vec_nor(mask,mask));

                mask = (vector signed int) vec_cmpeq((vector signed int)mask,
                                                     (vector signed int)vec_splat_u32(0));

                /* arithmetic shift right by 31 in stages, since
                 * vec_splat_u32() cannot encode shift counts above 15 */
                vs = vec_sra(v[x],vec_splat_u32(8));
                vs = vec_sra(vs,  vec_splat_u32(8));
                vs = vec_sra(vs,  vec_splat_u32(15));

                vs = vec_nor(vs,vs);

                v[x]= vec_sel(v[x],vs,mask);
            }
            for(x=0; x<b_w; x++)
                dst8[x + y*src_stride] = vbuf[x];
        }
        else
            for(x=0; x<b_w/4; x++)
                d[x] = vec_sub(d[x], v[x]);

    }


}

static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
                                                   const int obmc_stride,
                                                   uint8_t * * block, int b_w,
                                                   int b_h, int src_x, int src_y,
                                                   int src_stride, slice_buffer * sb,
                                                   int add, uint8_t * dst8)
{
    int y, x;
    DWTELEM * dst;
    vector unsigned short h1, h2, l1, l2;
    vector unsigned char ih, il, tmp1, tmp2, align;
    vector unsigned char b0, b1, b2, b3;

    for(y=0; y<b_h; y++){
        //FIXME ugly misuse of obmc_stride

        uint8_t *obmc1= obmc + y*obmc_stride;
        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
        uint8_t *obmc4= obmc3+ (obmc_stride>>1);

        vector unsigned char ob1;
        vector unsigned char ob2;
        vector unsigned char ob3;
        vector unsigned char ob4;

        DECLARE_ALIGNED_16(int, vbuf[b_w]);
        vector signed int *v = (vector signed int *)vbuf, *d;

        dst = slice_buffer_get_line(sb, src_y + y);
        d = (vector signed int *)(dst + src_x);

        // load blocks

        tmp1 = vec_ld(0, &block[3][y*src_stride]);
        align = vec_lvsl(0, &block[3][y*src_stride]);
        tmp2 = vec_ld(15, &block[3][y*src_stride]);

        b3 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, &block[2][y*src_stride]);
        align = vec_lvsl(0, &block[2][y*src_stride]);
        tmp2 = vec_ld(15, &block[2][y*src_stride]);

        b2 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, &block[1][y*src_stride]);
        align = vec_lvsl(0, &block[1][y*src_stride]);
        tmp2 = vec_ld(15, &block[1][y*src_stride]);

        b1 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, &block[0][y*src_stride]);
        align = vec_lvsl(0, &block[0][y*src_stride]);
        tmp2 = vec_ld(15, &block[0][y*src_stride]);

        b0 = vec_perm(tmp1,tmp2,align);

        // load obmcs

        tmp1 = vec_ld(0, obmc1);
        align = vec_lvsl(0, obmc1);
        tmp2 = vec_ld(15, obmc1);

        ob1 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, obmc2);
        align = vec_lvsl(0, obmc2);
        tmp2 = vec_ld(15, obmc2);

        ob2 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, obmc3);
        align = vec_lvsl(0, obmc3);
        tmp2 = vec_ld(15, obmc3);

        ob3 = vec_perm(tmp1,tmp2,align);

        tmp1 = vec_ld(0, obmc4);
        align = vec_lvsl(0, obmc4);
        tmp2 = vec_ld(15, obmc4);

        ob4 = vec_perm(tmp1,tmp2,align);

        //step0
        h1 = vec_mergeh(ob1, ob2); /* h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ] */
        h2 = vec_mergeh(ob3, ob4); /* h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ] */

        ih = vec_mergeh(h1,h2);    /* ih <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ] */

        l1 = vec_mergeh(b3, b2);

        l2 = vec_mergeh(b1, b0);

        il = vec_mergeh(l1,l2);

        v[0] = vec_msum(ih, il, vec_splat_u32(0));

        //step1
        h1 = vec_mergeh(ob1, ob2);

        h2 = vec_mergeh(ob3, ob4);

        ih = vec_mergel(h1,h2);

        l1 = vec_mergeh(b3, b2);

        l2 = vec_mergeh(b1, b0);

        il = vec_mergel(l1,l2);

        v[1] = vec_msum(ih, il, vec_splat_u32(0));

        //step2
        h1 = vec_mergel(ob1, ob2);

        h2 = vec_mergel(ob3, ob4);

        ih = vec_mergeh(h1,h2);

        l1 = vec_mergel(b3, b2);

        l2 = vec_mergel(b1, b0);

        il = vec_mergeh(l1,l2);

        v[2] = vec_msum(ih, il, vec_splat_u32(0));

        //step3
        h1 = vec_mergel(ob1, ob2);

        h2 = vec_mergel(ob3, ob4);

        ih = vec_mergel(h1,h2);

        l1 = vec_mergel(b3, b2);

        l2 = vec_mergel(b1, b0);

        il = vec_mergel(l1,l2);

        v[3] = vec_msum(ih, il, vec_splat_u32(0));
#if 1
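        /* Scalar add/clamp path, currently the enabled branch; the
         * disabled vector path below mirrors the one in the bw_8 variant
         * (note it references mask and vs, which this function does not
         * declare, so it will not compile as-is if switched on). */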
        for(x=0; x<b_w; x++)
            if(add){
                vbuf[x] += dst[x + src_x];
                vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;
                if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);
                dst8[x + y*src_stride] = vbuf[x];
            }else{
                dst[x + src_x] -= vbuf[x];
            }
#else
        if(add)
        {
            for(x=0; x<b_w/4; x++)
            {
                v[x] = vec_add(v[x], d[x]);
                v[x] = vec_sra(vec_add(v[x],
                                       vec_sl( vec_splat_s32(1),
                                               vec_splat_u32(7))),
                               vec_splat_u32(8));

                mask = vec_sl((vector signed int) vec_cmpeq(v[x],v[x]),vec_splat_u32(8));
                mask = vec_and(v[x],vec_nor(mask,mask));

                mask = (vector signed int) vec_cmpeq((vector signed int)mask,(vector signed int)vec_splat_u32(0));

                vs = vec_sra(v[x],vec_splat_u32(8));
                vs = vec_sra(vs,  vec_splat_u32(8));
                vs = vec_sra(vs,  vec_splat_u32(15));

                vs = vec_nor(vs,vs);

                v[x]= vec_sel(v[x],vs,mask);
            }

            for(x=0; x<b_w; x++)
                dst8[x + y*src_stride] = vbuf[x];

        }
        else
            for(x=0; x<b_w/4; x++)
                d[x] = vec_sub(d[x], v[x]);
#endif
    }
}


void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
                                      uint8_t * * block, int b_w, int b_h,
                                      int src_x, int src_y, int src_stride,
                                      slice_buffer * sb, int add,
                                      uint8_t * dst8)
{
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block, b_w,
                                               b_h, src_x, src_y, src_stride,
                                               sb, add, dst8);
    else if (b_w == 8 && !(src_x & 15))
        inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
                                              b_w, b_h, src_x, src_y,
                                              src_stride, sb, add, dst8);
    else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
                                 src_y, src_stride, sb, add, dst8);
}
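
/* Hook-up sketch (an illustration only, not part of this changeset: the
 * actual registration lives elsewhere in the PPC dsputil init code and
 * may differ):
 *
 *     if (has_altivec()) {
 *         c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
 *         c->vertical_compose97i   = ff_snow_vertical_compose97i_altivec;
 *         c->inner_add_yblock      = ff_snow_inner_add_yblock_altivec;
 *     }
 *
 * where c is the DSPContext being initialised and has_altivec() stands in
 * for the usual runtime AltiVec capability check. */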