annotate ppc/vc1dsp_altivec.c @ 3947:c8c591fe26f8 (libavcodec)

Change license headers to say 'FFmpeg' instead of 'this program/this library'
and fix GPL/LGPL version mismatches.

author:   diego
date:     Sat, 07 Oct 2006 15:30:46 +0000
parents:  f52e3f60481b
children: fec25fd9febf
/*
 * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "../dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"

// Transpose 8x8 matrix of 16-bit elements. Borrowed from mpegvideo_altivec.c
#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
do { \
    vector signed short A1, B1, C1, D1, E1, F1, G1, H1; \
    vector signed short A2, B2, C2, D2, E2, F2, G2, H2; \
 \
    A1 = vec_mergeh (a, e); \
    B1 = vec_mergel (a, e); \
    C1 = vec_mergeh (b, f); \
    D1 = vec_mergel (b, f); \
    E1 = vec_mergeh (c, g); \
    F1 = vec_mergel (c, g); \
    G1 = vec_mergeh (d, h); \
    H1 = vec_mergel (d, h); \
 \
    A2 = vec_mergeh (A1, E1); \
    B2 = vec_mergel (A1, E1); \
    C2 = vec_mergeh (B1, F1); \
    D2 = vec_mergel (B1, F1); \
    E2 = vec_mergeh (C1, G1); \
    F2 = vec_mergel (C1, G1); \
    G2 = vec_mergeh (D1, H1); \
    H2 = vec_mergel (D1, H1); \
 \
    a = vec_mergeh (A2, E2); \
    b = vec_mergel (A2, E2); \
    c = vec_mergeh (B2, F2); \
    d = vec_mergel (B2, F2); \
    e = vec_mergeh (C2, G2); \
    f = vec_mergel (C2, G2); \
    g = vec_mergeh (D2, H2); \
    h = vec_mergel (D2, H2); \
} while (0)

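/* TRANSPOSE8 is a three-stage merge network: log2(8) = 3 rounds of
 * vec_mergeh/vec_mergel interleaves move element (i,j) to position (j,i),
 * pairing rows that are four apart, then two apart, then adjacent. As a
 * sketch of what one merge pair does to two rows of shorts on big-endian
 * AltiVec (illustrative only):
 *
 *     mergeh({a0..a7}, {e0..e7}) -> {a0,e0,a1,e1,a2,e2,a3,e3}
 *     mergel({a0..a7}, {e0..e7}) -> {a4,e4,a5,e5,a6,e6,a7,e7}
 */
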
// main steps of 8x8 transform
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
 \
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
 \
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
 \
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
 \
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
 \
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
} while (0)

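/* STEP8 is the VC-1 8-point transform with every multiplication reduced
 * to shifts and adds. A scalar sketch of the even half, assuming the
 * standard coefficients (illustrative only):
 *
 *     t0 = 12 * (s0 + s4) + rnd;   // ((x<<2)<<1) + (x<<2) == 12*x
 *     t1 = 12 * (s0 - s4) + rnd;
 *     t2 = 16 * s2 +  6 * s6;      // (x<<2) + (x<<1) == 6*x
 *     t3 =  6 * s2 - 16 * s6;
 *
 * The odd half weights s1, s3, s5, s7 by 16, 15, 9 and 4 in the same
 * style, e.g. 16*(s1+s3) + 8*s5 + 4*s7 + (s5-s3)
 *             == 16*s1 + 15*s3 + 9*s5 + 4*s7.
 */
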
#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
} while (0)

#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
} while (0)

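/* SHIFT_HOR8 and SHIFT_VERT8 apply the per-pass scaling: the horizontal
 * pass rounds with 4 (vec_4s below) and shifts right by 3, the vertical
 * pass rounds with 64 (vec_64) and shifts right by 7. The extra +1 on
 * s4..s7 before the vertical shift corresponds to the bottom four output
 * rows, mirroring the rounding asymmetry of the scalar version of this
 * transform. */
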
/* main steps of 4x4 transform */
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
} while (0)

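/* STEP4 is the VC-1 4-point transform, built from the coefficients
 * 17, 22 and 10. Expanded into scalar form (illustrative only):
 *
 *     t1 = 17 * s0 + rnd;      // (s0<<4) + s0
 *     t2 = 17 * s2;
 *     t0 = t1 + t2;   t1 = t1 - t2;
 *     u  = 10 * (s3 - s1);     // d = (s3-s1)<<1; u = d + (d<<2)
 *     t2 = u + 32 * s1;        // == 22*s1 + 10*s3
 *     t3 = u + 12 * s3;        // == 22*s3 - 10*s1
 *     s0 = t0 + t2;  s1 = t1 - t3;  s2 = t1 + t3;  s3 = t0 - t2;
 */
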
#define SHIFT_HOR4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
} while (0)

#define SHIFT_VERT4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
} while (0)

/** Do inverse transform on 8x8 block
 */
static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    // vec_splat_s32() only takes a 5-bit literal, so build 64 as 4 << 4
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    // load the eight rows of 16-bit coefficients
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    // horizontal pass: transpose, then widen each row to two vectors of
    // 32-bit ints (vec_unpackh yields elements 0..3, vec_unpackl 4..7)
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    // narrow back to 16 bits; vec_pack(high, low) restores element order
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    // vertical pass: same structure with the 64 rounding and >>7 shift
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,   0, block);
    vec_st(src1,  16, block);
    vec_st(src2,  32, block);
    vec_st(src3,  48, block);
    vec_st(src4,  64, block);
    vec_st(src5,  80, block);
    vec_st(src6,  96, block);
    vec_st(src7, 112, block);
}

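/* The routine above is the usual two-pass structure: a horizontal pass
 * with rounding 4 and >>3, then a vertical pass with rounding 64 and >>7.
 * A scalar sketch of the control flow it vectorizes, with a hypothetical
 * step8() standing in for a STEP8/SHIFT pair (illustrative only):
 *
 *     for (i = 0; i < 8; i++)   // rows
 *         step8(block + 8*i, 1,  4, 3);
 *     for (i = 0; i < 8; i++)   // columns
 *         step8(block + i,   8, 64, 7);
 *
 * The AltiVec version avoids the strided column access by transposing,
 * transforming along vector lanes, and transposing back.
 */
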
/** Do inverse transform on 8x4 part of block
 * @param n 0: transform the upper half of the block, 1: the lower half
 */
static void vc1_inv_trans_8x4_altivec(DCTELEM block[64], int n)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    // 8-point horizontal pass over all eight rows, as in the 8x8 case
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    // 4-point vertical pass over the selected four rows only
    if(!n){ // upper half of block
        s0 = vec_unpackh(src0);
        s1 = vec_unpackh(src1);
        s2 = vec_unpackh(src2);
        s3 = vec_unpackh(src3);
        s8 = vec_unpackl(src0);
        s9 = vec_unpackl(src1);
        sA = vec_unpackl(src2);
        sB = vec_unpackl(src3);
        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);
        src0 = vec_pack(s0, s8);
        src1 = vec_pack(s1, s9);
        src2 = vec_pack(s2, sA);
        src3 = vec_pack(s3, sB);

        vec_st(src0,  0, block);
        vec_st(src1, 16, block);
        vec_st(src2, 32, block);
        vec_st(src3, 48, block);
    } else { // lower half of block
        s0 = vec_unpackh(src4);
        s1 = vec_unpackh(src5);
        s2 = vec_unpackh(src6);
        s3 = vec_unpackh(src7);
        s8 = vec_unpackl(src4);
        s9 = vec_unpackl(src5);
        sA = vec_unpackl(src6);
        sB = vec_unpackl(src7);
        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);
        src4 = vec_pack(s0, s8);
        src5 = vec_pack(s1, s9);
        src6 = vec_pack(s2, sA);
        src7 = vec_pack(s3, sB);

        vec_st(src4,  64, block);
        vec_st(src5,  80, block);
        vec_st(src6,  96, block);
        vec_st(src7, 112, block);
    }
}
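
/* VC-1 signals per-block transform sizes of 8x8, 8x4, 4x8 and 4x4; this
 * file provides AltiVec versions only for the first two, so the other
 * sizes are left to whatever implementations dsputil installed before. */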

void vc1dsp_init_altivec(DSPContext* dsp, AVCodecContext *avctx) {
    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
}
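
/* vc1dsp_init_altivec() only installs the function pointers. The generic
 * init code is expected to call it when AltiVec support is detected,
 * along the lines of (an illustrative sketch, not the exact dispatch):
 *
 *     if (has_altivec())
 *         vc1dsp_init_altivec(dsp, avctx);
 *
 * after which the decoder invokes the transforms through the context,
 * e.g. dsp->vc1_inv_trans_8x8(block).
 */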