ppc/vp8dsp_altivec.c @ 12011:f96187e79438 (libavcodec)

Altivec VP8 MC functions
author: conrad
date:   Tue, 29 Jun 2010 06:42:17 +0000
/**
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/vp8dsp.h"
#include "dsputil_altivec.h"
#include "types_altivec.h"
#include "util_altivec.h"

#define REPT4(...) { __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__ }

// h subpel filter uses msum to multiply+add 4 pixel taps at once
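// These are the middle four taps of VP8's seven subpel filters; every
// filter's coefficients sum to 128, hence the rounding constant of 64 and
// the final shift by 7 below. Each set of taps is repeated four times so a
// single vec_msum yields partial sums for four output pixels.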
static const vec_s8 h_subpel_filters_inner[7] =
{
    REPT4( -6, 123,  12,  -1),
    REPT4(-11, 108,  36,  -8),
    REPT4( -9,  93,  50,  -6),
    REPT4(-16,  77,  77, -16),
    REPT4( -6,  50,  93,  -9),
    REPT4( -8,  36, 108, -11),
    REPT4( -1,  12, 123,  -6),
};

// for 6tap filters, these are the outer two taps
// The zeros mask off pixels 4-7 when filtering 0-3
// and vice-versa
static const vec_s8 h_subpel_filters_outer[3] =
{
    REPT4(0, 0, 2, 1),
    REPT4(0, 0, 3, 3),
    REPT4(0, 0, 1, 2),
};

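// i is the subpel position minus one (mx-1); the outer-tap table only has
// entries for the three true 6-tap positions, hence the index (i)>>1.
// filter_outerl is filter_outerh rotated by two bytes so its zeros mask the
// opposite half of the pixels.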
#define LOAD_H_SUBPEL_FILTER(i) \
    vec_s8 filter_inner  = h_subpel_filters_inner[i]; \
    vec_s8 filter_outerh = h_subpel_filters_outer[(i)>>1]; \
    vec_s8 filter_outerl = vec_sld(filter_outerh, filter_outerh, 2)

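// FILTER_H produces one filtered half-row: it loads the two aligned
// quadwords covering the 16 unaligned bytes starting at src+off-2, permutes
// them so each 32-bit lane holds the four inner taps of one output pixel,
// and accumulates them with vec_msum on top of the rounding constant 64.
// For 6-tap filters the two outer taps are folded in the same way; the sums
// are then saturate-packed to 16 bits and shifted right by 7.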
#define FILTER_H(dstv, off) \
    a = vec_ld((off)-2,    src); \
    b = vec_ld((off)-2+15, src); \
\
    pixh  = vec_perm(a, b, permh##off); \
    pixl  = vec_perm(a, b, perml##off); \
    filth = vec_msum(filter_inner, pixh, c64); \
    filtl = vec_msum(filter_inner, pixl, c64); \
\
    if (is6tap) { \
        outer = vec_perm(a, b, perm_6tap##off); \
        filth = vec_msum(filter_outerh, outer, filth); \
        filtl = vec_msum(filter_outerl, outer, filtl); \
    } \
    if (w == 4) \
        filtl = filth; /* discard pixels 4-7 */ \
    dstv = vec_packs(filth, filtl); \
    dstv = vec_sra(dstv, c7)

static av_always_inline
void put_vp8_epel_h_altivec_core(uint8_t *dst, int dst_stride,
                                 uint8_t *src, int src_stride,
                                 int h, int mx, int w, int is6tap)
{
    LOAD_H_SUBPEL_FILTER(mx-1);
    vec_u8 align_vec0, align_vec8, permh0, permh8, filt;
    vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
    vec_u8 a, b, pixh, pixl, outer;
    vec_s16 f16h, f16l;
    vec_s32 filth, filtl;

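    // perm_inner gathers, for each of four output pixels, its four inner
    // source taps; perm_outer gathers the two outer taps for eight output
    // pixels at once. vec_lvsl yields the misalignment permute for the
    // unaligned loads, and composing it with these patterns lets a single
    // vec_perm both align the data and rearrange it into tap order.
    // c64 is the rounding constant (the taps sum to 128), c7 the final shift.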
    vec_u8 perm_inner = { 1,2,3,4, 2,3,4,5, 3,4,5,6, 4,5,6,7 };
    vec_u8 perm_outer = { 4,9, 0,5, 5,10, 1,6, 6,11, 2,7, 7,12, 3,8 };
    vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
    vec_u16 c7  = vec_splat_u16(7);

    align_vec0 = vec_lvsl( -2, src);
    align_vec8 = vec_lvsl(8-2, src);

    permh0     = vec_perm(align_vec0, align_vec0, perm_inner);
    permh8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_inner = vec_add(perm_inner, vec_splat_u8(4));
    perml0     = vec_perm(align_vec0, align_vec0, perm_inner);
    perml8     = vec_perm(align_vec8, align_vec8, perm_inner);
    perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
    perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);

    while (h --> 0) {
        FILTER_H(f16h, 0);

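        // Store: 16-wide rows go out with a full vec_st; narrower rows use
        // vec_ste word stores. For w == 4, FILTER_H already duplicated
        // pixels 0-3 into both halves, so every 32-bit element of filt holds
        // the same four pixels and the element store works for any
        // 4-byte-aligned dst; w == 8 relies on dst being 8-byte aligned.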
        if (w == 16) {
            FILTER_H(f16l, 8);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
            if (w == 8)
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
        }
        src += src_stride;
        dst += dst_stride;
    }
}

// v subpel filter does a simple vertical multiply + add
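// The table stores magnitudes only; taps 1 and 4 are negative in VP8's
// filters, and FILTER_V applies their sign with saturating subtracts.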
static const vec_u8 v_subpel_filters[7] =
{
    { 0,  6, 123,  12,  1,  0 },
    { 2, 11, 108,  36,  8,  1 },
    { 0,  9,  93,  50,  6,  0 },
    { 3, 16,  77,  77, 16,  3 },
    { 0,  6,  50,  93,  9,  0 },
    { 1,  8,  36, 108, 11,  2 },
    { 0,  1,  12, 123,  6,  0 },
};

#define LOAD_V_SUBPEL_FILTER(i) \
    vec_u8 subpel_filter = v_subpel_filters[i]; \
    vec_u8 f0 = vec_splat(subpel_filter, 0); \
    vec_u8 f1 = vec_splat(subpel_filter, 1); \
    vec_u8 f2 = vec_splat(subpel_filter, 2); \
    vec_u8 f3 = vec_splat(subpel_filter, 3); \
    vec_u8 f4 = vec_splat(subpel_filter, 4); \
    vec_u8 f5 = vec_splat(subpel_filter, 5)

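// FILTER_V combines six source rows with the splatted taps: the negative
// taps 1 and 4 are subtracted from the positive taps 2 and 3, the outer
// taps 0 and 5 are only added for the 6-tap case, and the result is rounded
// with 64 and shifted right by 7. vec_mul is either vec_mule or vec_mulo,
// selecting the even or odd byte lanes of the interleaved rows.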
#define FILTER_V(dstv, vec_mul) \
    s1f = (vec_s16)vec_mul(s1, f1); \
    s2f = (vec_s16)vec_mul(s2, f2); \
    s3f = (vec_s16)vec_mul(s3, f3); \
    s4f = (vec_s16)vec_mul(s4, f4); \
    s2f = vec_subs(s2f, s1f); \
    s3f = vec_subs(s3f, s4f); \
    if (is6tap) { \
        s0f = (vec_s16)vec_mul(s0, f0); \
        s5f = (vec_s16)vec_mul(s5, f5); \
        s2f = vec_adds(s2f, s0f); \
        s3f = vec_adds(s3f, s5f); \
    } \
    dstv = vec_adds(s2f, s3f); \
    dstv = vec_adds(dstv, c64); \
    dstv = vec_sra(dstv, c7)

static av_always_inline
void put_vp8_epel_v_altivec_core(uint8_t *dst, int dst_stride,
                                 uint8_t *src, int src_stride,
                                 int h, int my, int w, int is6tap)
{
    LOAD_V_SUBPEL_FILTER(my-1);
    vec_u8 s0, s1, s2, s3, s4, s5, filt, align_vech, perm_vec, align_vecl;
    vec_s16 s0f, s1f, s2f, s3f, s4f, s5f, f16h, f16l;
    vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
    vec_u16 c7  = vec_splat_u16(7);

    // we want pixels 0-7 to be in the even positions and 8-15 in the odd,
    // so combine this permute with the alignment permute vector
    align_vech = vec_lvsl(0, src);
    align_vecl = vec_sld(align_vech, align_vech, 8);
    if (w == 16)
        perm_vec = vec_mergeh(align_vech, align_vecl);
    else
        perm_vec = vec_mergeh(align_vech, align_vech);

    if (is6tap)
        s0 = load_with_perm_vec(-2*src_stride, src, perm_vec);
    s1 = load_with_perm_vec(-1*src_stride, src, perm_vec);
    s2 = load_with_perm_vec( 0*src_stride, src, perm_vec);
    s3 = load_with_perm_vec( 1*src_stride, src, perm_vec);
    if (is6tap)
        s4 = load_with_perm_vec( 2*src_stride, src, perm_vec);

    src += (2+is6tap)*src_stride;

    while (h --> 0) {
        if (is6tap)
            s5 = load_with_perm_vec(0, src, perm_vec);
        else
            s4 = load_with_perm_vec(0, src, perm_vec);

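        // pixels 0-7 sit in the even byte lanes and 8-15 in the odd lanes,
        // so vec_mule filters the low half of the row and vec_mulo the high half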
        FILTER_V(f16h, vec_mule);

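        // As in the horizontal filter, 16-wide rows are stored whole; for
        // w == 4 the result is splatted so the vec_ste word store is correct
        // for any 4-byte-aligned dst, and w == 8 uses two word stores.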
        if (w == 16) {
            FILTER_V(f16l, vec_mulo);
            filt = vec_packsu(f16h, f16l);
            vec_st(filt, 0, dst);
        } else {
            filt = vec_packsu(f16h, f16h);
            if (w == 4)
                filt = (vec_u8)vec_splat((vec_u32)filt, 0);
            else
                vec_ste((vec_u32)filt, 4, (uint32_t*)dst);
            vec_ste((vec_u32)filt, 0, (uint32_t*)dst);
        }

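        // slide the window of source rows down one line for the next iteration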
        if (is6tap)
            s0 = s1;
        s1 = s2;
        s2 = s3;
        s3 = s4;
        if (is6tap)
            s4 = s5;

        dst += dst_stride;
        src += src_stride;
    }
}

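// EPEL_FUNCS instantiates width- and tap-specific wrappers around the two
// av_always_inline cores, so w and is6tap become compile-time constants and
// the unused branches are optimized away.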
#define EPEL_FUNCS(WIDTH, TAPS) \
static av_noinline \
void put_vp8_epel ## WIDTH ## _h ## TAPS ## _altivec(uint8_t *dst, int dst_stride, uint8_t *src, int src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_h_altivec_core(dst, dst_stride, src, src_stride, h, mx, WIDTH, TAPS == 6); \
} \
\
static av_noinline \
void put_vp8_epel ## WIDTH ## _v ## TAPS ## _altivec(uint8_t *dst, int dst_stride, uint8_t *src, int src_stride, int h, int mx, int my) \
{ \
    put_vp8_epel_v_altivec_core(dst, dst_stride, src, src_stride, h, my, WIDTH, TAPS == 6); \
}

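// EPEL_HV builds the combined filters: it first filters horizontally into a
// 16-byte-strided temporary buffer, producing h+5 rows starting two rows
// above the block (the margin a 6-tap vertical filter needs), then filters
// that buffer vertically from tmp+2*16. The same margin is used even when
// the vertical filter is only 4-tap.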
#define EPEL_HV(WIDTH, HTAPS, VTAPS) \
static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, int stride, uint8_t *src, int s, int h, int mx, int my) \
{ \
    DECLARE_ALIGNED(16, uint8_t, tmp)[(2*WIDTH+5)*16]; \
    put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16, src-2*stride, stride, h+5, mx, my); \
    put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, stride, tmp+2*16, 16, h, mx, my); \
}

EPEL_FUNCS(16,6)
EPEL_FUNCS(8, 6)
EPEL_FUNCS(8, 4)
EPEL_FUNCS(4, 6)
EPEL_FUNCS(4, 4)

EPEL_HV(16, 6,6)
EPEL_HV(8,  6,6)
EPEL_HV(8,  4,6)
EPEL_HV(8,  6,4)
EPEL_HV(8,  4,4)
EPEL_HV(4,  6,6)
EPEL_HV(4,  4,6)
EPEL_HV(4,  6,4)
EPEL_HV(4,  4,4)

static void put_vp8_pixels16_altivec(uint8_t *dst, int stride, uint8_t *src, int s, int h, int mx, int my)
{
    put_pixels16_altivec(dst, src, stride, h);
}

av_cold void ff_vp8dsp_init_altivec(VP8DSPContext *c)
{
    if (!has_altivec())
        return;

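    // put_vp8_epel_pixels_tab is indexed [4 - log2(width)][v filter][h filter]
    // (0 = 16-wide, 1 = 8-wide, 2 = 4-wide); filter index 0 means no
    // interpolation, 1 the 4-tap filter and 2 the 6-tap filter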
    c->put_vp8_epel_pixels_tab[0][0][0] = put_vp8_pixels16_altivec;
    c->put_vp8_epel_pixels_tab[0][0][2] = put_vp8_epel16_h6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][0] = put_vp8_epel16_v6_altivec;
    c->put_vp8_epel_pixels_tab[0][2][2] = put_vp8_epel16_h6v6_altivec;

    c->put_vp8_epel_pixels_tab[1][0][2] = put_vp8_epel8_h6_altivec;
    c->put_vp8_epel_pixels_tab[1][2][0] = put_vp8_epel8_v6_altivec;
    c->put_vp8_epel_pixels_tab[1][0][1] = put_vp8_epel8_h4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][0] = put_vp8_epel8_v4_altivec;

    c->put_vp8_epel_pixels_tab[1][2][2] = put_vp8_epel8_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[1][1][1] = put_vp8_epel8_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[1][1][2] = put_vp8_epel8_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[1][2][1] = put_vp8_epel8_h4v6_altivec;

    c->put_vp8_epel_pixels_tab[2][0][2] = put_vp8_epel4_h6_altivec;
    c->put_vp8_epel_pixels_tab[2][2][0] = put_vp8_epel4_v6_altivec;
    c->put_vp8_epel_pixels_tab[2][0][1] = put_vp8_epel4_h4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][0] = put_vp8_epel4_v4_altivec;

    c->put_vp8_epel_pixels_tab[2][2][2] = put_vp8_epel4_h6v6_altivec;
    c->put_vp8_epel_pixels_tab[2][1][1] = put_vp8_epel4_h4v4_altivec;
    c->put_vp8_epel_pixels_tab[2][1][2] = put_vp8_epel4_h6v4_altivec;
    c->put_vp8_epel_pixels_tab[2][2][1] = put_vp8_epel4_h4v6_altivec;
}