Mercurial > libavcodec.hg
annotate sparc/dsputil_vis.c @ 5732:d6fc148d1a48 libavcodec
replace brute force find_optimal_param() with a closed-form solution.
overall flac encoding: 4-15% faster.
output is not identical to the previous algorithm due to occasional rounding
errors, but the differece is less than .0005% bitrate.
author | lorenm |
---|---|
date | Sat, 29 Sep 2007 05:41:27 +0000 |
parents | c0f471cc871d |
children | ace63c809071 |
rev | line source |
---|---|
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
1 /* |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
2 * dsputil_vis.c |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3 * Copyright (C) 2003 David S. Miller <davem@redhat.com> |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
4 * |
3947
c8c591fe26f8
Change license headers to say 'FFmpeg' instead of 'this program/this library'
diego
parents:
3036
diff
changeset
|
5 * This file is part of FFmpeg. |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
6 * |
3987
2c54309fef91
Switch to the LGPL as agreed to by the author according to the
diego
parents:
3947
diff
changeset
|
7 * FFmpeg is free software; you can redistribute it and/or |
2c54309fef91
Switch to the LGPL as agreed to by the author according to the
diego
parents:
3947
diff
changeset
|
8 * modify it under the terms of the GNU Lesser General Public |
2c54309fef91
Switch to the LGPL as agreed to by the author according to the
diego
parents:
3947
diff
changeset
|
9 * License as published by the Free Software Foundation; either |
2c54309fef91
Switch to the LGPL as agreed to by the author according to the
diego
parents:
3947
diff
changeset
|
10 * version 2.1 of the License, or (at your option) any later version. |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
11 * |
3947
c8c591fe26f8
Change license headers to say 'FFmpeg' instead of 'this program/this library'
diego
parents:
3036
diff
changeset
|
12 * FFmpeg is distributed in the hope that it will be useful, |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of |
3987
2c54309fef91
Switch to the LGPL as agreed to by the author according to the
diego
parents:
3947
diff
changeset
|
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
2c54309fef91
Switch to the LGPL as agreed to by the author according to the
diego
parents:
3947
diff
changeset
|
15 * Lesser General Public License for more details. |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
16 * |
3987
2c54309fef91
Switch to the LGPL as agreed to by the author according to the
diego
parents:
3947
diff
changeset
|
17 * You should have received a copy of the GNU Lesser General Public |
2c54309fef91
Switch to the LGPL as agreed to by the author according to the
diego
parents:
3947
diff
changeset
|
18 * License along with FFmpeg; if not, write to the Free Software |
3036
0b546eab515d
Update licensing information: The FSF changed postal address.
diego
parents:
2979
diff
changeset
|
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
20 */ |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
21 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
22 /* The *no_round* functions have been added by James A. Morrison, 2003,2004. |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
23 The vis code from libmpeg2 was adapted for ffmpeg by James A. Morrison. |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
24 */ |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
25 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
26 #include "config.h" |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
27 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
28 #include <inttypes.h> |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
29 #include <signal.h> |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
30 #include <setjmp.h> |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
31 |
5010
d5ba514e3f4a
Add libavcodec to compiler include flags in order to simplify header
diego
parents:
3987
diff
changeset
|
32 #include "dsputil.h" |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
33 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
34 #include "vis.h" |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
35 |
5618 | 36 extern void ff_simple_idct_put_vis(uint8_t *dest, int line_size, DCTELEM *data); |
37 extern void ff_simple_idct_add_vis(uint8_t *dest, int line_size, DCTELEM *data); | |
38 extern void ff_simple_idct_vis(DCTELEM *data); | |
39 | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
40 /* The trick used in some of this file is the formula from the MMX |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
41 * motion comp code, which is: |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
42 * |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
43 * (x+y+1)>>1 == (x|y)-((x^y)>>1) |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
44 * |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
45 * This allows us to average 8 bytes at a time in a 64-bit FPU reg. |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
46 * We avoid overflows by masking before we do the shift, and we |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
47 * implement the shift by multiplying by 1/2 using mul8x16. So in |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
48 * VIS this is (assume 'x' is in f0, 'y' is in f2, a repeating mask |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
49 * of '0xfe' is in f4, a repeating mask of '0x7f' is in f6, and |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
50 * the value 0x80808080 is in f8): |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
51 * |
2979 | 52 * fxor f0, f2, f10 |
53 * fand f10, f4, f10 | |
54 * fmul8x16 f8, f10, f10 | |
55 * fand f10, f6, f10 | |
56 * for f0, f2, f12 | |
57 * fpsub16 f12, f10, f10 | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
58 */ |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
59 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
60 #define ATTR_ALIGN(alignd) __attribute__ ((aligned(alignd))) |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
61 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
62 #define DUP4(x) {x, x, x, x} |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
63 #define DUP8(x) {x, x, x, x, x, x, x, x} |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
64 static const int16_t constants1[] ATTR_ALIGN(8) = DUP4 (1); |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
65 static const int16_t constants2[] ATTR_ALIGN(8) = DUP4 (2); |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
66 static const int16_t constants3[] ATTR_ALIGN(8) = DUP4 (3); |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
67 static const int16_t constants6[] ATTR_ALIGN(8) = DUP4 (6); |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
68 static const int8_t constants_fe[] ATTR_ALIGN(8) = DUP8 (0xfe); |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
69 static const int8_t constants_7f[] ATTR_ALIGN(8) = DUP8 (0x7f); |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
70 static const int8_t constants128[] ATTR_ALIGN(8) = DUP8 (128); |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
71 static const int16_t constants256_512[] ATTR_ALIGN(8) = |
2979 | 72 {256, 512, 256, 512}; |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
73 static const int16_t constants256_1024[] ATTR_ALIGN(8) = |
2979 | 74 {256, 1024, 256, 1024}; |
75 | |
76 #define REF_0 0 | |
77 #define REF_0_1 1 | |
78 #define REF_2 2 | |
79 #define REF_2_1 3 | |
80 #define REF_4 4 | |
81 #define REF_4_1 5 | |
82 #define REF_6 6 | |
83 #define REF_6_1 7 | |
84 #define REF_S0 8 | |
85 #define REF_S0_1 9 | |
86 #define REF_S2 10 | |
87 #define REF_S2_1 11 | |
88 #define REF_S4 12 | |
89 #define REF_S4_1 13 | |
90 #define REF_S6 14 | |
91 #define REF_S6_1 15 | |
92 #define DST_0 16 | |
93 #define DST_1 17 | |
94 #define DST_2 18 | |
95 #define DST_3 19 | |
96 #define CONST_1 20 | |
97 #define CONST_2 20 | |
98 #define CONST_3 20 | |
99 #define CONST_6 20 | |
100 #define MASK_fe 20 | |
101 #define CONST_128 22 | |
102 #define CONST_256 22 | |
103 #define CONST_512 22 | |
104 #define CONST_1024 22 | |
105 #define TMP0 24 | |
106 #define TMP1 25 | |
107 #define TMP2 26 | |
108 #define TMP3 27 | |
109 #define TMP4 28 | |
110 #define TMP5 29 | |
111 #define ZERO 30 | |
112 #define MASK_7f 30 | |
113 | |
114 #define TMP6 32 | |
115 #define TMP8 34 | |
116 #define TMP10 36 | |
117 #define TMP12 38 | |
118 #define TMP14 40 | |
119 #define TMP16 42 | |
120 #define TMP18 44 | |
121 #define TMP20 46 | |
122 #define TMP22 48 | |
123 #define TMP24 50 | |
124 #define TMP26 52 | |
125 #define TMP28 54 | |
126 #define TMP30 56 | |
127 #define TMP32 58 | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
128 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
129 static void MC_put_o_16_vis (uint8_t * dest, const uint8_t * _ref, |
2979 | 130 const int stride, int height) |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
131 { |
2979 | 132 uint8_t *ref = (uint8_t *) _ref; |
133 | |
134 ref = vis_alignaddr(ref); | |
135 do { /* 5 cycles */ | |
136 vis_ld64(ref[0], TMP0); | |
137 | |
138 vis_ld64_2(ref, 8, TMP2); | |
139 | |
140 vis_ld64_2(ref, 16, TMP4); | |
141 ref += stride; | |
142 | |
143 vis_faligndata(TMP0, TMP2, REF_0); | |
144 vis_st64(REF_0, dest[0]); | |
145 | |
146 vis_faligndata(TMP2, TMP4, REF_2); | |
147 vis_st64_2(REF_2, dest, 8); | |
148 dest += stride; | |
149 } while (--height); | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
150 } |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
151 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
152 static void MC_put_o_8_vis (uint8_t * dest, const uint8_t * _ref, |
2979 | 153 const int stride, int height) |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
154 { |
2979 | 155 uint8_t *ref = (uint8_t *) _ref; |
156 | |
157 ref = vis_alignaddr(ref); | |
158 do { /* 4 cycles */ | |
159 vis_ld64(ref[0], TMP0); | |
160 | |
161 vis_ld64(ref[8], TMP2); | |
162 ref += stride; | |
163 | |
164 /* stall */ | |
165 | |
166 vis_faligndata(TMP0, TMP2, REF_0); | |
167 vis_st64(REF_0, dest[0]); | |
168 dest += stride; | |
169 } while (--height); | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
170 } |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
171 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
172 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
173 static void MC_avg_o_16_vis (uint8_t * dest, const uint8_t * _ref, |
2979 | 174 const int stride, int height) |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
175 { |
2979 | 176 uint8_t *ref = (uint8_t *) _ref; |
177 int stride_8 = stride + 8; | |
178 | |
179 ref = vis_alignaddr(ref); | |
180 | |
181 vis_ld64(ref[0], TMP0); | |
182 | |
183 vis_ld64(ref[8], TMP2); | |
184 | |
185 vis_ld64(ref[16], TMP4); | |
186 | |
187 vis_ld64(dest[0], DST_0); | |
188 | |
189 vis_ld64(dest[8], DST_2); | |
190 | |
191 vis_ld64(constants_fe[0], MASK_fe); | |
192 vis_faligndata(TMP0, TMP2, REF_0); | |
193 | |
194 vis_ld64(constants_7f[0], MASK_7f); | |
195 vis_faligndata(TMP2, TMP4, REF_2); | |
196 | |
197 vis_ld64(constants128[0], CONST_128); | |
198 | |
199 ref += stride; | |
200 height = (height >> 1) - 1; | |
201 | |
202 do { /* 24 cycles */ | |
203 vis_ld64(ref[0], TMP0); | |
204 vis_xor(DST_0, REF_0, TMP6); | |
205 | |
206 vis_ld64_2(ref, 8, TMP2); | |
207 vis_and(TMP6, MASK_fe, TMP6); | |
208 | |
209 vis_ld64_2(ref, 16, TMP4); | |
210 ref += stride; | |
211 vis_mul8x16(CONST_128, TMP6, TMP6); | |
212 vis_xor(DST_2, REF_2, TMP8); | |
213 | |
214 vis_and(TMP8, MASK_fe, TMP8); | |
215 | |
216 vis_or(DST_0, REF_0, TMP10); | |
217 vis_ld64_2(dest, stride, DST_0); | |
218 vis_mul8x16(CONST_128, TMP8, TMP8); | |
219 | |
220 vis_or(DST_2, REF_2, TMP12); | |
221 vis_ld64_2(dest, stride_8, DST_2); | |
222 | |
223 vis_ld64(ref[0], TMP14); | |
224 vis_and(TMP6, MASK_7f, TMP6); | |
225 | |
226 vis_and(TMP8, MASK_7f, TMP8); | |
227 | |
228 vis_psub16(TMP10, TMP6, TMP6); | |
229 vis_st64(TMP6, dest[0]); | |
230 | |
231 vis_psub16(TMP12, TMP8, TMP8); | |
232 vis_st64_2(TMP8, dest, 8); | |
233 | |
234 dest += stride; | |
235 vis_ld64_2(ref, 8, TMP16); | |
236 vis_faligndata(TMP0, TMP2, REF_0); | |
237 | |
238 vis_ld64_2(ref, 16, TMP18); | |
239 vis_faligndata(TMP2, TMP4, REF_2); | |
240 ref += stride; | |
241 | |
242 vis_xor(DST_0, REF_0, TMP20); | |
243 | |
244 vis_and(TMP20, MASK_fe, TMP20); | |
245 | |
246 vis_xor(DST_2, REF_2, TMP22); | |
247 vis_mul8x16(CONST_128, TMP20, TMP20); | |
248 | |
249 vis_and(TMP22, MASK_fe, TMP22); | |
250 | |
251 vis_or(DST_0, REF_0, TMP24); | |
252 vis_mul8x16(CONST_128, TMP22, TMP22); | |
253 | |
254 vis_or(DST_2, REF_2, TMP26); | |
255 | |
256 vis_ld64_2(dest, stride, DST_0); | |
257 vis_faligndata(TMP14, TMP16, REF_0); | |
258 | |
259 vis_ld64_2(dest, stride_8, DST_2); | |
260 vis_faligndata(TMP16, TMP18, REF_2); | |
261 | |
262 vis_and(TMP20, MASK_7f, TMP20); | |
263 | |
264 vis_and(TMP22, MASK_7f, TMP22); | |
265 | |
266 vis_psub16(TMP24, TMP20, TMP20); | |
267 vis_st64(TMP20, dest[0]); | |
268 | |
269 vis_psub16(TMP26, TMP22, TMP22); | |
270 vis_st64_2(TMP22, dest, 8); | |
271 dest += stride; | |
272 } while (--height); | |
273 | |
274 vis_ld64(ref[0], TMP0); | |
275 vis_xor(DST_0, REF_0, TMP6); | |
276 | |
277 vis_ld64_2(ref, 8, TMP2); | |
278 vis_and(TMP6, MASK_fe, TMP6); | |
279 | |
280 vis_ld64_2(ref, 16, TMP4); | |
281 vis_mul8x16(CONST_128, TMP6, TMP6); | |
282 vis_xor(DST_2, REF_2, TMP8); | |
283 | |
284 vis_and(TMP8, MASK_fe, TMP8); | |
285 | |
286 vis_or(DST_0, REF_0, TMP10); | |
287 vis_ld64_2(dest, stride, DST_0); | |
288 vis_mul8x16(CONST_128, TMP8, TMP8); | |
289 | |
290 vis_or(DST_2, REF_2, TMP12); | |
291 vis_ld64_2(dest, stride_8, DST_2); | |
292 | |
293 vis_ld64(ref[0], TMP14); | |
294 vis_and(TMP6, MASK_7f, TMP6); | |
295 | |
296 vis_and(TMP8, MASK_7f, TMP8); | |
297 | |
298 vis_psub16(TMP10, TMP6, TMP6); | |
299 vis_st64(TMP6, dest[0]); | |
300 | |
301 vis_psub16(TMP12, TMP8, TMP8); | |
302 vis_st64_2(TMP8, dest, 8); | |
303 | |
304 dest += stride; | |
305 vis_faligndata(TMP0, TMP2, REF_0); | |
306 | |
307 vis_faligndata(TMP2, TMP4, REF_2); | |
308 | |
309 vis_xor(DST_0, REF_0, TMP20); | |
310 | |
311 vis_and(TMP20, MASK_fe, TMP20); | |
312 | |
313 vis_xor(DST_2, REF_2, TMP22); | |
314 vis_mul8x16(CONST_128, TMP20, TMP20); | |
315 | |
316 vis_and(TMP22, MASK_fe, TMP22); | |
317 | |
318 vis_or(DST_0, REF_0, TMP24); | |
319 vis_mul8x16(CONST_128, TMP22, TMP22); | |
320 | |
321 vis_or(DST_2, REF_2, TMP26); | |
322 | |
323 vis_and(TMP20, MASK_7f, TMP20); | |
324 | |
325 vis_and(TMP22, MASK_7f, TMP22); | |
326 | |
327 vis_psub16(TMP24, TMP20, TMP20); | |
328 vis_st64(TMP20, dest[0]); | |
329 | |
330 vis_psub16(TMP26, TMP22, TMP22); | |
331 vis_st64_2(TMP22, dest, 8); | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
332 } |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
333 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
334 static void MC_avg_o_8_vis (uint8_t * dest, const uint8_t * _ref, |
2979 | 335 const int stride, int height) |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
336 { |
2979 | 337 uint8_t *ref = (uint8_t *) _ref; |
338 | |
339 ref = vis_alignaddr(ref); | |
340 | |
341 vis_ld64(ref[0], TMP0); | |
342 | |
343 vis_ld64(ref[8], TMP2); | |
344 | |
345 vis_ld64(dest[0], DST_0); | |
346 | |
347 vis_ld64(constants_fe[0], MASK_fe); | |
348 | |
349 vis_ld64(constants_7f[0], MASK_7f); | |
350 vis_faligndata(TMP0, TMP2, REF_0); | |
351 | |
352 vis_ld64(constants128[0], CONST_128); | |
353 | |
354 ref += stride; | |
355 height = (height >> 1) - 1; | |
356 | |
357 do { /* 12 cycles */ | |
358 vis_ld64(ref[0], TMP0); | |
359 vis_xor(DST_0, REF_0, TMP4); | |
360 | |
361 vis_ld64(ref[8], TMP2); | |
362 vis_and(TMP4, MASK_fe, TMP4); | |
363 | |
364 vis_or(DST_0, REF_0, TMP6); | |
365 vis_ld64_2(dest, stride, DST_0); | |
366 ref += stride; | |
367 vis_mul8x16(CONST_128, TMP4, TMP4); | |
368 | |
369 vis_ld64(ref[0], TMP12); | |
370 vis_faligndata(TMP0, TMP2, REF_0); | |
371 | |
372 vis_ld64(ref[8], TMP2); | |
373 vis_xor(DST_0, REF_0, TMP0); | |
374 ref += stride; | |
375 | |
376 vis_and(TMP0, MASK_fe, TMP0); | |
377 | |
378 vis_and(TMP4, MASK_7f, TMP4); | |
379 | |
380 vis_psub16(TMP6, TMP4, TMP4); | |
381 vis_st64(TMP4, dest[0]); | |
382 dest += stride; | |
383 vis_mul8x16(CONST_128, TMP0, TMP0); | |
384 | |
385 vis_or(DST_0, REF_0, TMP6); | |
386 vis_ld64_2(dest, stride, DST_0); | |
387 | |
388 vis_faligndata(TMP12, TMP2, REF_0); | |
389 | |
390 vis_and(TMP0, MASK_7f, TMP0); | |
391 | |
392 vis_psub16(TMP6, TMP0, TMP4); | |
393 vis_st64(TMP4, dest[0]); | |
394 dest += stride; | |
395 } while (--height); | |
396 | |
397 vis_ld64(ref[0], TMP0); | |
398 vis_xor(DST_0, REF_0, TMP4); | |
399 | |
400 vis_ld64(ref[8], TMP2); | |
401 vis_and(TMP4, MASK_fe, TMP4); | |
402 | |
403 vis_or(DST_0, REF_0, TMP6); | |
404 vis_ld64_2(dest, stride, DST_0); | |
405 vis_mul8x16(CONST_128, TMP4, TMP4); | |
406 | |
407 vis_faligndata(TMP0, TMP2, REF_0); | |
408 | |
409 vis_xor(DST_0, REF_0, TMP0); | |
410 | |
411 vis_and(TMP0, MASK_fe, TMP0); | |
412 | |
413 vis_and(TMP4, MASK_7f, TMP4); | |
414 | |
415 vis_psub16(TMP6, TMP4, TMP4); | |
416 vis_st64(TMP4, dest[0]); | |
417 dest += stride; | |
418 vis_mul8x16(CONST_128, TMP0, TMP0); | |
419 | |
420 vis_or(DST_0, REF_0, TMP6); | |
421 | |
422 vis_and(TMP0, MASK_7f, TMP0); | |
423 | |
424 vis_psub16(TMP6, TMP0, TMP4); | |
425 vis_st64(TMP4, dest[0]); | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
426 } |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
427 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
428 static void MC_put_x_16_vis (uint8_t * dest, const uint8_t * _ref, |
2979 | 429 const int stride, int height) |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
430 { |
2979 | 431 uint8_t *ref = (uint8_t *) _ref; |
432 unsigned long off = (unsigned long) ref & 0x7; | |
433 unsigned long off_plus_1 = off + 1; | |
434 | |
435 ref = vis_alignaddr(ref); | |
436 | |
437 vis_ld64(ref[0], TMP0); | |
438 | |
439 vis_ld64_2(ref, 8, TMP2); | |
440 | |
441 vis_ld64_2(ref, 16, TMP4); | |
442 | |
443 vis_ld64(constants_fe[0], MASK_fe); | |
444 | |
445 vis_ld64(constants_7f[0], MASK_7f); | |
446 vis_faligndata(TMP0, TMP2, REF_0); | |
447 | |
448 vis_ld64(constants128[0], CONST_128); | |
449 vis_faligndata(TMP2, TMP4, REF_4); | |
450 | |
451 if (off != 0x7) { | |
452 vis_alignaddr_g0((void *)off_plus_1); | |
453 vis_faligndata(TMP0, TMP2, REF_2); | |
454 vis_faligndata(TMP2, TMP4, REF_6); | |
455 } else { | |
456 vis_src1(TMP2, REF_2); | |
457 vis_src1(TMP4, REF_6); | |
458 } | |
459 | |
460 ref += stride; | |
461 height = (height >> 1) - 1; | |
462 | |
463 do { /* 34 cycles */ | |
464 vis_ld64(ref[0], TMP0); | |
465 vis_xor(REF_0, REF_2, TMP6); | |
466 | |
467 vis_ld64_2(ref, 8, TMP2); | |
468 vis_xor(REF_4, REF_6, TMP8); | |
469 | |
470 vis_ld64_2(ref, 16, TMP4); | |
471 vis_and(TMP6, MASK_fe, TMP6); | |
472 ref += stride; | |
473 | |
474 vis_ld64(ref[0], TMP14); | |
475 vis_mul8x16(CONST_128, TMP6, TMP6); | |
476 vis_and(TMP8, MASK_fe, TMP8); | |
477 | |
478 vis_ld64_2(ref, 8, TMP16); | |
479 vis_mul8x16(CONST_128, TMP8, TMP8); | |
480 vis_or(REF_0, REF_2, TMP10); | |
481 | |
482 vis_ld64_2(ref, 16, TMP18); | |
483 ref += stride; | |
484 vis_or(REF_4, REF_6, TMP12); | |
485 | |
486 vis_alignaddr_g0((void *)off); | |
487 | |
488 vis_faligndata(TMP0, TMP2, REF_0); | |
489 | |
490 vis_faligndata(TMP2, TMP4, REF_4); | |
491 | |
492 if (off != 0x7) { | |
493 vis_alignaddr_g0((void *)off_plus_1); | |
494 vis_faligndata(TMP0, TMP2, REF_2); | |
495 vis_faligndata(TMP2, TMP4, REF_6); | |
496 } else { | |
497 vis_src1(TMP2, REF_2); | |
498 vis_src1(TMP4, REF_6); | |
499 } | |
500 | |
501 vis_and(TMP6, MASK_7f, TMP6); | |
502 | |
503 vis_and(TMP8, MASK_7f, TMP8); | |
504 | |
505 vis_psub16(TMP10, TMP6, TMP6); | |
506 vis_st64(TMP6, dest[0]); | |
507 | |
508 vis_psub16(TMP12, TMP8, TMP8); | |
509 vis_st64_2(TMP8, dest, 8); | |
510 dest += stride; | |
511 | |
512 vis_xor(REF_0, REF_2, TMP6); | |
513 | |
514 vis_xor(REF_4, REF_6, TMP8); | |
515 | |
516 vis_and(TMP6, MASK_fe, TMP6); | |
517 | |
518 vis_mul8x16(CONST_128, TMP6, TMP6); | |
519 vis_and(TMP8, MASK_fe, TMP8); | |
520 | |
521 vis_mul8x16(CONST_128, TMP8, TMP8); | |
522 vis_or(REF_0, REF_2, TMP10); | |
523 | |
524 vis_or(REF_4, REF_6, TMP12); | |
525 | |
526 vis_alignaddr_g0((void *)off); | |
527 | |
528 vis_faligndata(TMP14, TMP16, REF_0); | |
529 | |
530 vis_faligndata(TMP16, TMP18, REF_4); | |
531 | |
532 if (off != 0x7) { | |
533 vis_alignaddr_g0((void *)off_plus_1); | |
534 vis_faligndata(TMP14, TMP16, REF_2); | |
535 vis_faligndata(TMP16, TMP18, REF_6); | |
536 } else { | |
537 vis_src1(TMP16, REF_2); | |
538 vis_src1(TMP18, REF_6); | |
539 } | |
540 | |
541 vis_and(TMP6, MASK_7f, TMP6); | |
542 | |
543 vis_and(TMP8, MASK_7f, TMP8); | |
544 | |
545 vis_psub16(TMP10, TMP6, TMP6); | |
546 vis_st64(TMP6, dest[0]); | |
547 | |
548 vis_psub16(TMP12, TMP8, TMP8); | |
549 vis_st64_2(TMP8, dest, 8); | |
550 dest += stride; | |
551 } while (--height); | |
552 | |
553 vis_ld64(ref[0], TMP0); | |
554 vis_xor(REF_0, REF_2, TMP6); | |
555 | |
556 vis_ld64_2(ref, 8, TMP2); | |
557 vis_xor(REF_4, REF_6, TMP8); | |
558 | |
559 vis_ld64_2(ref, 16, TMP4); | |
560 vis_and(TMP6, MASK_fe, TMP6); | |
561 | |
562 vis_mul8x16(CONST_128, TMP6, TMP6); | |
563 vis_and(TMP8, MASK_fe, TMP8); | |
564 | |
565 vis_mul8x16(CONST_128, TMP8, TMP8); | |
566 vis_or(REF_0, REF_2, TMP10); | |
567 | |
568 vis_or(REF_4, REF_6, TMP12); | |
569 | |
570 vis_alignaddr_g0((void *)off); | |
571 | |
572 vis_faligndata(TMP0, TMP2, REF_0); | |
573 | |
574 vis_faligndata(TMP2, TMP4, REF_4); | |
575 | |
576 if (off != 0x7) { | |
577 vis_alignaddr_g0((void *)off_plus_1); | |
578 vis_faligndata(TMP0, TMP2, REF_2); | |
579 vis_faligndata(TMP2, TMP4, REF_6); | |
580 } else { | |
581 vis_src1(TMP2, REF_2); | |
582 vis_src1(TMP4, REF_6); | |
583 } | |
584 | |
585 vis_and(TMP6, MASK_7f, TMP6); | |
586 | |
587 vis_and(TMP8, MASK_7f, TMP8); | |
588 | |
589 vis_psub16(TMP10, TMP6, TMP6); | |
590 vis_st64(TMP6, dest[0]); | |
591 | |
592 vis_psub16(TMP12, TMP8, TMP8); | |
593 vis_st64_2(TMP8, dest, 8); | |
594 dest += stride; | |
595 | |
596 vis_xor(REF_0, REF_2, TMP6); | |
597 | |
598 vis_xor(REF_4, REF_6, TMP8); | |
599 | |
600 vis_and(TMP6, MASK_fe, TMP6); | |
601 | |
602 vis_mul8x16(CONST_128, TMP6, TMP6); | |
603 vis_and(TMP8, MASK_fe, TMP8); | |
604 | |
605 vis_mul8x16(CONST_128, TMP8, TMP8); | |
606 vis_or(REF_0, REF_2, TMP10); | |
607 | |
608 vis_or(REF_4, REF_6, TMP12); | |
609 | |
610 vis_and(TMP6, MASK_7f, TMP6); | |
611 | |
612 vis_and(TMP8, MASK_7f, TMP8); | |
613 | |
614 vis_psub16(TMP10, TMP6, TMP6); | |
615 vis_st64(TMP6, dest[0]); | |
616 | |
617 vis_psub16(TMP12, TMP8, TMP8); | |
618 vis_st64_2(TMP8, dest, 8); | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
619 } |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
620 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
621 static void MC_put_x_8_vis (uint8_t * dest, const uint8_t * _ref, |
2979 | 622 const int stride, int height) |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
623 { |
2979 | 624 uint8_t *ref = (uint8_t *) _ref; |
625 unsigned long off = (unsigned long) ref & 0x7; | |
626 unsigned long off_plus_1 = off + 1; | |
627 | |
628 ref = vis_alignaddr(ref); | |
629 | |
630 vis_ld64(ref[0], TMP0); | |
631 | |
632 vis_ld64(ref[8], TMP2); | |
633 | |
634 vis_ld64(constants_fe[0], MASK_fe); | |
635 | |
636 vis_ld64(constants_7f[0], MASK_7f); | |
637 | |
638 vis_ld64(constants128[0], CONST_128); | |
639 vis_faligndata(TMP0, TMP2, REF_0); | |
640 | |
641 if (off != 0x7) { | |
642 vis_alignaddr_g0((void *)off_plus_1); | |
643 vis_faligndata(TMP0, TMP2, REF_2); | |
644 } else { | |
645 vis_src1(TMP2, REF_2); | |
646 } | |
647 | |
648 ref += stride; | |
649 height = (height >> 1) - 1; | |
650 | |
651 do { /* 20 cycles */ | |
652 vis_ld64(ref[0], TMP0); | |
653 vis_xor(REF_0, REF_2, TMP4); | |
654 | |
655 vis_ld64_2(ref, 8, TMP2); | |
656 vis_and(TMP4, MASK_fe, TMP4); | |
657 ref += stride; | |
658 | |
659 vis_ld64(ref[0], TMP8); | |
660 vis_or(REF_0, REF_2, TMP6); | |
661 vis_mul8x16(CONST_128, TMP4, TMP4); | |
662 | |
663 vis_alignaddr_g0((void *)off); | |
664 | |
665 vis_ld64_2(ref, 8, TMP10); | |
666 ref += stride; | |
667 vis_faligndata(TMP0, TMP2, REF_0); | |
668 | |
669 if (off != 0x7) { | |
670 vis_alignaddr_g0((void *)off_plus_1); | |
671 vis_faligndata(TMP0, TMP2, REF_2); | |
672 } else { | |
673 vis_src1(TMP2, REF_2); | |
674 } | |
675 | |
676 vis_and(TMP4, MASK_7f, TMP4); | |
677 | |
678 vis_psub16(TMP6, TMP4, DST_0); | |
679 vis_st64(DST_0, dest[0]); | |
680 dest += stride; | |
681 | |
682 vis_xor(REF_0, REF_2, TMP12); | |
683 | |
684 vis_and(TMP12, MASK_fe, TMP12); | |
685 | |
686 vis_or(REF_0, REF_2, TMP14); | |
687 vis_mul8x16(CONST_128, TMP12, TMP12); | |
688 | |
689 vis_alignaddr_g0((void *)off); | |
690 vis_faligndata(TMP8, TMP10, REF_0); | |
691 if (off != 0x7) { | |
692 vis_alignaddr_g0((void *)off_plus_1); | |
693 vis_faligndata(TMP8, TMP10, REF_2); | |
694 } else { | |
695 vis_src1(TMP10, REF_2); | |
696 } | |
697 | |
698 vis_and(TMP12, MASK_7f, TMP12); | |
699 | |
700 vis_psub16(TMP14, TMP12, DST_0); | |
701 vis_st64(DST_0, dest[0]); | |
702 dest += stride; | |
703 } while (--height); | |
704 | |
705 vis_ld64(ref[0], TMP0); | |
706 vis_xor(REF_0, REF_2, TMP4); | |
707 | |
708 vis_ld64_2(ref, 8, TMP2); | |
709 vis_and(TMP4, MASK_fe, TMP4); | |
710 | |
711 vis_or(REF_0, REF_2, TMP6); | |
712 vis_mul8x16(CONST_128, TMP4, TMP4); | |
713 | |
714 vis_alignaddr_g0((void *)off); | |
715 | |
716 vis_faligndata(TMP0, TMP2, REF_0); | |
717 | |
718 if (off != 0x7) { | |
719 vis_alignaddr_g0((void *)off_plus_1); | |
720 vis_faligndata(TMP0, TMP2, REF_2); | |
721 } else { | |
722 vis_src1(TMP2, REF_2); | |
723 } | |
724 | |
725 vis_and(TMP4, MASK_7f, TMP4); | |
726 | |
727 vis_psub16(TMP6, TMP4, DST_0); | |
728 vis_st64(DST_0, dest[0]); | |
729 dest += stride; | |
730 | |
731 vis_xor(REF_0, REF_2, TMP12); | |
732 | |
733 vis_and(TMP12, MASK_fe, TMP12); | |
734 | |
735 vis_or(REF_0, REF_2, TMP14); | |
736 vis_mul8x16(CONST_128, TMP12, TMP12); | |
737 | |
738 vis_and(TMP12, MASK_7f, TMP12); | |
739 | |
740 vis_psub16(TMP14, TMP12, DST_0); | |
741 vis_st64(DST_0, dest[0]); | |
742 dest += stride; | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
743 } |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
744 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 16-pixel-wide "avg" kernel with horizontal half-pel interpolation, SPARC VIS.
 * For each row, dest[0..15] is replaced by the rounded average of the existing
 * dest pixels and the average of ref[x] and ref[x+1] (names/constants suggest
 * MPEG half-pel motion compensation -- verify against callers).
 *
 * dest   : 8-byte-aligned destination, read-modify-written one row per pass
 * _ref   : source pixels, arbitrary byte alignment (handled via alignaddr /
 *          faligndata); the off == 0x7 case needs vis_src1 because the
 *          "off + 1" shift would wrap past the 8-byte window
 * stride : byte step between rows for both dest and ref
 * height : number of rows; loop runs height times
 *
 * NOTE(review): the instruction stream is hand-scheduled (see the cycle count
 * on the loop) and TMP*/REF*/DST* are overlapping register-alias macros from
 * the VIS support header -- do not reorder statements. */
static void MC_avg_x_16_vis (uint8_t * dest, const uint8_t * _ref,
                             const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;   /* misalignment of ref */
    unsigned long off_plus_1 = off + 1;              /* shift for ref[x+1] */

    /* GSR scale factor 5: pack16 discards the fixed-point fraction added by
     * the mul8x16 expansion below (presumably an overall >>2 with rounding
     * from CONST_3 -- confirm against the constants' definitions). */
    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    vis_ld64(constants3[0], CONST_3);       /* rounding bias */
    vis_fzero(ZERO);
    vis_ld64(constants256_512[0], CONST_256);

    ref = vis_alignaddr(ref);               /* round ref down to 8 bytes */
    do {    /* 26 cycles */
        vis_ld64(ref[0], TMP0);

        vis_ld64(ref[8], TMP2);

        vis_alignaddr_g0((void *)off);

        vis_ld64(ref[16], TMP4);

        vis_ld64(dest[0], DST_0);
        vis_faligndata(TMP0, TMP2, REF_0);  /* ref row, bytes 0..7  */

        vis_ld64(dest[8], DST_2);
        vis_faligndata(TMP2, TMP4, REF_4);  /* ref row, bytes 8..15 */

        if (off != 0x7) {
            /* one-byte-shifted copy for the horizontal interpolation */
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP0, TMP2, REF_2);
            vis_faligndata(TMP2, TMP4, REF_6);
        } else {
            /* off+1 == 8: faligndata cannot shift a full word, use src1 */
            vis_src1(TMP2, REF_2);
            vis_src1(TMP4, REF_6);
        }

        /* Expand bytes to 16-bit lanes, sum ref[x] + ref[x+1] + dest
         * contribution + rounding bias, then scale-pack back to bytes. */
        vis_mul8x16au(REF_0, CONST_256, TMP0);

        vis_pmerge(ZERO, REF_2, TMP4);
        vis_mul8x16au(REF_0_1, CONST_256, TMP2);

        vis_pmerge(ZERO, REF_2_1, TMP6);

        vis_padd16(TMP0, TMP4, TMP0);

        vis_mul8x16al(DST_0, CONST_512, TMP4);
        vis_padd16(TMP2, TMP6, TMP2);

        vis_mul8x16al(DST_1, CONST_512, TMP6);

        vis_mul8x16au(REF_6, CONST_256, TMP12);

        vis_padd16(TMP0, TMP4, TMP0);
        vis_mul8x16au(REF_6_1, CONST_256, TMP14);

        vis_padd16(TMP2, TMP6, TMP2);
        vis_mul8x16au(REF_4, CONST_256, TMP16);

        vis_padd16(TMP0, CONST_3, TMP8);
        vis_mul8x16au(REF_4_1, CONST_256, TMP18);

        vis_padd16(TMP2, CONST_3, TMP10);
        vis_pack16(TMP8, DST_0);

        vis_pack16(TMP10, DST_1);
        vis_padd16(TMP16, TMP12, TMP0);

        vis_st64(DST_0, dest[0]);           /* low 8 output pixels */
        vis_mul8x16al(DST_2, CONST_512, TMP4);
        vis_padd16(TMP18, TMP14, TMP2);

        vis_mul8x16al(DST_3, CONST_512, TMP6);
        vis_padd16(TMP0, CONST_3, TMP0);

        vis_padd16(TMP2, CONST_3, TMP2);

        vis_padd16(TMP0, TMP4, TMP0);

        vis_padd16(TMP2, TMP6, TMP2);
        vis_pack16(TMP0, DST_2);

        vis_pack16(TMP2, DST_3);
        vis_st64(DST_2, dest[8]);           /* high 8 output pixels */

        ref += stride;
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
835 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 8-pixel-wide "avg" kernel with horizontal half-pel interpolation, SPARC VIS.
 * Same arithmetic as MC_avg_x_16_vis but 8 pixels wide; the loop is unrolled
 * 4 rows deep (height >>= 2), so callers must pass a height that is a
 * multiple of 4 -- NOTE(review): not checked here, confirm at call sites.
 *
 * dest   : destination, read-modify-written four rows per iteration
 * _ref   : source pixels, arbitrary alignment (off == 0x7 handled via src1)
 * stride : byte step between rows
 * height : number of rows (multiple of 4 assumed)
 *
 * NOTE(review): hand-scheduled VIS stream; TMP4/TMP5 are deliberately reused
 * as DST_2/DST_3 aliases (see the inline comments carried over from the
 * original) -- do not reorder statements. */
static void MC_avg_x_8_vis (uint8_t * dest, const uint8_t * _ref,
                            const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;
    unsigned long off_plus_1 = off + 1;
    int stride_times_2 = stride << 1;

    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    vis_ld64(constants3[0], CONST_3);
    vis_fzero(ZERO);
    vis_ld64(constants256_512[0], CONST_256);

    ref = vis_alignaddr(ref);
    height >>= 2;                           /* 4 rows per iteration */
    do {    /* 47 cycles */
        /* Load four source rows (aligned pair each). */
        vis_ld64(ref[0], TMP0);

        vis_ld64_2(ref, 8, TMP2);
        ref += stride;

        vis_alignaddr_g0((void *)off);

        vis_ld64(ref[0], TMP4);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(ref, 8, TMP6);
        ref += stride;

        vis_ld64(ref[0], TMP8);

        vis_ld64_2(ref, 8, TMP10);
        ref += stride;
        vis_faligndata(TMP4, TMP6, REF_4);

        vis_ld64(ref[0], TMP12);

        vis_ld64_2(ref, 8, TMP14);
        ref += stride;
        vis_faligndata(TMP8, TMP10, REF_S0);

        vis_faligndata(TMP12, TMP14, REF_S4);

        if (off != 0x7) {
            /* one-byte-shifted copies for ref[x+1] */
            vis_alignaddr_g0((void *)off_plus_1);

            vis_ld64(dest[0], DST_0);
            vis_faligndata(TMP0, TMP2, REF_2);

            vis_ld64_2(dest, stride, DST_2);
            vis_faligndata(TMP4, TMP6, REF_6);

            vis_faligndata(TMP8, TMP10, REF_S2);

            vis_faligndata(TMP12, TMP14, REF_S6);
        } else {
            vis_ld64(dest[0], DST_0);
            vis_src1(TMP2, REF_2);

            vis_ld64_2(dest, stride, DST_2);
            vis_src1(TMP6, REF_6);

            vis_src1(TMP10, REF_S2);

            vis_src1(TMP14, REF_S6);
        }

        /* Rows 0-1: expand, sum, add dest contribution, pack, store. */
        vis_pmerge(ZERO, REF_0, TMP0);
        vis_mul8x16au(REF_0_1, CONST_256, TMP2);

        vis_pmerge(ZERO, REF_2, TMP4);
        vis_mul8x16au(REF_2_1, CONST_256, TMP6);

        vis_padd16(TMP0, CONST_3, TMP0);
        vis_mul8x16al(DST_0, CONST_512, TMP16);

        vis_padd16(TMP2, CONST_3, TMP2);
        vis_mul8x16al(DST_1, CONST_512, TMP18);

        vis_padd16(TMP0, TMP4, TMP0);
        vis_mul8x16au(REF_4, CONST_256, TMP8);

        vis_padd16(TMP2, TMP6, TMP2);
        vis_mul8x16au(REF_4_1, CONST_256, TMP10);

        vis_padd16(TMP0, TMP16, TMP0);
        vis_mul8x16au(REF_6, CONST_256, TMP12);

        vis_padd16(TMP2, TMP18, TMP2);
        vis_mul8x16au(REF_6_1, CONST_256, TMP14);

        vis_padd16(TMP8, CONST_3, TMP8);
        vis_mul8x16al(DST_2, CONST_512, TMP16);

        vis_padd16(TMP8, TMP12, TMP8);
        vis_mul8x16al(DST_3, CONST_512, TMP18);

        vis_padd16(TMP10, TMP14, TMP10);
        vis_pack16(TMP0, DST_0);

        vis_pack16(TMP2, DST_1);
        vis_st64(DST_0, dest[0]);
        dest += stride;
        vis_padd16(TMP10, CONST_3, TMP10);

        /* Pre-load dest rows 2-3 while finishing row 1. */
        vis_ld64_2(dest, stride, DST_0);
        vis_padd16(TMP8, TMP16, TMP8);

        vis_ld64_2(dest, stride_times_2, TMP4/*DST_2*/);
        vis_padd16(TMP10, TMP18, TMP10);
        vis_pack16(TMP8, DST_2);

        vis_pack16(TMP10, DST_3);
        vis_st64(DST_2, dest[0]);
        dest += stride;

        /* Rows 2-3: same sequence on the REF_S* shifted copies. */
        vis_mul8x16au(REF_S0_1, CONST_256, TMP2);
        vis_pmerge(ZERO, REF_S0, TMP0);

        vis_pmerge(ZERO, REF_S2, TMP24);
        vis_mul8x16au(REF_S2_1, CONST_256, TMP6);

        vis_padd16(TMP0, CONST_3, TMP0);
        vis_mul8x16au(REF_S4, CONST_256, TMP8);

        vis_padd16(TMP2, CONST_3, TMP2);
        vis_mul8x16au(REF_S4_1, CONST_256, TMP10);

        vis_padd16(TMP0, TMP24, TMP0);
        vis_mul8x16au(REF_S6, CONST_256, TMP12);

        vis_padd16(TMP2, TMP6, TMP2);
        vis_mul8x16au(REF_S6_1, CONST_256, TMP14);

        vis_padd16(TMP8, CONST_3, TMP8);
        vis_mul8x16al(DST_0, CONST_512, TMP16);

        vis_padd16(TMP10, CONST_3, TMP10);
        vis_mul8x16al(DST_1, CONST_512, TMP18);

        vis_padd16(TMP8, TMP12, TMP8);
        vis_mul8x16al(TMP4/*DST_2*/, CONST_512, TMP20);

        vis_mul8x16al(TMP5/*DST_3*/, CONST_512, TMP22);
        vis_padd16(TMP0, TMP16, TMP0);

        vis_padd16(TMP2, TMP18, TMP2);
        vis_pack16(TMP0, DST_0);

        vis_padd16(TMP10, TMP14, TMP10);
        vis_pack16(TMP2, DST_1);
        vis_st64(DST_0, dest[0]);
        dest += stride;

        vis_padd16(TMP8, TMP20, TMP8);

        vis_padd16(TMP10, TMP22, TMP10);
        vis_pack16(TMP8, DST_2);

        vis_pack16(TMP10, DST_3);
        vis_st64(DST_2, dest[0]);
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
1001 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 16-pixel-wide "put" kernel with vertical half-pel interpolation, SPARC VIS.
 * Each output row is the rounded byte-wise average of two consecutive source
 * rows, computed without the dest contribution ("put" overwrites dest).
 *
 * The average uses the classic carry-free identity
 *     avg(a,b) = (a | b) - (((a ^ b) & 0xfe) >> 1)
 * where the >>1 is realized with vis_mul8x16(CONST_128, ...) (multiply by
 * 128/256) and the result is clipped with MASK_7f.
 *
 * dest   : 8-byte-aligned destination, written 16 bytes per row
 * _ref   : source pixels, arbitrary alignment (faligndata re-alignment)
 * stride : byte step between rows
 * height : number of rows; the loop body emits 2 rows per pass and the final
 *          2 rows are handled by the software-pipelined epilogue after the
 *          loop (hence height = (height >> 1) - 1)
 *
 * NOTE(review): hand-scheduled VIS stream -- do not reorder statements. */
static void MC_put_y_16_vis (uint8_t * dest, const uint8_t * _ref,
                             const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;

    /* Prologue: prime REF_0/REF_4 (row N) and REF_2/REF_6 (row N+1). */
    ref = vis_alignaddr(ref);
    vis_ld64(ref[0], TMP0);

    vis_ld64_2(ref, 8, TMP2);

    vis_ld64_2(ref, 16, TMP4);
    ref += stride;

    vis_ld64(ref[0], TMP6);
    vis_faligndata(TMP0, TMP2, REF_0);

    vis_ld64_2(ref, 8, TMP8);
    vis_faligndata(TMP2, TMP4, REF_4);

    vis_ld64_2(ref, 16, TMP10);
    ref += stride;

    vis_ld64(constants_fe[0], MASK_fe);
    vis_faligndata(TMP6, TMP8, REF_2);

    vis_ld64(constants_7f[0], MASK_7f);
    vis_faligndata(TMP8, TMP10, REF_6);

    vis_ld64(constants128[0], CONST_128);
    height = (height >> 1) - 1;             /* epilogue handles last 2 rows */
    do {    /* 24 cycles */
        vis_ld64(ref[0], TMP0);
        vis_xor(REF_0, REF_2, TMP12);

        vis_ld64_2(ref, 8, TMP2);
        vis_xor(REF_4, REF_6, TMP16);

        vis_ld64_2(ref, 16, TMP4);
        ref += stride;
        vis_or(REF_0, REF_2, TMP14);

        vis_ld64(ref[0], TMP6);
        vis_or(REF_4, REF_6, TMP18);

        vis_ld64_2(ref, 8, TMP8);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(ref, 16, TMP10);
        ref += stride;
        vis_faligndata(TMP2, TMP4, REF_4);

        vis_and(TMP12, MASK_fe, TMP12);

        vis_and(TMP16, MASK_fe, TMP16);
        vis_mul8x16(CONST_128, TMP12, TMP12);   /* (x & 0xfe) >> 1 */

        vis_mul8x16(CONST_128, TMP16, TMP16);
        vis_xor(REF_0, REF_2, TMP0);

        vis_xor(REF_4, REF_6, TMP2);

        vis_or(REF_0, REF_2, TMP20);

        vis_and(TMP12, MASK_7f, TMP12);

        vis_and(TMP16, MASK_7f, TMP16);

        vis_psub16(TMP14, TMP12, TMP12);
        vis_st64(TMP12, dest[0]);               /* first output row */

        vis_psub16(TMP18, TMP16, TMP16);
        vis_st64_2(TMP16, dest, 8);
        dest += stride;

        vis_or(REF_4, REF_6, TMP18);

        vis_and(TMP0, MASK_fe, TMP0);

        vis_and(TMP2, MASK_fe, TMP2);
        vis_mul8x16(CONST_128, TMP0, TMP0);

        vis_faligndata(TMP6, TMP8, REF_2);
        vis_mul8x16(CONST_128, TMP2, TMP2);

        vis_faligndata(TMP8, TMP10, REF_6);

        vis_and(TMP0, MASK_7f, TMP0);

        vis_and(TMP2, MASK_7f, TMP2);

        vis_psub16(TMP20, TMP0, TMP0);
        vis_st64(TMP0, dest[0]);                /* second output row */

        vis_psub16(TMP18, TMP2, TMP2);
        vis_st64_2(TMP2, dest, 8);
        dest += stride;
    } while (--height);

    /* Epilogue: last two output rows (only one more source row needed). */
    vis_ld64(ref[0], TMP0);
    vis_xor(REF_0, REF_2, TMP12);

    vis_ld64_2(ref, 8, TMP2);
    vis_xor(REF_4, REF_6, TMP16);

    vis_ld64_2(ref, 16, TMP4);
    vis_or(REF_0, REF_2, TMP14);

    vis_or(REF_4, REF_6, TMP18);

    vis_faligndata(TMP0, TMP2, REF_0);

    vis_faligndata(TMP2, TMP4, REF_4);

    vis_and(TMP12, MASK_fe, TMP12);

    vis_and(TMP16, MASK_fe, TMP16);
    vis_mul8x16(CONST_128, TMP12, TMP12);

    vis_mul8x16(CONST_128, TMP16, TMP16);
    vis_xor(REF_0, REF_2, TMP0);

    vis_xor(REF_4, REF_6, TMP2);

    vis_or(REF_0, REF_2, TMP20);

    vis_and(TMP12, MASK_7f, TMP12);

    vis_and(TMP16, MASK_7f, TMP16);

    vis_psub16(TMP14, TMP12, TMP12);
    vis_st64(TMP12, dest[0]);

    vis_psub16(TMP18, TMP16, TMP16);
    vis_st64_2(TMP16, dest, 8);
    dest += stride;

    vis_or(REF_4, REF_6, TMP18);

    vis_and(TMP0, MASK_fe, TMP0);

    vis_and(TMP2, MASK_fe, TMP2);
    vis_mul8x16(CONST_128, TMP0, TMP0);

    vis_mul8x16(CONST_128, TMP2, TMP2);

    vis_and(TMP0, MASK_7f, TMP0);

    vis_and(TMP2, MASK_7f, TMP2);

    vis_psub16(TMP20, TMP0, TMP0);
    vis_st64(TMP0, dest[0]);

    vis_psub16(TMP18, TMP2, TMP2);
    vis_st64_2(TMP2, dest, 8);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
1157 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 8-pixel-wide "put" kernel with vertical half-pel interpolation, SPARC VIS.
 * Same rounded-average identity as MC_put_y_16_vis
 *     avg(a,b) = (a | b) - (((a ^ b) & 0xfe) >> 1)
 * but 8 bytes per row.  The loop emits 2 rows per pass; the final 2 rows are
 * produced by the software-pipelined epilogue (height = (height >> 1) - 1).
 *
 * dest   : destination, overwritten 8 bytes per row
 * _ref   : source pixels, arbitrary alignment
 * stride : byte step between rows
 * height : number of rows (even height assumed -- confirm at call sites)
 *
 * NOTE(review): hand-scheduled VIS stream -- do not reorder statements. */
static void MC_put_y_8_vis (uint8_t * dest, const uint8_t * _ref,
                            const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;

    /* Prologue: prime REF_0 (row N) and REF_2 (row N+1). */
    ref = vis_alignaddr(ref);
    vis_ld64(ref[0], TMP0);

    vis_ld64_2(ref, 8, TMP2);
    ref += stride;

    vis_ld64(ref[0], TMP4);

    vis_ld64_2(ref, 8, TMP6);
    ref += stride;

    vis_ld64(constants_fe[0], MASK_fe);
    vis_faligndata(TMP0, TMP2, REF_0);

    vis_ld64(constants_7f[0], MASK_7f);
    vis_faligndata(TMP4, TMP6, REF_2);

    vis_ld64(constants128[0], CONST_128);
    height = (height >> 1) - 1;
    do {    /* 12 cycles */
        vis_ld64(ref[0], TMP0);
        vis_xor(REF_0, REF_2, TMP4);

        vis_ld64_2(ref, 8, TMP2);
        ref += stride;
        vis_and(TMP4, MASK_fe, TMP4);

        vis_or(REF_0, REF_2, TMP6);
        vis_mul8x16(CONST_128, TMP4, TMP4);     /* (x & 0xfe) >> 1 */

        vis_faligndata(TMP0, TMP2, REF_0);
        vis_ld64(ref[0], TMP0);

        vis_ld64_2(ref, 8, TMP2);
        ref += stride;
        vis_xor(REF_0, REF_2, TMP12);

        vis_and(TMP4, MASK_7f, TMP4);

        vis_and(TMP12, MASK_fe, TMP12);

        vis_mul8x16(CONST_128, TMP12, TMP12);
        vis_or(REF_0, REF_2, TMP14);

        vis_psub16(TMP6, TMP4, DST_0);
        vis_st64(DST_0, dest[0]);               /* first output row */
        dest += stride;

        vis_faligndata(TMP0, TMP2, REF_2);

        vis_and(TMP12, MASK_7f, TMP12);

        vis_psub16(TMP14, TMP12, DST_0);
        vis_st64(DST_0, dest[0]);               /* second output row */
        dest += stride;
    } while (--height);

    /* Epilogue: last two output rows. */
    vis_ld64(ref[0], TMP0);
    vis_xor(REF_0, REF_2, TMP4);

    vis_ld64_2(ref, 8, TMP2);
    vis_and(TMP4, MASK_fe, TMP4);

    vis_or(REF_0, REF_2, TMP6);
    vis_mul8x16(CONST_128, TMP4, TMP4);

    vis_faligndata(TMP0, TMP2, REF_0);

    vis_xor(REF_0, REF_2, TMP12);

    vis_and(TMP4, MASK_7f, TMP4);

    vis_and(TMP12, MASK_fe, TMP12);

    vis_mul8x16(CONST_128, TMP12, TMP12);
    vis_or(REF_0, REF_2, TMP14);

    vis_psub16(TMP6, TMP4, DST_0);
    vis_st64(DST_0, dest[0]);
    dest += stride;

    vis_and(TMP12, MASK_7f, TMP12);

    vis_psub16(TMP14, TMP12, DST_0);
    vis_st64(DST_0, dest[0]);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
1249 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 16-pixel-wide "avg" kernel with vertical half-pel interpolation, SPARC VIS.
 * Each output row is a rounded combination of two consecutive source rows and
 * the existing dest pixels (avg semantics).  Two output rows per loop pass
 * (height >>= 1; even height assumed -- confirm at call sites).
 *
 * dest   : destination, read-modify-written 16 bytes per row
 * _ref   : source pixels, arbitrary alignment (stride_8/stride_16 are the
 *          offsets of the 2nd/3rd aligned double of the NEXT row)
 * stride : byte step between rows
 * height : number of rows
 *
 * NOTE(review): hand-scheduled VIS stream; REF_S0/REF_S2/REF_S4/REF_S6 are
 * reused as extra DST/temporary registers (see the inline alias comments) --
 * do not reorder statements. */
static void MC_avg_y_16_vis (uint8_t * dest, const uint8_t * _ref,
                             const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    int stride_8 = stride + 8;
    int stride_16 = stride + 16;

    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    ref = vis_alignaddr(ref);

    /* Prologue: prime REF_2/REF_6 with the first source row. */
    vis_ld64(ref[ 0], TMP0);
    vis_fzero(ZERO);

    vis_ld64(ref[ 8], TMP2);

    vis_ld64(ref[16], TMP4);

    vis_ld64(constants3[0], CONST_3);
    vis_faligndata(TMP0, TMP2, REF_2);

    vis_ld64(constants256_512[0], CONST_256);
    vis_faligndata(TMP2, TMP4, REF_6);
    height >>= 1;

    do {    /* 31 cycles */
        vis_ld64_2(ref, stride, TMP0);
        vis_pmerge(ZERO, REF_2, TMP12);
        vis_mul8x16au(REF_2_1, CONST_256, TMP14);

        vis_ld64_2(ref, stride_8, TMP2);
        vis_pmerge(ZERO, REF_6, TMP16);
        vis_mul8x16au(REF_6_1, CONST_256, TMP18);

        vis_ld64_2(ref, stride_16, TMP4);
        ref += stride;

        vis_ld64(dest[0], DST_0);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(dest, 8, DST_2);
        vis_faligndata(TMP2, TMP4, REF_4);

        vis_ld64_2(ref, stride, TMP6);
        vis_pmerge(ZERO, REF_0, TMP0);
        vis_mul8x16au(REF_0_1, CONST_256, TMP2);

        vis_ld64_2(ref, stride_8, TMP8);
        vis_pmerge(ZERO, REF_4, TMP4);

        vis_ld64_2(ref, stride_16, TMP10);
        ref += stride;

        vis_ld64_2(dest, stride, REF_S0/*DST_4*/);
        vis_faligndata(TMP6, TMP8, REF_2);
        vis_mul8x16au(REF_4_1, CONST_256, TMP6);

        vis_ld64_2(dest, stride_8, REF_S2/*DST_6*/);
        vis_faligndata(TMP8, TMP10, REF_6);
        vis_mul8x16al(DST_0, CONST_512, TMP20);

        vis_padd16(TMP0, CONST_3, TMP0);        /* rounding bias */
        vis_mul8x16al(DST_1, CONST_512, TMP22);

        vis_padd16(TMP2, CONST_3, TMP2);
        vis_mul8x16al(DST_2, CONST_512, TMP24);

        vis_padd16(TMP4, CONST_3, TMP4);
        vis_mul8x16al(DST_3, CONST_512, TMP26);

        vis_padd16(TMP6, CONST_3, TMP6);

        vis_padd16(TMP12, TMP20, TMP12);
        vis_mul8x16al(REF_S0, CONST_512, TMP20);

        vis_padd16(TMP14, TMP22, TMP14);
        vis_mul8x16al(REF_S0_1, CONST_512, TMP22);

        vis_padd16(TMP16, TMP24, TMP16);
        vis_mul8x16al(REF_S2, CONST_512, TMP24);

        vis_padd16(TMP18, TMP26, TMP18);
        vis_mul8x16al(REF_S2_1, CONST_512, TMP26);

        vis_padd16(TMP12, TMP0, TMP12);
        vis_mul8x16au(REF_2, CONST_256, TMP28);

        vis_padd16(TMP14, TMP2, TMP14);
        vis_mul8x16au(REF_2_1, CONST_256, TMP30);

        vis_padd16(TMP16, TMP4, TMP16);
        vis_mul8x16au(REF_6, CONST_256, REF_S4);

        vis_padd16(TMP18, TMP6, TMP18);
        vis_mul8x16au(REF_6_1, CONST_256, REF_S6);

        vis_pack16(TMP12, DST_0);
        vis_padd16(TMP28, TMP0, TMP12);

        vis_pack16(TMP14, DST_1);
        vis_st64(DST_0, dest[0]);               /* first output row, low */
        vis_padd16(TMP30, TMP2, TMP14);

        vis_pack16(TMP16, DST_2);
        vis_padd16(REF_S4, TMP4, TMP16);

        vis_pack16(TMP18, DST_3);
        vis_st64_2(DST_2, dest, 8);             /* first output row, high */
        dest += stride;
        vis_padd16(REF_S6, TMP6, TMP18);

        vis_padd16(TMP12, TMP20, TMP12);

        vis_padd16(TMP14, TMP22, TMP14);
        vis_pack16(TMP12, DST_0);

        vis_padd16(TMP16, TMP24, TMP16);
        vis_pack16(TMP14, DST_1);
        vis_st64(DST_0, dest[0]);               /* second output row, low */

        vis_padd16(TMP18, TMP26, TMP18);
        vis_pack16(TMP16, DST_2);

        vis_pack16(TMP18, DST_3);
        vis_st64_2(DST_2, dest, 8);             /* second output row, high */
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
1378 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 8-pixel-wide "avg" kernel with vertical half-pel interpolation, SPARC VIS.
 * Same arithmetic as MC_avg_y_16_vis but 8 pixels wide; two output rows per
 * loop pass (height >>= 1; even height assumed -- confirm at call sites).
 *
 * dest   : destination, read-modify-written 8 bytes per row
 * _ref   : source pixels, arbitrary alignment
 * stride : byte step between rows
 * height : number of rows
 *
 * NOTE(review): hand-scheduled VIS stream -- do not reorder statements. */
static void MC_avg_y_8_vis (uint8_t * dest, const uint8_t * _ref,
                            const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    int stride_8 = stride + 8;

    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    ref = vis_alignaddr(ref);

    /* Prologue: prime REF_2 with the first source row. */
    vis_ld64(ref[ 0], TMP0);
    vis_fzero(ZERO);

    vis_ld64(ref[ 8], TMP2);

    vis_ld64(constants3[0], CONST_3);
    vis_faligndata(TMP0, TMP2, REF_2);

    vis_ld64(constants256_512[0], CONST_256);

    height >>= 1;
    do {    /* 20 cycles */
        vis_ld64_2(ref, stride, TMP0);
        vis_pmerge(ZERO, REF_2, TMP8);
        vis_mul8x16au(REF_2_1, CONST_256, TMP10);

        vis_ld64_2(ref, stride_8, TMP2);
        ref += stride;

        vis_ld64(dest[0], DST_0);

        vis_ld64_2(dest, stride, DST_2);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(ref, stride, TMP4);
        vis_mul8x16al(DST_0, CONST_512, TMP16);
        vis_pmerge(ZERO, REF_0, TMP12);

        vis_ld64_2(ref, stride_8, TMP6);
        ref += stride;
        vis_mul8x16al(DST_1, CONST_512, TMP18);
        vis_pmerge(ZERO, REF_0_1, TMP14);

        vis_padd16(TMP12, CONST_3, TMP12);      /* rounding bias */
        vis_mul8x16al(DST_2, CONST_512, TMP24);

        vis_padd16(TMP14, CONST_3, TMP14);
        vis_mul8x16al(DST_3, CONST_512, TMP26);

        vis_faligndata(TMP4, TMP6, REF_2);

        vis_padd16(TMP8, TMP12, TMP8);

        vis_padd16(TMP10, TMP14, TMP10);
        vis_mul8x16au(REF_2, CONST_256, TMP20);

        vis_padd16(TMP8, TMP16, TMP0);
        vis_mul8x16au(REF_2_1, CONST_256, TMP22);

        vis_padd16(TMP10, TMP18, TMP2);
        vis_pack16(TMP0, DST_0);

        vis_pack16(TMP2, DST_1);
        vis_st64(DST_0, dest[0]);               /* first output row */
        dest += stride;
        vis_padd16(TMP12, TMP20, TMP12);

        vis_padd16(TMP14, TMP22, TMP14);

        vis_padd16(TMP12, TMP24, TMP0);

        vis_padd16(TMP14, TMP26, TMP2);
        vis_pack16(TMP0, DST_2);

        vis_pack16(TMP2, DST_3);
        vis_st64(DST_2, dest[0]);               /* second output row */
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
1458 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 16-pixel-wide "put" kernel with combined horizontal + vertical half-pel
 * interpolation, SPARC VIS.  Each output pixel combines four source pixels
 * (x,y), (x+1,y), (x,y+1), (x+1,y+1) with CONST_2 as the rounding bias
 * (presumably (a+b+c+d+2)>>2 -- confirm against the constants/GSR scale).
 * Two output rows per loop pass (height >>= 1).
 *
 * dest   : destination, overwritten 16 bytes per row
 * _ref   : source pixels, arbitrary alignment; off == 0x7 uses vis_src1
 *          because the off+1 shift would exceed the faligndata window
 * stride : byte step between rows
 * height : number of rows (even height assumed -- confirm at call sites)
 *
 * NOTE(review): hand-scheduled VIS stream; REF_S* hold the previous row's
 * (shifted) pixels across iterations -- do not reorder statements. */
static void MC_put_xy_16_vis (uint8_t * dest, const uint8_t * _ref,
                              const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;
    unsigned long off_plus_1 = off + 1;
    int stride_8 = stride + 8;
    int stride_16 = stride + 16;

    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    ref = vis_alignaddr(ref);

    /* Prologue: prime REF_S0/REF_S4 (row 0) and REF_S2/REF_S6 (row 0, +1px). */
    vis_ld64(ref[ 0], TMP0);
    vis_fzero(ZERO);

    vis_ld64(ref[ 8], TMP2);

    vis_ld64(ref[16], TMP4);

    vis_ld64(constants2[0], CONST_2);
    vis_faligndata(TMP0, TMP2, REF_S0);

    vis_ld64(constants256_512[0], CONST_256);
    vis_faligndata(TMP2, TMP4, REF_S4);

    if (off != 0x7) {
        vis_alignaddr_g0((void *)off_plus_1);
        vis_faligndata(TMP0, TMP2, REF_S2);
        vis_faligndata(TMP2, TMP4, REF_S6);
    } else {
        vis_src1(TMP2, REF_S2);
        vis_src1(TMP4, REF_S6);
    }

    height >>= 1;
    do {
        vis_ld64_2(ref, stride, TMP0);
        vis_mul8x16au(REF_S0, CONST_256, TMP12);
        vis_pmerge(ZERO, REF_S0_1, TMP14);

        vis_alignaddr_g0((void *)off);

        vis_ld64_2(ref, stride_8, TMP2);
        vis_mul8x16au(REF_S2, CONST_256, TMP16);
        vis_pmerge(ZERO, REF_S2_1, TMP18);

        vis_ld64_2(ref, stride_16, TMP4);
        ref += stride;
        vis_mul8x16au(REF_S4, CONST_256, TMP20);
        vis_pmerge(ZERO, REF_S4_1, TMP22);

        vis_ld64_2(ref, stride, TMP6);
        vis_mul8x16au(REF_S6, CONST_256, TMP24);
        vis_pmerge(ZERO, REF_S6_1, TMP26);

        vis_ld64_2(ref, stride_8, TMP8);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(ref, stride_16, TMP10);
        ref += stride;
        vis_faligndata(TMP2, TMP4, REF_4);

        vis_faligndata(TMP6, TMP8, REF_S0);

        vis_faligndata(TMP8, TMP10, REF_S4);

        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP0, TMP2, REF_2);
            vis_faligndata(TMP2, TMP4, REF_6);
            vis_faligndata(TMP6, TMP8, REF_S2);
            vis_faligndata(TMP8, TMP10, REF_S6);
        } else {
            vis_src1(TMP2, REF_2);
            vis_src1(TMP4, REF_6);
            vis_src1(TMP8, REF_S2);
            vis_src1(TMP10, REF_S6);
        }

        /* Accumulate the four taps for the first output row, pack, store. */
        vis_mul8x16au(REF_0, CONST_256, TMP0);
        vis_pmerge(ZERO, REF_0_1, TMP2);

        vis_mul8x16au(REF_2, CONST_256, TMP4);
        vis_pmerge(ZERO, REF_2_1, TMP6);

        vis_padd16(TMP0, CONST_2, TMP8);
        vis_mul8x16au(REF_4, CONST_256, TMP0);

        vis_padd16(TMP2, CONST_2, TMP10);
        vis_mul8x16au(REF_4_1, CONST_256, TMP2);

        vis_padd16(TMP8, TMP4, TMP8);
        vis_mul8x16au(REF_6, CONST_256, TMP4);

        vis_padd16(TMP10, TMP6, TMP10);
        vis_mul8x16au(REF_6_1, CONST_256, TMP6);

        vis_padd16(TMP12, TMP8, TMP12);

        vis_padd16(TMP14, TMP10, TMP14);

        vis_padd16(TMP12, TMP16, TMP12);

        vis_padd16(TMP14, TMP18, TMP14);
        vis_pack16(TMP12, DST_0);

        vis_pack16(TMP14, DST_1);
        vis_st64(DST_0, dest[0]);               /* row 1, low 8 pixels */
        vis_padd16(TMP0, CONST_2, TMP12);

        vis_mul8x16au(REF_S0, CONST_256, TMP0);
        vis_padd16(TMP2, CONST_2, TMP14);

        vis_mul8x16au(REF_S0_1, CONST_256, TMP2);
        vis_padd16(TMP12, TMP4, TMP12);

        vis_mul8x16au(REF_S2, CONST_256, TMP4);
        vis_padd16(TMP14, TMP6, TMP14);

        vis_mul8x16au(REF_S2_1, CONST_256, TMP6);
        vis_padd16(TMP20, TMP12, TMP20);

        vis_padd16(TMP22, TMP14, TMP22);

        vis_padd16(TMP20, TMP24, TMP20);

        vis_padd16(TMP22, TMP26, TMP22);
        vis_pack16(TMP20, DST_2);

        vis_pack16(TMP22, DST_3);
        vis_st64_2(DST_2, dest, 8);             /* row 1, high 8 pixels */
        dest += stride;
        vis_padd16(TMP0, TMP4, TMP24);

        /* Second output row reuses the partial sums from above. */
        vis_mul8x16au(REF_S4, CONST_256, TMP0);
        vis_padd16(TMP2, TMP6, TMP26);

        vis_mul8x16au(REF_S4_1, CONST_256, TMP2);
        vis_padd16(TMP24, TMP8, TMP24);

        vis_padd16(TMP26, TMP10, TMP26);
        vis_pack16(TMP24, DST_0);

        vis_pack16(TMP26, DST_1);
        vis_st64(DST_0, dest[0]);               /* row 2, low 8 pixels */
        vis_pmerge(ZERO, REF_S6, TMP4);

        vis_pmerge(ZERO, REF_S6_1, TMP6);

        vis_padd16(TMP0, TMP4, TMP0);

        vis_padd16(TMP2, TMP6, TMP2);

        vis_padd16(TMP0, TMP12, TMP0);

        vis_padd16(TMP2, TMP14, TMP2);
        vis_pack16(TMP0, DST_2);

        vis_pack16(TMP2, DST_3);
        vis_st64_2(DST_2, dest, 8);             /* row 2, high 8 pixels */
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
1623 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 8-pixel-wide "put" with half-pel interpolation in both x and y,
 * with rounding: each output pixel combines four neighbouring source
 * pixels plus a bias of 2 (CONST_2), scaled down via the GSR scale
 * factor of 5 before fpack16.
 * Processes two output rows per loop iteration, so height is assumed
 * to be even (it is halved before the loop) — TODO confirm callers
 * always pass even heights.
 * NOTE(review): the REF_S0/REF_S2 pair holds the current row at
 * offsets x and x+1; the off == 0x7 special case uses vis_src1
 * because faligndata cannot produce an 8-byte shift. */
static void MC_put_xy_8_vis (uint8_t * dest, const uint8_t * _ref,
                             const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;  /* misalignment of ref */
    unsigned long off_plus_1 = off + 1;             /* x+1 neighbour alignment */
    int stride_8 = stride + 8;

    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    ref = vis_alignaddr(ref);

    /* Prime REF_S0/REF_S2 with the row above the first output row. */
    vis_ld64(ref[ 0], TMP0);
    vis_fzero(ZERO);

    vis_ld64(ref[ 8], TMP2);

    vis_ld64(constants2[0], CONST_2);

    vis_ld64(constants256_512[0], CONST_256);
    vis_faligndata(TMP0, TMP2, REF_S0);

    if (off != 0x7) {
        vis_alignaddr_g0((void *)off_plus_1);
        vis_faligndata(TMP0, TMP2, REF_S2);
    } else {
        vis_src1(TMP2, REF_S2);
    }

    height >>= 1;
    do {    /* 26 cycles */
        vis_ld64_2(ref, stride, TMP0);
        vis_mul8x16au(REF_S0, CONST_256, TMP8);
        vis_pmerge(ZERO, REF_S2, TMP12);

        vis_alignaddr_g0((void *)off);

        vis_ld64_2(ref, stride_8, TMP2);
        ref += stride;
        vis_mul8x16au(REF_S0_1, CONST_256, TMP10);
        vis_pmerge(ZERO, REF_S2_1, TMP14);

        vis_ld64_2(ref, stride, TMP4);

        vis_ld64_2(ref, stride_8, TMP6);
        ref += stride;
        vis_faligndata(TMP0, TMP2, REF_S4);

        vis_pmerge(ZERO, REF_S4, TMP18);

        vis_pmerge(ZERO, REF_S4_1, TMP20);

        vis_faligndata(TMP4, TMP6, REF_S0);

        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP0, TMP2, REF_S6);
            vis_faligndata(TMP4, TMP6, REF_S2);
        } else {
            vis_src1(TMP2, REF_S6);
            vis_src1(TMP6, REF_S2);
        }

        /* Widen to 16 bits, add the rounding bias, then sum the four
         * taps for each of the two output rows. */
        vis_padd16(TMP18, CONST_2, TMP18);
        vis_mul8x16au(REF_S6, CONST_256, TMP22);

        vis_padd16(TMP20, CONST_2, TMP20);
        vis_mul8x16au(REF_S6_1, CONST_256, TMP24);

        vis_mul8x16au(REF_S0, CONST_256, TMP26);
        vis_pmerge(ZERO, REF_S0_1, TMP28);

        vis_mul8x16au(REF_S2, CONST_256, TMP30);
        vis_padd16(TMP18, TMP22, TMP18);

        vis_mul8x16au(REF_S2_1, CONST_256, TMP32);
        vis_padd16(TMP20, TMP24, TMP20);

        vis_padd16(TMP8, TMP18, TMP8);

        vis_padd16(TMP10, TMP20, TMP10);

        vis_padd16(TMP8, TMP12, TMP8);

        vis_padd16(TMP10, TMP14, TMP10);
        vis_pack16(TMP8, DST_0);

        vis_pack16(TMP10, DST_1);
        vis_st64(DST_0, dest[0]);
        dest += stride;
        vis_padd16(TMP18, TMP26, TMP18);

        vis_padd16(TMP20, TMP28, TMP20);

        vis_padd16(TMP18, TMP30, TMP18);

        vis_padd16(TMP20, TMP32, TMP20);
        vis_pack16(TMP18, DST_2);

        vis_pack16(TMP20, DST_3);
        vis_st64(DST_2, dest[0]);
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
1728 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 16-pixel-wide "avg" with half-pel interpolation in both x and y,
 * with rounding: the four-tap source sum gets a bias of 6 (CONST_6)
 * and is averaged with the existing destination (read via DST_* and
 * weighted with CONST_1024), scaled down via GSR scale factor 4.
 * Two output rows are produced per iteration (height is halved), so
 * height is assumed even — TODO confirm callers.
 * NOTE(review): REF_S* hold the previous row's x / x+1 pixels, REF_*
 * the current row's; the register allocation is extremely tight and
 * several REF_* regs are reused as accumulators mid-loop, so the
 * statement order below must not be changed. */
static void MC_avg_xy_16_vis (uint8_t * dest, const uint8_t * _ref,
                              const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;  /* misalignment of ref */
    unsigned long off_plus_1 = off + 1;             /* x+1 neighbour alignment */
    int stride_8 = stride + 8;
    int stride_16 = stride + 16;

    vis_set_gsr(4 << VIS_GSR_SCALEFACT_SHIFT);

    ref = vis_alignaddr(ref);

    /* Prime REF_S0..REF_S6 with the row above the first output row. */
    vis_ld64(ref[ 0], TMP0);
    vis_fzero(ZERO);

    vis_ld64(ref[ 8], TMP2);

    vis_ld64(ref[16], TMP4);

    vis_ld64(constants6[0], CONST_6);
    vis_faligndata(TMP0, TMP2, REF_S0);

    vis_ld64(constants256_1024[0], CONST_256);
    vis_faligndata(TMP2, TMP4, REF_S4);

    if (off != 0x7) {
        vis_alignaddr_g0((void *)off_plus_1);
        vis_faligndata(TMP0, TMP2, REF_S2);
        vis_faligndata(TMP2, TMP4, REF_S6);
    } else {
        /* faligndata cannot express an 8-byte shift; use vis_src1. */
        vis_src1(TMP2, REF_S2);
        vis_src1(TMP4, REF_S6);
    }

    height >>= 1;
    do {    /* 55 cycles */
        vis_ld64_2(ref, stride, TMP0);
        vis_mul8x16au(REF_S0, CONST_256, TMP12);
        vis_pmerge(ZERO, REF_S0_1, TMP14);

        vis_alignaddr_g0((void *)off);

        vis_ld64_2(ref, stride_8, TMP2);
        vis_mul8x16au(REF_S2, CONST_256, TMP16);
        vis_pmerge(ZERO, REF_S2_1, TMP18);

        vis_ld64_2(ref, stride_16, TMP4);
        ref += stride;
        vis_mul8x16au(REF_S4, CONST_256, TMP20);
        vis_pmerge(ZERO, REF_S4_1, TMP22);

        vis_ld64_2(ref, stride, TMP6);
        vis_mul8x16au(REF_S6, CONST_256, TMP24);
        vis_pmerge(ZERO, REF_S6_1, TMP26);

        vis_ld64_2(ref, stride_8, TMP8);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(ref, stride_16, TMP10);
        ref += stride;
        vis_faligndata(TMP2, TMP4, REF_4);

        vis_ld64(dest[0], DST_0);
        vis_faligndata(TMP6, TMP8, REF_S0);

        vis_ld64_2(dest, 8, DST_2);
        vis_faligndata(TMP8, TMP10, REF_S4);

        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP0, TMP2, REF_2);
            vis_faligndata(TMP2, TMP4, REF_6);
            vis_faligndata(TMP6, TMP8, REF_S2);
            vis_faligndata(TMP8, TMP10, REF_S6);
        } else {
            vis_src1(TMP2, REF_2);
            vis_src1(TMP4, REF_6);
            vis_src1(TMP8, REF_S2);
            vis_src1(TMP10, REF_S6);
        }

        /* First output row: prev-row sums (TMP12..TMP18) + current-row
         * sums + CONST_6 bias + weighted old dest, then pack & store. */
        vis_mul8x16al(DST_0, CONST_1024, TMP30);
        vis_pmerge(ZERO, REF_0, TMP0);

        vis_mul8x16al(DST_1, CONST_1024, TMP32);
        vis_pmerge(ZERO, REF_0_1, TMP2);

        vis_mul8x16au(REF_2, CONST_256, TMP4);
        vis_pmerge(ZERO, REF_2_1, TMP6);

        vis_mul8x16al(DST_2, CONST_1024, REF_0);
        vis_padd16(TMP0, CONST_6, TMP0);

        vis_mul8x16al(DST_3, CONST_1024, REF_2);
        vis_padd16(TMP2, CONST_6, TMP2);

        vis_padd16(TMP0, TMP4, TMP0);
        vis_mul8x16au(REF_4, CONST_256, TMP4);

        vis_padd16(TMP2, TMP6, TMP2);
        vis_mul8x16au(REF_4_1, CONST_256, TMP6);

        vis_padd16(TMP12, TMP0, TMP12);
        vis_mul8x16au(REF_6, CONST_256, TMP8);

        vis_padd16(TMP14, TMP2, TMP14);
        vis_mul8x16au(REF_6_1, CONST_256, TMP10);

        vis_padd16(TMP12, TMP16, TMP12);
        vis_mul8x16au(REF_S0, CONST_256, REF_4);

        vis_padd16(TMP14, TMP18, TMP14);
        vis_mul8x16au(REF_S0_1, CONST_256, REF_6);

        vis_padd16(TMP12, TMP30, TMP12);

        vis_padd16(TMP14, TMP32, TMP14);
        vis_pack16(TMP12, DST_0);

        vis_pack16(TMP14, DST_1);
        vis_st64(DST_0, dest[0]);
        vis_padd16(TMP4, CONST_6, TMP4);

        vis_ld64_2(dest, stride, DST_0);
        vis_padd16(TMP6, CONST_6, TMP6);
        vis_mul8x16au(REF_S2, CONST_256, TMP12);

        vis_padd16(TMP4, TMP8, TMP4);
        vis_mul8x16au(REF_S2_1, CONST_256, TMP14);

        vis_padd16(TMP6, TMP10, TMP6);

        vis_padd16(TMP20, TMP4, TMP20);

        vis_padd16(TMP22, TMP6, TMP22);

        vis_padd16(TMP20, TMP24, TMP20);

        vis_padd16(TMP22, TMP26, TMP22);

        vis_padd16(TMP20, REF_0, TMP20);
        vis_mul8x16au(REF_S4, CONST_256, REF_0);

        vis_padd16(TMP22, REF_2, TMP22);
        vis_pack16(TMP20, DST_2);

        vis_pack16(TMP22, DST_3);
        vis_st64_2(DST_2, dest, 8);
        dest += stride;

        /* Second output row, built from the newly loaded row pair. */
        vis_ld64_2(dest, 8, DST_2);
        vis_mul8x16al(DST_0, CONST_1024, TMP30);
        vis_pmerge(ZERO, REF_S4_1, REF_2);

        vis_mul8x16al(DST_1, CONST_1024, TMP32);
        vis_padd16(REF_4, TMP0, TMP8);

        vis_mul8x16au(REF_S6, CONST_256, REF_4);
        vis_padd16(REF_6, TMP2, TMP10);

        vis_mul8x16au(REF_S6_1, CONST_256, REF_6);
        vis_padd16(TMP8, TMP12, TMP8);

        vis_padd16(TMP10, TMP14, TMP10);

        vis_padd16(TMP8, TMP30, TMP8);

        vis_padd16(TMP10, TMP32, TMP10);
        vis_pack16(TMP8, DST_0);

        vis_pack16(TMP10, DST_1);
        vis_st64(DST_0, dest[0]);

        vis_padd16(REF_0, TMP4, REF_0);

        vis_mul8x16al(DST_2, CONST_1024, TMP30);
        vis_padd16(REF_2, TMP6, REF_2);

        vis_mul8x16al(DST_3, CONST_1024, TMP32);
        vis_padd16(REF_0, REF_4, REF_0);

        vis_padd16(REF_2, REF_6, REF_2);

        vis_padd16(REF_0, TMP30, REF_0);

        /* stall */

        vis_padd16(REF_2, TMP32, REF_2);
        vis_pack16(REF_0, DST_2);

        vis_pack16(REF_2, DST_3);
        vis_st64_2(DST_2, dest, 8);
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
1925 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 8-pixel-wide "avg" with half-pel interpolation in both x and y,
 * with rounding: four-tap source sum + bias of 6 (CONST_6), averaged
 * with the existing dest (weighted with CONST_1024), GSR scale 4.
 * Two rows per iteration; height assumed even — TODO confirm callers.
 * Statement order is the instruction schedule; do not reorder. */
static void MC_avg_xy_8_vis (uint8_t * dest, const uint8_t * _ref,
                             const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;  /* misalignment of ref */
    unsigned long off_plus_1 = off + 1;             /* x+1 neighbour alignment */
    int stride_8 = stride + 8;

    vis_set_gsr(4 << VIS_GSR_SCALEFACT_SHIFT);

    ref = vis_alignaddr(ref);

    /* Prime REF_S0/REF_S2 with the row above the first output row. */
    vis_ld64(ref[0], TMP0);
    vis_fzero(ZERO);

    vis_ld64_2(ref, 8, TMP2);

    vis_ld64(constants6[0], CONST_6);

    vis_ld64(constants256_1024[0], CONST_256);
    vis_faligndata(TMP0, TMP2, REF_S0);

    if (off != 0x7) {
        vis_alignaddr_g0((void *)off_plus_1);
        vis_faligndata(TMP0, TMP2, REF_S2);
    } else {
        /* 8-byte shift cannot be done with faligndata. */
        vis_src1(TMP2, REF_S2);
    }

    height >>= 1;
    do {    /* 31 cycles */
        vis_ld64_2(ref, stride, TMP0);
        vis_mul8x16au(REF_S0, CONST_256, TMP8);
        vis_pmerge(ZERO, REF_S0_1, TMP10);

        vis_ld64_2(ref, stride_8, TMP2);
        ref += stride;
        vis_mul8x16au(REF_S2, CONST_256, TMP12);
        vis_pmerge(ZERO, REF_S2_1, TMP14);

        vis_alignaddr_g0((void *)off);

        vis_ld64_2(ref, stride, TMP4);
        vis_faligndata(TMP0, TMP2, REF_S4);

        vis_ld64_2(ref, stride_8, TMP6);
        ref += stride;

        vis_ld64(dest[0], DST_0);
        vis_faligndata(TMP4, TMP6, REF_S0);

        vis_ld64_2(dest, stride, DST_2);

        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP0, TMP2, REF_S6);
            vis_faligndata(TMP4, TMP6, REF_S2);
        } else {
            vis_src1(TMP2, REF_S6);
            vis_src1(TMP6, REF_S2);
        }

        /* Row 1: previous-row sums (TMP8..TMP14) + current-row sums
         * + CONST_6 + weighted old dest, pack and store. */
        vis_mul8x16al(DST_0, CONST_1024, TMP30);
        vis_pmerge(ZERO, REF_S4, TMP22);

        vis_mul8x16al(DST_1, CONST_1024, TMP32);
        vis_pmerge(ZERO, REF_S4_1, TMP24);

        vis_mul8x16au(REF_S6, CONST_256, TMP26);
        vis_pmerge(ZERO, REF_S6_1, TMP28);

        vis_mul8x16au(REF_S0, CONST_256, REF_S4);
        vis_padd16(TMP22, CONST_6, TMP22);

        vis_mul8x16au(REF_S0_1, CONST_256, REF_S6);
        vis_padd16(TMP24, CONST_6, TMP24);

        vis_mul8x16al(DST_2, CONST_1024, REF_0);
        vis_padd16(TMP22, TMP26, TMP22);

        vis_mul8x16al(DST_3, CONST_1024, REF_2);
        vis_padd16(TMP24, TMP28, TMP24);

        vis_mul8x16au(REF_S2, CONST_256, TMP26);
        vis_padd16(TMP8, TMP22, TMP8);

        vis_mul8x16au(REF_S2_1, CONST_256, TMP28);
        vis_padd16(TMP10, TMP24, TMP10);

        vis_padd16(TMP8, TMP12, TMP8);

        vis_padd16(TMP10, TMP14, TMP10);

        vis_padd16(TMP8, TMP30, TMP8);

        vis_padd16(TMP10, TMP32, TMP10);
        vis_pack16(TMP8, DST_0);

        vis_pack16(TMP10, DST_1);
        vis_st64(DST_0, dest[0]);
        dest += stride;

        /* Row 2: reuse the shared middle-row sums (TMP22/TMP24). */
        vis_padd16(REF_S4, TMP22, TMP12);

        vis_padd16(REF_S6, TMP24, TMP14);

        vis_padd16(TMP12, TMP26, TMP12);

        vis_padd16(TMP14, TMP28, TMP14);

        vis_padd16(TMP12, REF_0, TMP12);

        vis_padd16(TMP14, REF_2, TMP14);
        vis_pack16(TMP12, DST_2);

        vis_pack16(TMP14, DST_3);
        vis_st64(DST_2, dest[0]);
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2046 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* End of rounding code */

/* Start of no rounding code */
/* The trick used in some of this file is the formula from the MMX
 * motion comp code, which is:
 *
 * (x+y)>>1 == (x&y)+((x^y)>>1)
 *
 * This allows us to average 8 bytes at a time in a 64-bit FPU reg.
 * We avoid overflows by masking before we do the shift, and we
 * implement the shift by multiplying by 1/2 using mul8x16.  So in
 * VIS this is (assume 'x' is in f0, 'y' is in f2, a repeating mask
 * of '0xfe' is in f4, a repeating mask of '0x7f' is in f6, and
 * the value 0x80808080 is in f8):
 *
 *      fxor            f0,   f2, f10
 *      fand            f10,  f4, f10
 *      fmul8x16        f8,  f10, f10
 *      fand            f10,  f6, f10
 *      fand            f0,   f2, f12
 *      fpadd16         f12, f10, f10
 */
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2069 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 16-pixel-wide full-pel "put": straight copy of height rows from an
 * arbitrarily aligned ref to an 8-byte-aligned dest, realigning each
 * 16-byte row with faligndata.  No interpolation, so "no rounding" is
 * identical to the rounding variant here.
 * NOTE(review): dest is assumed 8-byte aligned (vis_st64) — this holds
 * for MPEG macroblock destinations; confirm for new callers. */
static void MC_put_no_round_o_16_vis (uint8_t * dest, const uint8_t * _ref,
                                      const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;

    ref = vis_alignaddr(ref);
    do {    /* 5 cycles */
        vis_ld64(ref[0], TMP0);

        vis_ld64_2(ref, 8, TMP2);

        vis_ld64_2(ref, 16, TMP4);
        ref += stride;

        vis_faligndata(TMP0, TMP2, REF_0);
        vis_st64(REF_0, dest[0]);

        vis_faligndata(TMP2, TMP4, REF_2);
        vis_st64_2(REF_2, dest, 8);
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2092 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 8-pixel-wide full-pel "put": copy height rows from an arbitrarily
 * aligned ref to an 8-byte-aligned dest, one 8-byte realigned load/
 * store pair per row.  No interpolation, so identical to the rounding
 * variant. */
static void MC_put_no_round_o_8_vis (uint8_t * dest, const uint8_t * _ref,
                                     const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;

    ref = vis_alignaddr(ref);
    do {    /* 4 cycles */
        vis_ld64(ref[0], TMP0);

        vis_ld64(ref[8], TMP2);
        ref += stride;

        /* stall */

        vis_faligndata(TMP0, TMP2, REF_0);
        vis_st64(REF_0, dest[0]);
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2112 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2113 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 16-pixel-wide full-pel "avg" without rounding: dest = (dest+ref)>>1
 * per byte, using the (x&y)+((x^y)>>1) trick — xor, mask with 0xfe,
 * halve via mul8x16 with CONST_128, mask with 0x7f, then add x&y.
 * Software-pipelined: the loop runs (height>>1)-1 times producing two
 * rows each, and the final row pair is handled by the epilogue after
 * the loop, so height is assumed even — TODO confirm callers.
 * Do not reorder statements; loads are scheduled ahead of use. */
static void MC_avg_no_round_o_16_vis (uint8_t * dest, const uint8_t * _ref,
                                      const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    int stride_8 = stride + 8;

    ref = vis_alignaddr(ref);

    /* Prologue: preload first ref row and first dest row. */
    vis_ld64(ref[0], TMP0);

    vis_ld64(ref[8], TMP2);

    vis_ld64(ref[16], TMP4);

    vis_ld64(dest[0], DST_0);

    vis_ld64(dest[8], DST_2);

    vis_ld64(constants_fe[0], MASK_fe);
    vis_faligndata(TMP0, TMP2, REF_0);

    vis_ld64(constants_7f[0], MASK_7f);
    vis_faligndata(TMP2, TMP4, REF_2);

    vis_ld64(constants128[0], CONST_128);

    ref += stride;
    height = (height >> 1) - 1;

    do {    /* 24 cycles */
        vis_ld64(ref[0], TMP0);
        vis_xor(DST_0, REF_0, TMP6);

        vis_ld64_2(ref, 8, TMP2);
        vis_and(TMP6, MASK_fe, TMP6);

        vis_ld64_2(ref, 16, TMP4);
        ref += stride;
        vis_mul8x16(CONST_128, TMP6, TMP6);
        vis_xor(DST_2, REF_2, TMP8);

        vis_and(TMP8, MASK_fe, TMP8);

        vis_and(DST_0, REF_0, TMP10);
        vis_ld64_2(dest, stride, DST_0);
        vis_mul8x16(CONST_128, TMP8, TMP8);

        vis_and(DST_2, REF_2, TMP12);
        vis_ld64_2(dest, stride_8, DST_2);

        vis_ld64(ref[0], TMP14);
        vis_and(TMP6, MASK_7f, TMP6);

        vis_and(TMP8, MASK_7f, TMP8);

        vis_padd16(TMP10, TMP6, TMP6);
        vis_st64(TMP6, dest[0]);

        vis_padd16(TMP12, TMP8, TMP8);
        vis_st64_2(TMP8, dest, 8);

        dest += stride;
        vis_ld64_2(ref, 8, TMP16);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(ref, 16, TMP18);
        vis_faligndata(TMP2, TMP4, REF_2);
        ref += stride;

        vis_xor(DST_0, REF_0, TMP20);

        vis_and(TMP20, MASK_fe, TMP20);

        vis_xor(DST_2, REF_2, TMP22);
        vis_mul8x16(CONST_128, TMP20, TMP20);

        vis_and(TMP22, MASK_fe, TMP22);

        vis_and(DST_0, REF_0, TMP24);
        vis_mul8x16(CONST_128, TMP22, TMP22);

        vis_and(DST_2, REF_2, TMP26);

        vis_ld64_2(dest, stride, DST_0);
        vis_faligndata(TMP14, TMP16, REF_0);

        vis_ld64_2(dest, stride_8, DST_2);
        vis_faligndata(TMP16, TMP18, REF_2);

        vis_and(TMP20, MASK_7f, TMP20);

        vis_and(TMP22, MASK_7f, TMP22);

        vis_padd16(TMP24, TMP20, TMP20);
        vis_st64(TMP20, dest[0]);

        vis_padd16(TMP26, TMP22, TMP22);
        vis_st64_2(TMP22, dest, 8);
        dest += stride;
    } while (--height);

    /* Epilogue: final two rows, without loading past the block. */
    vis_ld64(ref[0], TMP0);
    vis_xor(DST_0, REF_0, TMP6);

    vis_ld64_2(ref, 8, TMP2);
    vis_and(TMP6, MASK_fe, TMP6);

    vis_ld64_2(ref, 16, TMP4);
    vis_mul8x16(CONST_128, TMP6, TMP6);
    vis_xor(DST_2, REF_2, TMP8);

    vis_and(TMP8, MASK_fe, TMP8);

    vis_and(DST_0, REF_0, TMP10);
    vis_ld64_2(dest, stride, DST_0);
    vis_mul8x16(CONST_128, TMP8, TMP8);

    vis_and(DST_2, REF_2, TMP12);
    vis_ld64_2(dest, stride_8, DST_2);

    vis_ld64(ref[0], TMP14);
    vis_and(TMP6, MASK_7f, TMP6);

    vis_and(TMP8, MASK_7f, TMP8);

    vis_padd16(TMP10, TMP6, TMP6);
    vis_st64(TMP6, dest[0]);

    vis_padd16(TMP12, TMP8, TMP8);
    vis_st64_2(TMP8, dest, 8);

    dest += stride;
    vis_faligndata(TMP0, TMP2, REF_0);

    vis_faligndata(TMP2, TMP4, REF_2);

    vis_xor(DST_0, REF_0, TMP20);

    vis_and(TMP20, MASK_fe, TMP20);

    vis_xor(DST_2, REF_2, TMP22);
    vis_mul8x16(CONST_128, TMP20, TMP20);

    vis_and(TMP22, MASK_fe, TMP22);

    vis_and(DST_0, REF_0, TMP24);
    vis_mul8x16(CONST_128, TMP22, TMP22);

    vis_and(DST_2, REF_2, TMP26);

    vis_and(TMP20, MASK_7f, TMP20);

    vis_and(TMP22, MASK_7f, TMP22);

    vis_padd16(TMP24, TMP20, TMP20);
    vis_st64(TMP20, dest[0]);

    vis_padd16(TMP26, TMP22, TMP22);
    vis_st64_2(TMP22, dest, 8);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2274 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 8-pixel-wide full-pel "avg" without rounding: dest = (dest+ref)>>1
 * per byte via the (x&y)+((x^y)>>1) trick (see the block comment
 * above the no-rounding section).  Software-pipelined: the loop runs
 * (height>>1)-1 times producing two rows each; the final row pair is
 * the epilogue.  Height assumed even — TODO confirm callers. */
static void MC_avg_no_round_o_8_vis (uint8_t * dest, const uint8_t * _ref,
                                     const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;

    ref = vis_alignaddr(ref);

    /* Prologue: preload first ref row, first dest row, constants. */
    vis_ld64(ref[0], TMP0);

    vis_ld64(ref[8], TMP2);

    vis_ld64(dest[0], DST_0);

    vis_ld64(constants_fe[0], MASK_fe);

    vis_ld64(constants_7f[0], MASK_7f);
    vis_faligndata(TMP0, TMP2, REF_0);

    vis_ld64(constants128[0], CONST_128);

    ref += stride;
    height = (height >> 1) - 1;

    do {    /* 12 cycles */
        vis_ld64(ref[0], TMP0);
        vis_xor(DST_0, REF_0, TMP4);

        vis_ld64(ref[8], TMP2);
        vis_and(TMP4, MASK_fe, TMP4);

        vis_and(DST_0, REF_0, TMP6);
        vis_ld64_2(dest, stride, DST_0);
        ref += stride;
        vis_mul8x16(CONST_128, TMP4, TMP4);

        vis_ld64(ref[0], TMP12);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64(ref[8], TMP2);
        vis_xor(DST_0, REF_0, TMP0);
        ref += stride;

        vis_and(TMP0, MASK_fe, TMP0);

        vis_and(TMP4, MASK_7f, TMP4);

        vis_padd16(TMP6, TMP4, TMP4);
        vis_st64(TMP4, dest[0]);
        dest += stride;
        vis_mul8x16(CONST_128, TMP0, TMP0);

        vis_and(DST_0, REF_0, TMP6);
        vis_ld64_2(dest, stride, DST_0);

        vis_faligndata(TMP12, TMP2, REF_0);

        vis_and(TMP0, MASK_7f, TMP0);

        vis_padd16(TMP6, TMP0, TMP4);
        vis_st64(TMP4, dest[0]);
        dest += stride;
    } while (--height);

    /* Epilogue: final two rows, avoiding loads past the block. */
    vis_ld64(ref[0], TMP0);
    vis_xor(DST_0, REF_0, TMP4);

    vis_ld64(ref[8], TMP2);
    vis_and(TMP4, MASK_fe, TMP4);

    vis_and(DST_0, REF_0, TMP6);
    vis_ld64_2(dest, stride, DST_0);
    vis_mul8x16(CONST_128, TMP4, TMP4);

    vis_faligndata(TMP0, TMP2, REF_0);

    vis_xor(DST_0, REF_0, TMP0);

    vis_and(TMP0, MASK_fe, TMP0);

    vis_and(TMP4, MASK_7f, TMP4);

    vis_padd16(TMP6, TMP4, TMP4);
    vis_st64(TMP4, dest[0]);
    dest += stride;
    vis_mul8x16(CONST_128, TMP0, TMP0);

    vis_and(DST_0, REF_0, TMP6);

    vis_and(TMP0, MASK_7f, TMP0);

    vis_padd16(TMP6, TMP0, TMP4);
    vis_st64(TMP4, dest[0]);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2368 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 16-pixel-wide "put" with half-pel interpolation in x only, without
 * rounding: each output byte is (ref[x]+ref[x+1])>>1, computed with
 * the (x&y)+((x^y)>>1) trick on REF_0/REF_2 and REF_4/REF_6 (the
 * same row at offsets x and x+1).  Software-pipelined: the loop runs
 * (height>>1)-1 times producing two rows each; the final row pair is
 * the epilogue.  Height assumed even — TODO confirm callers.
 * off == 0x7 needs vis_src1 since faligndata cannot shift 8 bytes. */
static void MC_put_no_round_x_16_vis (uint8_t * dest, const uint8_t * _ref,
                                      const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;  /* misalignment of ref */
    unsigned long off_plus_1 = off + 1;             /* x+1 neighbour alignment */

    ref = vis_alignaddr(ref);

    /* Prologue: preload first row at both offsets, plus constants. */
    vis_ld64(ref[0], TMP0);

    vis_ld64_2(ref, 8, TMP2);

    vis_ld64_2(ref, 16, TMP4);

    vis_ld64(constants_fe[0], MASK_fe);

    vis_ld64(constants_7f[0], MASK_7f);
    vis_faligndata(TMP0, TMP2, REF_0);

    vis_ld64(constants128[0], CONST_128);
    vis_faligndata(TMP2, TMP4, REF_4);

    if (off != 0x7) {
        vis_alignaddr_g0((void *)off_plus_1);
        vis_faligndata(TMP0, TMP2, REF_2);
        vis_faligndata(TMP2, TMP4, REF_6);
    } else {
        vis_src1(TMP2, REF_2);
        vis_src1(TMP4, REF_6);
    }

    ref += stride;
    height = (height >> 1) - 1;

    do {    /* 34 cycles */
        vis_ld64(ref[0], TMP0);
        vis_xor(REF_0, REF_2, TMP6);

        vis_ld64_2(ref, 8, TMP2);
        vis_xor(REF_4, REF_6, TMP8);

        vis_ld64_2(ref, 16, TMP4);
        vis_and(TMP6, MASK_fe, TMP6);
        ref += stride;

        vis_ld64(ref[0], TMP14);
        vis_mul8x16(CONST_128, TMP6, TMP6);
        vis_and(TMP8, MASK_fe, TMP8);

        vis_ld64_2(ref, 8, TMP16);
        vis_mul8x16(CONST_128, TMP8, TMP8);
        vis_and(REF_0, REF_2, TMP10);

        vis_ld64_2(ref, 16, TMP18);
        ref += stride;
        vis_and(REF_4, REF_6, TMP12);

        vis_alignaddr_g0((void *)off);

        vis_faligndata(TMP0, TMP2, REF_0);

        vis_faligndata(TMP2, TMP4, REF_4);

        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP0, TMP2, REF_2);
            vis_faligndata(TMP2, TMP4, REF_6);
        } else {
            vis_src1(TMP2, REF_2);
            vis_src1(TMP4, REF_6);
        }

        vis_and(TMP6, MASK_7f, TMP6);

        vis_and(TMP8, MASK_7f, TMP8);

        vis_padd16(TMP10, TMP6, TMP6);
        vis_st64(TMP6, dest[0]);

        vis_padd16(TMP12, TMP8, TMP8);
        vis_st64_2(TMP8, dest, 8);
        dest += stride;

        vis_xor(REF_0, REF_2, TMP6);

        vis_xor(REF_4, REF_6, TMP8);

        vis_and(TMP6, MASK_fe, TMP6);

        vis_mul8x16(CONST_128, TMP6, TMP6);
        vis_and(TMP8, MASK_fe, TMP8);

        vis_mul8x16(CONST_128, TMP8, TMP8);
        vis_and(REF_0, REF_2, TMP10);

        vis_and(REF_4, REF_6, TMP12);

        vis_alignaddr_g0((void *)off);

        vis_faligndata(TMP14, TMP16, REF_0);

        vis_faligndata(TMP16, TMP18, REF_4);

        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP14, TMP16, REF_2);
            vis_faligndata(TMP16, TMP18, REF_6);
        } else {
            vis_src1(TMP16, REF_2);
            vis_src1(TMP18, REF_6);
        }

        vis_and(TMP6, MASK_7f, TMP6);

        vis_and(TMP8, MASK_7f, TMP8);

        vis_padd16(TMP10, TMP6, TMP6);
        vis_st64(TMP6, dest[0]);

        vis_padd16(TMP12, TMP8, TMP8);
        vis_st64_2(TMP8, dest, 8);
        dest += stride;
    } while (--height);

    /* Epilogue: final two rows without loading past the block. */
    vis_ld64(ref[0], TMP0);
    vis_xor(REF_0, REF_2, TMP6);

    vis_ld64_2(ref, 8, TMP2);
    vis_xor(REF_4, REF_6, TMP8);

    vis_ld64_2(ref, 16, TMP4);
    vis_and(TMP6, MASK_fe, TMP6);

    vis_mul8x16(CONST_128, TMP6, TMP6);
    vis_and(TMP8, MASK_fe, TMP8);

    vis_mul8x16(CONST_128, TMP8, TMP8);
    vis_and(REF_0, REF_2, TMP10);

    vis_and(REF_4, REF_6, TMP12);

    vis_alignaddr_g0((void *)off);

    vis_faligndata(TMP0, TMP2, REF_0);

    vis_faligndata(TMP2, TMP4, REF_4);

    if (off != 0x7) {
        vis_alignaddr_g0((void *)off_plus_1);
        vis_faligndata(TMP0, TMP2, REF_2);
        vis_faligndata(TMP2, TMP4, REF_6);
    } else {
        vis_src1(TMP2, REF_2);
        vis_src1(TMP4, REF_6);
    }

    vis_and(TMP6, MASK_7f, TMP6);

    vis_and(TMP8, MASK_7f, TMP8);

    vis_padd16(TMP10, TMP6, TMP6);
    vis_st64(TMP6, dest[0]);

    vis_padd16(TMP12, TMP8, TMP8);
    vis_st64_2(TMP8, dest, 8);
    dest += stride;

    vis_xor(REF_0, REF_2, TMP6);

    vis_xor(REF_4, REF_6, TMP8);

    vis_and(TMP6, MASK_fe, TMP6);

    vis_mul8x16(CONST_128, TMP6, TMP6);
    vis_and(TMP8, MASK_fe, TMP8);

    vis_mul8x16(CONST_128, TMP8, TMP8);
    vis_and(REF_0, REF_2, TMP10);

    vis_and(REF_4, REF_6, TMP12);

    vis_and(TMP6, MASK_7f, TMP6);

    vis_and(TMP8, MASK_7f, TMP8);

    vis_padd16(TMP10, TMP6, TMP6);
    vis_st64(TMP6, dest[0]);

    vis_padd16(TMP12, TMP8, TMP8);
    vis_st64_2(TMP8, dest, 8);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2561 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* Put, horizontal half-pel ("x"), 8 pixels wide, no rounding.
 *
 * Each output byte is the truncating average of a reference byte and its
 * right neighbour.  The visible and/xor/MASK_fe/CONST_128/MASK_7f sequence
 * implements the classic no-round identity
 *     avg = (a & b) + (((a ^ b) & 0xfe) >> 1)
 * (vis_mul8x16 by CONST_128 performs the >>1; MASK_7f clears the bit that
 * shifted across lane boundaries).
 *
 * dest/stride: destination block and its line stride (bytes).
 * height: number of lines; must be even and >= 4 (loop runs (height>>1)-1
 * times and an unrolled final line-pair follows).
 *
 * NOTE(review): the instruction interleaving is deliberate software
 * pipelining for UltraSPARC dual-issue ("20 cycles" per iteration per the
 * original annotation) — do not reorder statements.
 */
static void MC_put_no_round_x_8_vis (uint8_t * dest, const uint8_t * _ref,
                                     const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;
    unsigned long off_plus_1 = off + 1;

    ref = vis_alignaddr(ref);

    /* Prologue: load first line and the shift constants. */
    vis_ld64(ref[0], TMP0);

    vis_ld64(ref[8], TMP2);

    vis_ld64(constants_fe[0], MASK_fe);

    vis_ld64(constants_7f[0], MASK_7f);

    vis_ld64(constants128[0], CONST_128);
    vis_faligndata(TMP0, TMP2, REF_0);

    /* off == 7 means off+1 == 8, which faligndata cannot express;
     * vis_src1 supplies the one-word-shifted data in that boundary case. */
    if (off != 0x7) {
        vis_alignaddr_g0((void *)off_plus_1);
        vis_faligndata(TMP0, TMP2, REF_2);
    } else {
        vis_src1(TMP2, REF_2);
    }

    ref += stride;
    height = (height >> 1) - 1;

    do {    /* 20 cycles */
        /* Steady state: process two output lines per iteration while
         * prefetching the next two reference lines. */
        vis_ld64(ref[0], TMP0);
        vis_xor(REF_0, REF_2, TMP4);

        vis_ld64_2(ref, 8, TMP2);
        vis_and(TMP4, MASK_fe, TMP4);
        ref += stride;

        vis_ld64(ref[0], TMP8);
        vis_and(REF_0, REF_2, TMP6);
        vis_mul8x16(CONST_128, TMP4, TMP4);

        vis_alignaddr_g0((void *)off);

        vis_ld64_2(ref, 8, TMP10);
        ref += stride;
        vis_faligndata(TMP0, TMP2, REF_0);

        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP0, TMP2, REF_2);
        } else {
            vis_src1(TMP2, REF_2);
        }

        vis_and(TMP4, MASK_7f, TMP4);

        vis_padd16(TMP6, TMP4, DST_0);
        vis_st64(DST_0, dest[0]);
        dest += stride;

        vis_xor(REF_0, REF_2, TMP12);

        vis_and(TMP12, MASK_fe, TMP12);

        vis_and(REF_0, REF_2, TMP14);
        vis_mul8x16(CONST_128, TMP12, TMP12);

        vis_alignaddr_g0((void *)off);
        vis_faligndata(TMP8, TMP10, REF_0);
        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP8, TMP10, REF_2);
        } else {
            vis_src1(TMP10, REF_2);
        }

        vis_and(TMP12, MASK_7f, TMP12);

        vis_padd16(TMP14, TMP12, DST_0);
        vis_st64(DST_0, dest[0]);
        dest += stride;
    } while (--height);

    /* Epilogue: final line pair (no further prefetch needed). */
    vis_ld64(ref[0], TMP0);
    vis_xor(REF_0, REF_2, TMP4);

    vis_ld64_2(ref, 8, TMP2);
    vis_and(TMP4, MASK_fe, TMP4);

    vis_and(REF_0, REF_2, TMP6);
    vis_mul8x16(CONST_128, TMP4, TMP4);

    vis_alignaddr_g0((void *)off);

    vis_faligndata(TMP0, TMP2, REF_0);

    if (off != 0x7) {
        vis_alignaddr_g0((void *)off_plus_1);
        vis_faligndata(TMP0, TMP2, REF_2);
    } else {
        vis_src1(TMP2, REF_2);
    }

    vis_and(TMP4, MASK_7f, TMP4);

    vis_padd16(TMP6, TMP4, DST_0);
    vis_st64(DST_0, dest[0]);
    dest += stride;

    vis_xor(REF_0, REF_2, TMP12);

    vis_and(TMP12, MASK_fe, TMP12);

    vis_and(REF_0, REF_2, TMP14);
    vis_mul8x16(CONST_128, TMP12, TMP12);

    vis_and(TMP12, MASK_7f, TMP12);

    vis_padd16(TMP14, TMP12, DST_0);
    vis_st64(DST_0, dest[0]);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2685 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* Average, horizontal half-pel ("x"), 16 pixels wide, no rounding.
 *
 * Expands bytes to 16-bit lanes (mul8x16au with CONST_256 / pmerge with
 * ZERO), sums the two horizontal reference taps plus the scaled existing
 * dest pixels (mul8x16al with CONST_512), adds the CONST_3 bias, and
 * repacks with vis_pack16 under a GSR scale factor of 5 — presumably
 * realising dest = (dest + no_rnd_avg(ref, ref+1)) without upward rounding;
 * TODO confirm the exact fixed-point weighting against vis.h.
 *
 * height: number of lines; loop body handles one full 16-byte line per
 * iteration ("26 cycles" per the original annotation).
 *
 * NOTE(review): statement interleaving is deliberate scheduling —
 * do not reorder.
 */
static void MC_avg_no_round_x_16_vis (uint8_t * dest, const uint8_t * _ref,
                                      const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;
    unsigned long off_plus_1 = off + 1;

    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    vis_ld64(constants3[0], CONST_3);
    vis_fzero(ZERO);
    vis_ld64(constants256_512[0], CONST_256);

    ref = vis_alignaddr(ref);
    do {    /* 26 cycles */
        vis_ld64(ref[0], TMP0);

        vis_ld64(ref[8], TMP2);

        vis_alignaddr_g0((void *)off);

        vis_ld64(ref[16], TMP4);

        vis_ld64(dest[0], DST_0);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64(dest[8], DST_2);
        vis_faligndata(TMP2, TMP4, REF_4);

        /* off == 7 boundary: faligndata cannot shift by 8, use src1. */
        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);
            vis_faligndata(TMP0, TMP2, REF_2);
            vis_faligndata(TMP2, TMP4, REF_6);
        } else {
            vis_src1(TMP2, REF_2);
            vis_src1(TMP4, REF_6);
        }

        vis_mul8x16au(REF_0, CONST_256, TMP0);

        vis_pmerge(ZERO, REF_2, TMP4);
        vis_mul8x16au(REF_0_1, CONST_256, TMP2);

        vis_pmerge(ZERO, REF_2_1, TMP6);

        vis_padd16(TMP0, TMP4, TMP0);

        vis_mul8x16al(DST_0, CONST_512, TMP4);
        vis_padd16(TMP2, TMP6, TMP2);

        vis_mul8x16al(DST_1, CONST_512, TMP6);

        vis_mul8x16au(REF_6, CONST_256, TMP12);

        vis_padd16(TMP0, TMP4, TMP0);
        vis_mul8x16au(REF_6_1, CONST_256, TMP14);

        vis_padd16(TMP2, TMP6, TMP2);
        vis_mul8x16au(REF_4, CONST_256, TMP16);

        vis_padd16(TMP0, CONST_3, TMP8);
        vis_mul8x16au(REF_4_1, CONST_256, TMP18);

        vis_padd16(TMP2, CONST_3, TMP10);
        vis_pack16(TMP8, DST_0);

        vis_pack16(TMP10, DST_1);
        vis_padd16(TMP16, TMP12, TMP0);

        vis_st64(DST_0, dest[0]);
        vis_mul8x16al(DST_2, CONST_512, TMP4);
        vis_padd16(TMP18, TMP14, TMP2);

        vis_mul8x16al(DST_3, CONST_512, TMP6);
        vis_padd16(TMP0, CONST_3, TMP0);

        vis_padd16(TMP2, CONST_3, TMP2);

        vis_padd16(TMP0, TMP4, TMP0);

        vis_padd16(TMP2, TMP6, TMP2);
        vis_pack16(TMP0, DST_2);

        vis_pack16(TMP2, DST_3);
        vis_st64(DST_2, dest[8]);

        ref += stride;
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2776 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* Average, horizontal half-pel ("x"), 8 pixels wide, no rounding.
 *
 * Same arithmetic as MC_avg_no_round_x_16_vis (16-bit lane expansion,
 * two horizontal taps + scaled dest + CONST_3 bias, pack16 with GSR
 * scale 5) but processes four 8-byte output lines per iteration
 * ("47 cycles" per the original annotation).
 *
 * height: number of lines; must be a multiple of 4 (height >>= 2).
 *
 * NOTE(review): because the DST_0/DST_2 register pairs are busy with the
 * first two lines, the prefetched dest data for lines 3-4 is parked in the
 * TMP4/TMP5 pair — the /*DST_2* / and /*DST_3* / markers flag that aliasing.
 * vis_ld64_2 into TMP4 presumably fills the full TMP4/TMP5 64-bit pair;
 * confirm against the register map in vis.h.  Do not reorder statements.
 */
static void MC_avg_no_round_x_8_vis (uint8_t * dest, const uint8_t * _ref,
                                     const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    unsigned long off = (unsigned long) ref & 0x7;
    unsigned long off_plus_1 = off + 1;
    int stride_times_2 = stride << 1;

    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    vis_ld64(constants3[0], CONST_3);
    vis_fzero(ZERO);
    vis_ld64(constants256_512[0], CONST_256);

    ref = vis_alignaddr(ref);
    height >>= 2;
    do {    /* 47 cycles */
        /* Load four reference lines. */
        vis_ld64(ref[0], TMP0);

        vis_ld64_2(ref, 8, TMP2);
        ref += stride;

        vis_alignaddr_g0((void *)off);

        vis_ld64(ref[0], TMP4);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(ref, 8, TMP6);
        ref += stride;

        vis_ld64(ref[0], TMP8);

        vis_ld64_2(ref, 8, TMP10);
        ref += stride;
        vis_faligndata(TMP4, TMP6, REF_4);

        vis_ld64(ref[0], TMP12);

        vis_ld64_2(ref, 8, TMP14);
        ref += stride;
        vis_faligndata(TMP8, TMP10, REF_S0);

        vis_faligndata(TMP12, TMP14, REF_S4);

        /* off == 7 boundary: faligndata cannot shift by 8, use src1. */
        if (off != 0x7) {
            vis_alignaddr_g0((void *)off_plus_1);

            vis_ld64(dest[0], DST_0);
            vis_faligndata(TMP0, TMP2, REF_2);

            vis_ld64_2(dest, stride, DST_2);
            vis_faligndata(TMP4, TMP6, REF_6);

            vis_faligndata(TMP8, TMP10, REF_S2);

            vis_faligndata(TMP12, TMP14, REF_S6);
        } else {
            vis_ld64(dest[0], DST_0);
            vis_src1(TMP2, REF_2);

            vis_ld64_2(dest, stride, DST_2);
            vis_src1(TMP6, REF_6);

            vis_src1(TMP10, REF_S2);

            vis_src1(TMP14, REF_S6);
        }

        /* Lines 1-2: combine taps with the existing dest pixels. */
        vis_pmerge(ZERO, REF_0, TMP0);
        vis_mul8x16au(REF_0_1, CONST_256, TMP2);

        vis_pmerge(ZERO, REF_2, TMP4);
        vis_mul8x16au(REF_2_1, CONST_256, TMP6);

        vis_padd16(TMP0, CONST_3, TMP0);
        vis_mul8x16al(DST_0, CONST_512, TMP16);

        vis_padd16(TMP2, CONST_3, TMP2);
        vis_mul8x16al(DST_1, CONST_512, TMP18);

        vis_padd16(TMP0, TMP4, TMP0);
        vis_mul8x16au(REF_4, CONST_256, TMP8);

        vis_padd16(TMP2, TMP6, TMP2);
        vis_mul8x16au(REF_4_1, CONST_256, TMP10);

        vis_padd16(TMP0, TMP16, TMP0);
        vis_mul8x16au(REF_6, CONST_256, TMP12);

        vis_padd16(TMP2, TMP18, TMP2);
        vis_mul8x16au(REF_6_1, CONST_256, TMP14);

        vis_padd16(TMP8, CONST_3, TMP8);
        vis_mul8x16al(DST_2, CONST_512, TMP16);

        vis_padd16(TMP8, TMP12, TMP8);
        vis_mul8x16al(DST_3, CONST_512, TMP18);

        vis_padd16(TMP10, TMP14, TMP10);
        vis_pack16(TMP0, DST_0);

        vis_pack16(TMP2, DST_1);
        vis_st64(DST_0, dest[0]);
        dest += stride;
        vis_padd16(TMP10, CONST_3, TMP10);

        /* Prefetch dest for lines 3-4; second load parked in TMP4/TMP5. */
        vis_ld64_2(dest, stride, DST_0);
        vis_padd16(TMP8, TMP16, TMP8);

        vis_ld64_2(dest, stride_times_2, TMP4/*DST_2*/);
        vis_padd16(TMP10, TMP18, TMP10);
        vis_pack16(TMP8, DST_2);

        vis_pack16(TMP10, DST_3);
        vis_st64(DST_2, dest[0]);
        dest += stride;

        /* Lines 3-4: same combine using the REF_S* shifted registers. */
        vis_mul8x16au(REF_S0_1, CONST_256, TMP2);
        vis_pmerge(ZERO, REF_S0, TMP0);

        vis_pmerge(ZERO, REF_S2, TMP24);
        vis_mul8x16au(REF_S2_1, CONST_256, TMP6);

        vis_padd16(TMP0, CONST_3, TMP0);
        vis_mul8x16au(REF_S4, CONST_256, TMP8);

        vis_padd16(TMP2, CONST_3, TMP2);
        vis_mul8x16au(REF_S4_1, CONST_256, TMP10);

        vis_padd16(TMP0, TMP24, TMP0);
        vis_mul8x16au(REF_S6, CONST_256, TMP12);

        vis_padd16(TMP2, TMP6, TMP2);
        vis_mul8x16au(REF_S6_1, CONST_256, TMP14);

        vis_padd16(TMP8, CONST_3, TMP8);
        vis_mul8x16al(DST_0, CONST_512, TMP16);

        vis_padd16(TMP10, CONST_3, TMP10);
        vis_mul8x16al(DST_1, CONST_512, TMP18);

        vis_padd16(TMP8, TMP12, TMP8);
        vis_mul8x16al(TMP4/*DST_2*/, CONST_512, TMP20);

        vis_mul8x16al(TMP5/*DST_3*/, CONST_512, TMP22);
        vis_padd16(TMP0, TMP16, TMP0);

        vis_padd16(TMP2, TMP18, TMP2);
        vis_pack16(TMP0, DST_0);

        vis_padd16(TMP10, TMP14, TMP10);
        vis_pack16(TMP2, DST_1);
        vis_st64(DST_0, dest[0]);
        dest += stride;

        vis_padd16(TMP8, TMP20, TMP8);

        vis_padd16(TMP10, TMP22, TMP10);
        vis_pack16(TMP8, DST_2);

        vis_pack16(TMP10, DST_3);
        vis_st64(DST_2, dest[0]);
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
2942 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* Put, vertical half-pel ("y"), 16 pixels wide, no rounding.
 *
 * Each output byte is the truncating average of vertically adjacent
 * reference bytes (line n and line n+1), via the no-round identity
 *     avg = (a & b) + (((a ^ b) & 0xfe) >> 1)
 * No off/off_plus_1 split is needed here: only one horizontal phase is
 * read, so a single vis_alignaddr in the prologue suffices.
 *
 * height: number of lines; must be even and >= 4 (loop runs (height>>1)-1
 * times, "24 cycles" each, with an unrolled final line pair).
 *
 * NOTE(review): instruction interleaving is deliberate scheduling —
 * do not reorder statements.
 */
static void MC_put_no_round_y_16_vis (uint8_t * dest, const uint8_t * _ref,
                                      const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;

    /* Prologue: prime REF_0/REF_4 (line n) and REF_2/REF_6 (line n+1). */
    ref = vis_alignaddr(ref);
    vis_ld64(ref[0], TMP0);

    vis_ld64_2(ref, 8, TMP2);

    vis_ld64_2(ref, 16, TMP4);
    ref += stride;

    vis_ld64(ref[0], TMP6);
    vis_faligndata(TMP0, TMP2, REF_0);

    vis_ld64_2(ref, 8, TMP8);
    vis_faligndata(TMP2, TMP4, REF_4);

    vis_ld64_2(ref, 16, TMP10);
    ref += stride;

    vis_ld64(constants_fe[0], MASK_fe);
    vis_faligndata(TMP6, TMP8, REF_2);

    vis_ld64(constants_7f[0], MASK_7f);
    vis_faligndata(TMP8, TMP10, REF_6);

    vis_ld64(constants128[0], CONST_128);
    height = (height >> 1) - 1;
    do {    /* 24 cycles */
        vis_ld64(ref[0], TMP0);
        vis_xor(REF_0, REF_2, TMP12);

        vis_ld64_2(ref, 8, TMP2);
        vis_xor(REF_4, REF_6, TMP16);

        vis_ld64_2(ref, 16, TMP4);
        ref += stride;
        vis_and(REF_0, REF_2, TMP14);

        vis_ld64(ref[0], TMP6);
        vis_and(REF_4, REF_6, TMP18);

        vis_ld64_2(ref, 8, TMP8);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(ref, 16, TMP10);
        ref += stride;
        vis_faligndata(TMP2, TMP4, REF_4);

        vis_and(TMP12, MASK_fe, TMP12);

        vis_and(TMP16, MASK_fe, TMP16);
        vis_mul8x16(CONST_128, TMP12, TMP12);

        vis_mul8x16(CONST_128, TMP16, TMP16);
        vis_xor(REF_0, REF_2, TMP0);

        vis_xor(REF_4, REF_6, TMP2);

        vis_and(REF_0, REF_2, TMP20);

        vis_and(TMP12, MASK_7f, TMP12);

        vis_and(TMP16, MASK_7f, TMP16);

        vis_padd16(TMP14, TMP12, TMP12);
        vis_st64(TMP12, dest[0]);

        vis_padd16(TMP18, TMP16, TMP16);
        vis_st64_2(TMP16, dest, 8);
        dest += stride;

        vis_and(REF_4, REF_6, TMP18);

        vis_and(TMP0, MASK_fe, TMP0);

        vis_and(TMP2, MASK_fe, TMP2);
        vis_mul8x16(CONST_128, TMP0, TMP0);

        vis_faligndata(TMP6, TMP8, REF_2);
        vis_mul8x16(CONST_128, TMP2, TMP2);

        vis_faligndata(TMP8, TMP10, REF_6);

        vis_and(TMP0, MASK_7f, TMP0);

        vis_and(TMP2, MASK_7f, TMP2);

        vis_padd16(TMP20, TMP0, TMP0);
        vis_st64(TMP0, dest[0]);

        vis_padd16(TMP18, TMP2, TMP2);
        vis_st64_2(TMP2, dest, 8);
        dest += stride;
    } while (--height);

    /* Epilogue: final line pair (no further prefetch needed). */
    vis_ld64(ref[0], TMP0);
    vis_xor(REF_0, REF_2, TMP12);

    vis_ld64_2(ref, 8, TMP2);
    vis_xor(REF_4, REF_6, TMP16);

    vis_ld64_2(ref, 16, TMP4);
    vis_and(REF_0, REF_2, TMP14);

    vis_and(REF_4, REF_6, TMP18);

    vis_faligndata(TMP0, TMP2, REF_0);

    vis_faligndata(TMP2, TMP4, REF_4);

    vis_and(TMP12, MASK_fe, TMP12);

    vis_and(TMP16, MASK_fe, TMP16);
    vis_mul8x16(CONST_128, TMP12, TMP12);

    vis_mul8x16(CONST_128, TMP16, TMP16);
    vis_xor(REF_0, REF_2, TMP0);

    vis_xor(REF_4, REF_6, TMP2);

    vis_and(REF_0, REF_2, TMP20);

    vis_and(TMP12, MASK_7f, TMP12);

    vis_and(TMP16, MASK_7f, TMP16);

    vis_padd16(TMP14, TMP12, TMP12);
    vis_st64(TMP12, dest[0]);

    vis_padd16(TMP18, TMP16, TMP16);
    vis_st64_2(TMP16, dest, 8);
    dest += stride;

    vis_and(REF_4, REF_6, TMP18);

    vis_and(TMP0, MASK_fe, TMP0);

    vis_and(TMP2, MASK_fe, TMP2);
    vis_mul8x16(CONST_128, TMP0, TMP0);

    vis_mul8x16(CONST_128, TMP2, TMP2);

    vis_and(TMP0, MASK_7f, TMP0);

    vis_and(TMP2, MASK_7f, TMP2);

    vis_padd16(TMP20, TMP0, TMP0);
    vis_st64(TMP0, dest[0]);

    vis_padd16(TMP18, TMP2, TMP2);
    vis_st64_2(TMP2, dest, 8);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3098 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* Put, vertical half-pel ("y"), 8 pixels wide, no rounding.
 *
 * 8-byte-wide variant of MC_put_no_round_y_16_vis: each output byte is the
 * truncating average of the reference bytes on line n and line n+1,
 * computed as (a & b) + (((a ^ b) & 0xfe) >> 1).
 *
 * height: number of lines; must be even and >= 4 (loop runs (height>>1)-1
 * times, "12 cycles" each, then an unrolled final line pair).
 *
 * NOTE(review): instruction interleaving is deliberate scheduling —
 * do not reorder statements.
 */
static void MC_put_no_round_y_8_vis (uint8_t * dest, const uint8_t * _ref,
                                     const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;

    /* Prologue: prime REF_0 (line n) and REF_2 (line n+1). */
    ref = vis_alignaddr(ref);
    vis_ld64(ref[0], TMP0);

    vis_ld64_2(ref, 8, TMP2);
    ref += stride;

    vis_ld64(ref[0], TMP4);

    vis_ld64_2(ref, 8, TMP6);
    ref += stride;

    vis_ld64(constants_fe[0], MASK_fe);
    vis_faligndata(TMP0, TMP2, REF_0);

    vis_ld64(constants_7f[0], MASK_7f);
    vis_faligndata(TMP4, TMP6, REF_2);

    vis_ld64(constants128[0], CONST_128);
    height = (height >> 1) - 1;
    do {    /* 12 cycles */
        vis_ld64(ref[0], TMP0);
        vis_xor(REF_0, REF_2, TMP4);

        vis_ld64_2(ref, 8, TMP2);
        ref += stride;
        vis_and(TMP4, MASK_fe, TMP4);

        vis_and(REF_0, REF_2, TMP6);
        vis_mul8x16(CONST_128, TMP4, TMP4);

        vis_faligndata(TMP0, TMP2, REF_0);
        vis_ld64(ref[0], TMP0);

        vis_ld64_2(ref, 8, TMP2);
        ref += stride;
        vis_xor(REF_0, REF_2, TMP12);

        vis_and(TMP4, MASK_7f, TMP4);

        vis_and(TMP12, MASK_fe, TMP12);

        vis_mul8x16(CONST_128, TMP12, TMP12);
        vis_and(REF_0, REF_2, TMP14);

        vis_padd16(TMP6, TMP4, DST_0);
        vis_st64(DST_0, dest[0]);
        dest += stride;

        vis_faligndata(TMP0, TMP2, REF_2);

        vis_and(TMP12, MASK_7f, TMP12);

        vis_padd16(TMP14, TMP12, DST_0);
        vis_st64(DST_0, dest[0]);
        dest += stride;
    } while (--height);

    /* Epilogue: final line pair (no further prefetch needed). */
    vis_ld64(ref[0], TMP0);
    vis_xor(REF_0, REF_2, TMP4);

    vis_ld64_2(ref, 8, TMP2);
    vis_and(TMP4, MASK_fe, TMP4);

    vis_and(REF_0, REF_2, TMP6);
    vis_mul8x16(CONST_128, TMP4, TMP4);

    vis_faligndata(TMP0, TMP2, REF_0);

    vis_xor(REF_0, REF_2, TMP12);

    vis_and(TMP4, MASK_7f, TMP4);

    vis_and(TMP12, MASK_fe, TMP12);

    vis_mul8x16(CONST_128, TMP12, TMP12);
    vis_and(REF_0, REF_2, TMP14);

    vis_padd16(TMP6, TMP4, DST_0);
    vis_st64(DST_0, dest[0]);
    dest += stride;

    vis_and(TMP12, MASK_7f, TMP12);

    vis_padd16(TMP14, TMP12, DST_0);
    vis_st64(DST_0, dest[0]);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3190 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* Average, vertical half-pel ("y"), 16 pixels wide, no rounding.
 *
 * Sums the two vertical reference taps (line n and line n+1, expanded to
 * 16-bit lanes) with the scaled existing dest pixels (mul8x16al with
 * CONST_512), adds the CONST_3 bias and repacks with vis_pack16 under a
 * GSR scale factor of 5 — the averaging counterpart of
 * MC_put_no_round_y_16_vis; TODO confirm exact weighting against vis.h.
 *
 * height: number of lines; must be even (height >>= 1; two lines per
 * iteration, "31 cycles" per the original annotation).
 *
 * NOTE(review): with all DST_* registers occupied, the second pair of dest
 * prefetches is parked in REF_S0/REF_S2 — the /*DST_4* / /*DST_6* / markers
 * flag that aliasing.  Do not reorder statements.
 */
static void MC_avg_no_round_y_16_vis (uint8_t * dest, const uint8_t * _ref,
                                      const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    int stride_8 = stride + 8;
    int stride_16 = stride + 16;

    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    ref = vis_alignaddr(ref);

    /* Prologue: prime REF_2/REF_6 with the first reference line. */
    vis_ld64(ref[ 0], TMP0);
    vis_fzero(ZERO);

    vis_ld64(ref[ 8], TMP2);

    vis_ld64(ref[16], TMP4);

    vis_ld64(constants3[0], CONST_3);
    vis_faligndata(TMP0, TMP2, REF_2);

    vis_ld64(constants256_512[0], CONST_256);
    vis_faligndata(TMP2, TMP4, REF_6);
    height >>= 1;

    do {    /* 31 cycles */
        vis_ld64_2(ref, stride, TMP0);
        vis_pmerge(ZERO, REF_2, TMP12);
        vis_mul8x16au(REF_2_1, CONST_256, TMP14);

        vis_ld64_2(ref, stride_8, TMP2);
        vis_pmerge(ZERO, REF_6, TMP16);
        vis_mul8x16au(REF_6_1, CONST_256, TMP18);

        vis_ld64_2(ref, stride_16, TMP4);
        ref += stride;

        vis_ld64(dest[0], DST_0);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(dest, 8, DST_2);
        vis_faligndata(TMP2, TMP4, REF_4);

        vis_ld64_2(ref, stride, TMP6);
        vis_pmerge(ZERO, REF_0, TMP0);
        vis_mul8x16au(REF_0_1, CONST_256, TMP2);

        vis_ld64_2(ref, stride_8, TMP8);
        vis_pmerge(ZERO, REF_4, TMP4);

        vis_ld64_2(ref, stride_16, TMP10);
        ref += stride;

        /* Second dest line parked in REF_S0/REF_S2 (DST regs are busy). */
        vis_ld64_2(dest, stride, REF_S0/*DST_4*/);
        vis_faligndata(TMP6, TMP8, REF_2);
        vis_mul8x16au(REF_4_1, CONST_256, TMP6);

        vis_ld64_2(dest, stride_8, REF_S2/*DST_6*/);
        vis_faligndata(TMP8, TMP10, REF_6);
        vis_mul8x16al(DST_0, CONST_512, TMP20);

        vis_padd16(TMP0, CONST_3, TMP0);
        vis_mul8x16al(DST_1, CONST_512, TMP22);

        vis_padd16(TMP2, CONST_3, TMP2);
        vis_mul8x16al(DST_2, CONST_512, TMP24);

        vis_padd16(TMP4, CONST_3, TMP4);
        vis_mul8x16al(DST_3, CONST_512, TMP26);

        vis_padd16(TMP6, CONST_3, TMP6);

        vis_padd16(TMP12, TMP20, TMP12);
        vis_mul8x16al(REF_S0, CONST_512, TMP20);

        vis_padd16(TMP14, TMP22, TMP14);
        vis_mul8x16al(REF_S0_1, CONST_512, TMP22);

        vis_padd16(TMP16, TMP24, TMP16);
        vis_mul8x16al(REF_S2, CONST_512, TMP24);

        vis_padd16(TMP18, TMP26, TMP18);
        vis_mul8x16al(REF_S2_1, CONST_512, TMP26);

        vis_padd16(TMP12, TMP0, TMP12);
        vis_mul8x16au(REF_2, CONST_256, TMP28);

        vis_padd16(TMP14, TMP2, TMP14);
        vis_mul8x16au(REF_2_1, CONST_256, TMP30);

        vis_padd16(TMP16, TMP4, TMP16);
        vis_mul8x16au(REF_6, CONST_256, REF_S4);

        vis_padd16(TMP18, TMP6, TMP18);
        vis_mul8x16au(REF_6_1, CONST_256, REF_S6);

        vis_pack16(TMP12, DST_0);
        vis_padd16(TMP28, TMP0, TMP12);

        vis_pack16(TMP14, DST_1);
        vis_st64(DST_0, dest[0]);
        vis_padd16(TMP30, TMP2, TMP14);

        vis_pack16(TMP16, DST_2);
        vis_padd16(REF_S4, TMP4, TMP16);

        vis_pack16(TMP18, DST_3);
        vis_st64_2(DST_2, dest, 8);
        dest += stride;
        vis_padd16(REF_S6, TMP6, TMP18);

        vis_padd16(TMP12, TMP20, TMP12);

        vis_padd16(TMP14, TMP22, TMP14);
        vis_pack16(TMP12, DST_0);

        vis_padd16(TMP16, TMP24, TMP16);
        vis_pack16(TMP14, DST_1);
        vis_st64(DST_0, dest[0]);

        vis_padd16(TMP18, TMP26, TMP18);
        vis_pack16(TMP16, DST_2);

        vis_pack16(TMP18, DST_3);
        vis_st64_2(DST_2, dest, 8);
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3319 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* Average, vertical half-pel ("y"), 8 pixels wide, no rounding.
 *
 * 8-byte-wide variant of MC_avg_no_round_y_16_vis: two vertical reference
 * taps expanded to 16-bit lanes, plus the scaled existing dest pixels
 * (mul8x16al with CONST_512) and the CONST_3 bias, repacked via vis_pack16
 * under a GSR scale factor of 5.
 *
 * height: number of lines; must be even (height >>= 1; two lines per
 * iteration, "20 cycles" per the original annotation).
 *
 * NOTE(review): instruction interleaving is deliberate scheduling —
 * do not reorder statements.
 */
static void MC_avg_no_round_y_8_vis (uint8_t * dest, const uint8_t * _ref,
                                     const int stride, int height)
{
    uint8_t *ref = (uint8_t *) _ref;
    int stride_8 = stride + 8;

    vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

    ref = vis_alignaddr(ref);

    /* Prologue: prime REF_2 with the first reference line. */
    vis_ld64(ref[ 0], TMP0);
    vis_fzero(ZERO);

    vis_ld64(ref[ 8], TMP2);

    vis_ld64(constants3[0], CONST_3);
    vis_faligndata(TMP0, TMP2, REF_2);

    vis_ld64(constants256_512[0], CONST_256);

    height >>= 1;
    do {    /* 20 cycles */
        vis_ld64_2(ref, stride, TMP0);
        vis_pmerge(ZERO, REF_2, TMP8);
        vis_mul8x16au(REF_2_1, CONST_256, TMP10);

        vis_ld64_2(ref, stride_8, TMP2);
        ref += stride;

        vis_ld64(dest[0], DST_0);

        vis_ld64_2(dest, stride, DST_2);
        vis_faligndata(TMP0, TMP2, REF_0);

        vis_ld64_2(ref, stride, TMP4);
        vis_mul8x16al(DST_0, CONST_512, TMP16);
        vis_pmerge(ZERO, REF_0, TMP12);

        vis_ld64_2(ref, stride_8, TMP6);
        ref += stride;
        vis_mul8x16al(DST_1, CONST_512, TMP18);
        vis_pmerge(ZERO, REF_0_1, TMP14);

        vis_padd16(TMP12, CONST_3, TMP12);
        vis_mul8x16al(DST_2, CONST_512, TMP24);

        vis_padd16(TMP14, CONST_3, TMP14);
        vis_mul8x16al(DST_3, CONST_512, TMP26);

        vis_faligndata(TMP4, TMP6, REF_2);

        vis_padd16(TMP8, TMP12, TMP8);

        vis_padd16(TMP10, TMP14, TMP10);
        vis_mul8x16au(REF_2, CONST_256, TMP20);

        vis_padd16(TMP8, TMP16, TMP0);
        vis_mul8x16au(REF_2_1, CONST_256, TMP22);

        vis_padd16(TMP10, TMP18, TMP2);
        vis_pack16(TMP0, DST_0);

        vis_pack16(TMP2, DST_1);
        vis_st64(DST_0, dest[0]);
        dest += stride;
        vis_padd16(TMP12, TMP20, TMP12);

        vis_padd16(TMP14, TMP22, TMP14);

        vis_padd16(TMP12, TMP24, TMP0);

        vis_padd16(TMP14, TMP26, TMP2);
        vis_pack16(TMP0, DST_2);

        vis_pack16(TMP2, DST_3);
        vis_st64(DST_2, dest[0]);
        dest += stride;
    } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3399 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3400 static void MC_put_no_round_xy_16_vis (uint8_t * dest, const uint8_t * _ref, |
2979 | 3401 const int stride, int height) |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3402 { |
2979 | 3403 uint8_t *ref = (uint8_t *) _ref; |
3404 unsigned long off = (unsigned long) ref & 0x7; | |
3405 unsigned long off_plus_1 = off + 1; | |
3406 int stride_8 = stride + 8; | |
3407 int stride_16 = stride + 16; | |
3408 | |
3409 vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT); | |
3410 | |
3411 ref = vis_alignaddr(ref); | |
3412 | |
3413 vis_ld64(ref[ 0], TMP0); | |
3414 vis_fzero(ZERO); | |
3415 | |
3416 vis_ld64(ref[ 8], TMP2); | |
3417 | |
3418 vis_ld64(ref[16], TMP4); | |
3419 | |
3420 vis_ld64(constants1[0], CONST_1); | |
3421 vis_faligndata(TMP0, TMP2, REF_S0); | |
3422 | |
3423 vis_ld64(constants256_512[0], CONST_256); | |
3424 vis_faligndata(TMP2, TMP4, REF_S4); | |
3425 | |
3426 if (off != 0x7) { | |
3427 vis_alignaddr_g0((void *)off_plus_1); | |
3428 vis_faligndata(TMP0, TMP2, REF_S2); | |
3429 vis_faligndata(TMP2, TMP4, REF_S6); | |
3430 } else { | |
3431 vis_src1(TMP2, REF_S2); | |
3432 vis_src1(TMP4, REF_S6); | |
3433 } | |
3434 | |
3435 height >>= 1; | |
3436 do { | |
3437 vis_ld64_2(ref, stride, TMP0); | |
3438 vis_mul8x16au(REF_S0, CONST_256, TMP12); | |
3439 vis_pmerge(ZERO, REF_S0_1, TMP14); | |
3440 | |
3441 vis_alignaddr_g0((void *)off); | |
3442 | |
3443 vis_ld64_2(ref, stride_8, TMP2); | |
3444 vis_mul8x16au(REF_S2, CONST_256, TMP16); | |
3445 vis_pmerge(ZERO, REF_S2_1, TMP18); | |
3446 | |
3447 vis_ld64_2(ref, stride_16, TMP4); | |
3448 ref += stride; | |
3449 vis_mul8x16au(REF_S4, CONST_256, TMP20); | |
3450 vis_pmerge(ZERO, REF_S4_1, TMP22); | |
3451 | |
3452 vis_ld64_2(ref, stride, TMP6); | |
3453 vis_mul8x16au(REF_S6, CONST_256, TMP24); | |
3454 vis_pmerge(ZERO, REF_S6_1, TMP26); | |
3455 | |
3456 vis_ld64_2(ref, stride_8, TMP8); | |
3457 vis_faligndata(TMP0, TMP2, REF_0); | |
3458 | |
3459 vis_ld64_2(ref, stride_16, TMP10); | |
3460 ref += stride; | |
3461 vis_faligndata(TMP2, TMP4, REF_4); | |
3462 | |
3463 vis_faligndata(TMP6, TMP8, REF_S0); | |
3464 | |
3465 vis_faligndata(TMP8, TMP10, REF_S4); | |
3466 | |
3467 if (off != 0x7) { | |
3468 vis_alignaddr_g0((void *)off_plus_1); | |
3469 vis_faligndata(TMP0, TMP2, REF_2); | |
3470 vis_faligndata(TMP2, TMP4, REF_6); | |
3471 vis_faligndata(TMP6, TMP8, REF_S2); | |
3472 vis_faligndata(TMP8, TMP10, REF_S6); | |
3473 } else { | |
3474 vis_src1(TMP2, REF_2); | |
3475 vis_src1(TMP4, REF_6); | |
3476 vis_src1(TMP8, REF_S2); | |
3477 vis_src1(TMP10, REF_S6); | |
3478 } | |
3479 | |
3480 vis_mul8x16au(REF_0, CONST_256, TMP0); | |
3481 vis_pmerge(ZERO, REF_0_1, TMP2); | |
3482 | |
3483 vis_mul8x16au(REF_2, CONST_256, TMP4); | |
3484 vis_pmerge(ZERO, REF_2_1, TMP6); | |
3485 | |
3486 vis_padd16(TMP0, CONST_2, TMP8); | |
3487 vis_mul8x16au(REF_4, CONST_256, TMP0); | |
3488 | |
3489 vis_padd16(TMP2, CONST_1, TMP10); | |
3490 vis_mul8x16au(REF_4_1, CONST_256, TMP2); | |
3491 | |
3492 vis_padd16(TMP8, TMP4, TMP8); | |
3493 vis_mul8x16au(REF_6, CONST_256, TMP4); | |
3494 | |
3495 vis_padd16(TMP10, TMP6, TMP10); | |
3496 vis_mul8x16au(REF_6_1, CONST_256, TMP6); | |
3497 | |
3498 vis_padd16(TMP12, TMP8, TMP12); | |
3499 | |
3500 vis_padd16(TMP14, TMP10, TMP14); | |
3501 | |
3502 vis_padd16(TMP12, TMP16, TMP12); | |
3503 | |
3504 vis_padd16(TMP14, TMP18, TMP14); | |
3505 vis_pack16(TMP12, DST_0); | |
3506 | |
3507 vis_pack16(TMP14, DST_1); | |
3508 vis_st64(DST_0, dest[0]); | |
3509 vis_padd16(TMP0, CONST_1, TMP12); | |
3510 | |
3511 vis_mul8x16au(REF_S0, CONST_256, TMP0); | |
3512 vis_padd16(TMP2, CONST_1, TMP14); | |
3513 | |
3514 vis_mul8x16au(REF_S0_1, CONST_256, TMP2); | |
3515 vis_padd16(TMP12, TMP4, TMP12); | |
3516 | |
3517 vis_mul8x16au(REF_S2, CONST_256, TMP4); | |
3518 vis_padd16(TMP14, TMP6, TMP14); | |
3519 | |
3520 vis_mul8x16au(REF_S2_1, CONST_256, TMP6); | |
3521 vis_padd16(TMP20, TMP12, TMP20); | |
3522 | |
3523 vis_padd16(TMP22, TMP14, TMP22); | |
3524 | |
3525 vis_padd16(TMP20, TMP24, TMP20); | |
3526 | |
3527 vis_padd16(TMP22, TMP26, TMP22); | |
3528 vis_pack16(TMP20, DST_2); | |
3529 | |
3530 vis_pack16(TMP22, DST_3); | |
3531 vis_st64_2(DST_2, dest, 8); | |
3532 dest += stride; | |
3533 vis_padd16(TMP0, TMP4, TMP24); | |
3534 | |
3535 vis_mul8x16au(REF_S4, CONST_256, TMP0); | |
3536 vis_padd16(TMP2, TMP6, TMP26); | |
3537 | |
3538 vis_mul8x16au(REF_S4_1, CONST_256, TMP2); | |
3539 vis_padd16(TMP24, TMP8, TMP24); | |
3540 | |
3541 vis_padd16(TMP26, TMP10, TMP26); | |
3542 vis_pack16(TMP24, DST_0); | |
3543 | |
3544 vis_pack16(TMP26, DST_1); | |
3545 vis_st64(DST_0, dest[0]); | |
3546 vis_pmerge(ZERO, REF_S6, TMP4); | |
3547 | |
3548 vis_pmerge(ZERO, REF_S6_1, TMP6); | |
3549 | |
3550 vis_padd16(TMP0, TMP4, TMP0); | |
3551 | |
3552 vis_padd16(TMP2, TMP6, TMP2); | |
3553 | |
3554 vis_padd16(TMP0, TMP12, TMP0); | |
3555 | |
3556 vis_padd16(TMP2, TMP14, TMP2); | |
3557 vis_pack16(TMP0, DST_2); | |
3558 | |
3559 vis_pack16(TMP2, DST_3); | |
3560 vis_st64_2(DST_2, dest, 8); | |
3561 dest += stride; | |
3562 } while (--height); | |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3563 } |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3564 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 8-pixel-wide "put" motion compensation, half-pel interpolated in both
 * x and y, no-rounding variant (this function belongs to the block closed
 * by the "End of no rounding code" marker below).
 *
 * dest:   destination block (written, advanced by stride per row)
 * _ref:   reference block, possibly unaligned (only read)
 * stride: line size of both blocks
 * height: number of rows; assumed even (it is halved and used as the
 *         do/while loop count, so two rows are produced per iteration)
 *
 * NOTE(review): the exact rounding of the 4-tap average comes from
 * CONST_1 plus the GSR scale factor (5) consumed by vis_pack16 --
 * confirm against the C reference mc function if modifying.
 */
static void MC_put_no_round_xy_8_vis (uint8_t * dest, const uint8_t * _ref,
                                      const int stride, int height)
{
        uint8_t *ref = (uint8_t *) _ref;
        /* Byte misalignment of ref within its 8-byte word; faligndata
         * extracts the unaligned data after vis_alignaddr() below. */
        unsigned long off = (unsigned long) ref & 0x7;
        unsigned long off_plus_1 = off + 1;
        int stride_8 = stride + 8;

        vis_set_gsr(5 << VIS_GSR_SCALEFACT_SHIFT);

        ref = vis_alignaddr(ref);

        /* Prime the pipeline: load and align the row above the first
         * output row (REF_S0 = row at off, REF_S2 = row at off+1). */
        vis_ld64(ref[ 0], TMP0);
        vis_fzero(ZERO);

        vis_ld64(ref[ 8], TMP2);

        vis_ld64(constants1[0], CONST_1);

        vis_ld64(constants256_512[0], CONST_256);
        vis_faligndata(TMP0, TMP2, REF_S0);

        if (off != 0x7) {
                vis_alignaddr_g0((void *)off_plus_1);
                vis_faligndata(TMP0, TMP2, REF_S2);
        } else {
                /* off+1 == 8: faligndata cannot shift by a full word,
                 * so select the second operand directly. */
                vis_src1(TMP2, REF_S2);
        }

        height >>= 1;
        do {    /* 26 cycles */
                vis_ld64_2(ref, stride, TMP0);
                vis_mul8x16au(REF_S0, CONST_256, TMP8);
                vis_pmerge(ZERO, REF_S2, TMP12);

                vis_alignaddr_g0((void *)off);

                vis_ld64_2(ref, stride_8, TMP2);
                ref += stride;
                vis_mul8x16au(REF_S0_1, CONST_256, TMP10);
                vis_pmerge(ZERO, REF_S2_1, TMP14);

                vis_ld64_2(ref, stride, TMP4);

                vis_ld64_2(ref, stride_8, TMP6);
                ref += stride;
                vis_faligndata(TMP0, TMP2, REF_S4);

                vis_pmerge(ZERO, REF_S4, TMP18);

                vis_pmerge(ZERO, REF_S4_1, TMP20);

                vis_faligndata(TMP4, TMP6, REF_S0);

                if (off != 0x7) {
                        vis_alignaddr_g0((void *)off_plus_1);
                        vis_faligndata(TMP0, TMP2, REF_S6);
                        vis_faligndata(TMP4, TMP6, REF_S2);
                } else {
                        vis_src1(TMP2, REF_S6);
                        vis_src1(TMP6, REF_S2);
                }

                /* Accumulate the four neighbours (plus CONST_1 bias) in
                 * 16-bit lanes, then vis_pack16 scales back to bytes. */
                vis_padd16(TMP18, CONST_1, TMP18);
                vis_mul8x16au(REF_S6, CONST_256, TMP22);

                vis_padd16(TMP20, CONST_1, TMP20);
                vis_mul8x16au(REF_S6_1, CONST_256, TMP24);

                vis_mul8x16au(REF_S0, CONST_256, TMP26);
                vis_pmerge(ZERO, REF_S0_1, TMP28);

                vis_mul8x16au(REF_S2, CONST_256, TMP30);
                vis_padd16(TMP18, TMP22, TMP18);

                vis_mul8x16au(REF_S2_1, CONST_256, TMP32);
                vis_padd16(TMP20, TMP24, TMP20);

                vis_padd16(TMP8, TMP18, TMP8);

                vis_padd16(TMP10, TMP20, TMP10);

                vis_padd16(TMP8, TMP12, TMP8);

                vis_padd16(TMP10, TMP14, TMP10);
                vis_pack16(TMP8, DST_0);

                vis_pack16(TMP10, DST_1);
                vis_st64(DST_0, dest[0]);
                dest += stride;
                vis_padd16(TMP18, TMP26, TMP18);

                vis_padd16(TMP20, TMP28, TMP20);

                vis_padd16(TMP18, TMP30, TMP18);

                vis_padd16(TMP20, TMP32, TMP20);
                vis_pack16(TMP18, DST_2);

                vis_pack16(TMP20, DST_3);
                vis_st64(DST_2, dest[0]);
                dest += stride;
        } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3669 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 16-pixel-wide "avg" motion compensation, half-pel interpolated in both
 * x and y, no-rounding variant.  Unlike the put flavour, the current
 * destination pixels are loaded back (vis_ld64(dest[0], ...)) and blended
 * into the result via the CONST_1024 weight before storing.
 *
 * dest:   destination block (read AND written, advanced by stride per row)
 * _ref:   reference block, possibly unaligned (only read)
 * stride: line size of both blocks
 * height: number of rows; assumed even (halved into the loop count,
 *         two rows processed per iteration)
 *
 * NOTE(review): rounding is controlled by CONST_6 and the GSR scale
 * factor (4) consumed by vis_pack16 -- confirm against the C reference
 * before changing any constant.
 */
static void MC_avg_no_round_xy_16_vis (uint8_t * dest, const uint8_t * _ref,
                                       const int stride, int height)
{
        uint8_t *ref = (uint8_t *) _ref;
        /* Byte misalignment of ref inside its 8-byte word. */
        unsigned long off = (unsigned long) ref & 0x7;
        unsigned long off_plus_1 = off + 1;
        int stride_8 = stride + 8;
        int stride_16 = stride + 16;

        vis_set_gsr(4 << VIS_GSR_SCALEFACT_SHIFT);

        ref = vis_alignaddr(ref);

        /* Prime: 24 bytes cover a misaligned 16-pixel row; align into
         * REF_S0/REF_S4 (at off) and REF_S2/REF_S6 (at off+1). */
        vis_ld64(ref[ 0], TMP0);
        vis_fzero(ZERO);

        vis_ld64(ref[ 8], TMP2);

        vis_ld64(ref[16], TMP4);

        vis_ld64(constants6[0], CONST_6);
        vis_faligndata(TMP0, TMP2, REF_S0);

        vis_ld64(constants256_1024[0], CONST_256);
        vis_faligndata(TMP2, TMP4, REF_S4);

        if (off != 0x7) {
                vis_alignaddr_g0((void *)off_plus_1);
                vis_faligndata(TMP0, TMP2, REF_S2);
                vis_faligndata(TMP2, TMP4, REF_S6);
        } else {
                /* off+1 == 8: full-word shift, take the second operand. */
                vis_src1(TMP2, REF_S2);
                vis_src1(TMP4, REF_S6);
        }

        height >>= 1;
        do {    /* 55 cycles */
                vis_ld64_2(ref, stride, TMP0);
                vis_mul8x16au(REF_S0, CONST_256, TMP12);
                vis_pmerge(ZERO, REF_S0_1, TMP14);

                vis_alignaddr_g0((void *)off);

                vis_ld64_2(ref, stride_8, TMP2);
                vis_mul8x16au(REF_S2, CONST_256, TMP16);
                vis_pmerge(ZERO, REF_S2_1, TMP18);

                vis_ld64_2(ref, stride_16, TMP4);
                ref += stride;
                vis_mul8x16au(REF_S4, CONST_256, TMP20);
                vis_pmerge(ZERO, REF_S4_1, TMP22);

                vis_ld64_2(ref, stride, TMP6);
                vis_mul8x16au(REF_S6, CONST_256, TMP24);
                vis_pmerge(ZERO, REF_S6_1, TMP26);

                vis_ld64_2(ref, stride_8, TMP8);
                vis_faligndata(TMP0, TMP2, REF_0);

                vis_ld64_2(ref, stride_16, TMP10);
                ref += stride;
                vis_faligndata(TMP2, TMP4, REF_4);

                /* Load current destination row for the averaging blend. */
                vis_ld64(dest[0], DST_0);
                vis_faligndata(TMP6, TMP8, REF_S0);

                vis_ld64_2(dest, 8, DST_2);
                vis_faligndata(TMP8, TMP10, REF_S4);

                if (off != 0x7) {
                        vis_alignaddr_g0((void *)off_plus_1);
                        vis_faligndata(TMP0, TMP2, REF_2);
                        vis_faligndata(TMP2, TMP4, REF_6);
                        vis_faligndata(TMP6, TMP8, REF_S2);
                        vis_faligndata(TMP8, TMP10, REF_S6);
                } else {
                        vis_src1(TMP2, REF_2);
                        vis_src1(TMP4, REF_6);
                        vis_src1(TMP8, REF_S2);
                        vis_src1(TMP10, REF_S6);
                }

                /* Widen dest (weight CONST_1024) and the four reference
                 * neighbours (weight CONST_256) into 16-bit lanes, sum
                 * with the CONST_6 bias, pack back to bytes. */
                vis_mul8x16al(DST_0,   CONST_1024, TMP30);
                vis_pmerge(ZERO, REF_0, TMP0);

                vis_mul8x16al(DST_1,   CONST_1024, TMP32);
                vis_pmerge(ZERO, REF_0_1, TMP2);

                vis_mul8x16au(REF_2, CONST_256, TMP4);
                vis_pmerge(ZERO, REF_2_1, TMP6);

                vis_mul8x16al(DST_2,   CONST_1024, REF_0);
                vis_padd16(TMP0, CONST_6, TMP0);

                vis_mul8x16al(DST_3,   CONST_1024, REF_2);
                vis_padd16(TMP2, CONST_6, TMP2);

                vis_padd16(TMP0, TMP4, TMP0);
                vis_mul8x16au(REF_4, CONST_256, TMP4);

                vis_padd16(TMP2, TMP6, TMP2);
                vis_mul8x16au(REF_4_1, CONST_256, TMP6);

                vis_padd16(TMP12, TMP0, TMP12);
                vis_mul8x16au(REF_6, CONST_256, TMP8);

                vis_padd16(TMP14, TMP2, TMP14);
                vis_mul8x16au(REF_6_1, CONST_256, TMP10);

                vis_padd16(TMP12, TMP16, TMP12);
                vis_mul8x16au(REF_S0, CONST_256, REF_4);

                vis_padd16(TMP14, TMP18, TMP14);
                vis_mul8x16au(REF_S0_1, CONST_256, REF_6);

                vis_padd16(TMP12, TMP30, TMP12);

                vis_padd16(TMP14, TMP32, TMP14);
                vis_pack16(TMP12, DST_0);

                vis_pack16(TMP14, DST_1);
                vis_st64(DST_0, dest[0]);
                vis_padd16(TMP4, CONST_6, TMP4);

                vis_ld64_2(dest, stride, DST_0);
                vis_padd16(TMP6, CONST_6, TMP6);
                vis_mul8x16au(REF_S2, CONST_256, TMP12);

                vis_padd16(TMP4, TMP8, TMP4);
                vis_mul8x16au(REF_S2_1, CONST_256, TMP14);

                vis_padd16(TMP6, TMP10, TMP6);

                vis_padd16(TMP20, TMP4, TMP20);

                vis_padd16(TMP22, TMP6, TMP22);

                vis_padd16(TMP20, TMP24, TMP20);

                vis_padd16(TMP22, TMP26, TMP22);

                vis_padd16(TMP20, REF_0, TMP20);
                vis_mul8x16au(REF_S4, CONST_256, REF_0);

                vis_padd16(TMP22, REF_2, TMP22);
                vis_pack16(TMP20, DST_2);

                vis_pack16(TMP22, DST_3);
                vis_st64_2(DST_2, dest, 8);
                dest += stride;

                /* Second output row of the pair. */
                vis_ld64_2(dest, 8, DST_2);
                vis_mul8x16al(DST_0,   CONST_1024, TMP30);
                vis_pmerge(ZERO, REF_S4_1, REF_2);

                vis_mul8x16al(DST_1,   CONST_1024, TMP32);
                vis_padd16(REF_4, TMP0, TMP8);

                vis_mul8x16au(REF_S6, CONST_256, REF_4);
                vis_padd16(REF_6, TMP2, TMP10);

                vis_mul8x16au(REF_S6_1, CONST_256, REF_6);
                vis_padd16(TMP8, TMP12, TMP8);

                vis_padd16(TMP10, TMP14, TMP10);

                vis_padd16(TMP8, TMP30, TMP8);

                vis_padd16(TMP10, TMP32, TMP10);
                vis_pack16(TMP8, DST_0);

                vis_pack16(TMP10, DST_1);
                vis_st64(DST_0, dest[0]);

                vis_padd16(REF_0, TMP4, REF_0);

                vis_mul8x16al(DST_2,   CONST_1024, TMP30);
                vis_padd16(REF_2, TMP6, REF_2);

                vis_mul8x16al(DST_3,   CONST_1024, TMP32);
                vis_padd16(REF_0, REF_4, REF_0);

                vis_padd16(REF_2, REF_6, REF_2);

                vis_padd16(REF_0, TMP30, REF_0);

                /* stall */

                vis_padd16(REF_2, TMP32, REF_2);
                vis_pack16(REF_0, DST_2);

                vis_pack16(REF_2, DST_3);
                vis_st64_2(DST_2, dest, 8);
                dest += stride;
        } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3866 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
/* 8-pixel-wide "avg" motion compensation, half-pel interpolated in both
 * x and y, no-rounding variant.  The current destination pixels are
 * loaded back and blended in (CONST_1024 weight) before the store.
 *
 * dest:   destination block (read AND written, advanced by stride per row)
 * _ref:   reference block, possibly unaligned (only read)
 * stride: line size of both blocks
 * height: number of rows; assumed even (halved into the loop count,
 *         two rows processed per iteration)
 *
 * NOTE(review): rounding is controlled by CONST_6 and the GSR scale
 * factor (4) used by vis_pack16 -- confirm against the C reference
 * before changing any constant.
 */
static void MC_avg_no_round_xy_8_vis (uint8_t * dest, const uint8_t * _ref,
                                      const int stride, int height)
{
        uint8_t *ref = (uint8_t *) _ref;
        /* Byte misalignment of ref inside its 8-byte word. */
        unsigned long off = (unsigned long) ref & 0x7;
        unsigned long off_plus_1 = off + 1;
        int stride_8 = stride + 8;

        vis_set_gsr(4 << VIS_GSR_SCALEFACT_SHIFT);

        ref = vis_alignaddr(ref);

        /* Prime: align the row above the first output row into
         * REF_S0 (at off) and REF_S2 (at off+1). */
        vis_ld64(ref[0], TMP0);
        vis_fzero(ZERO);

        vis_ld64_2(ref, 8, TMP2);

        vis_ld64(constants6[0], CONST_6);

        vis_ld64(constants256_1024[0], CONST_256);
        vis_faligndata(TMP0, TMP2, REF_S0);

        if (off != 0x7) {
                vis_alignaddr_g0((void *)off_plus_1);
                vis_faligndata(TMP0, TMP2, REF_S2);
        } else {
                /* off+1 == 8: full-word shift, take the second operand. */
                vis_src1(TMP2, REF_S2);
        }

        height >>= 1;
        do {    /* 31 cycles */
                vis_ld64_2(ref, stride, TMP0);
                vis_mul8x16au(REF_S0, CONST_256, TMP8);
                vis_pmerge(ZERO, REF_S0_1, TMP10);

                vis_ld64_2(ref, stride_8, TMP2);
                ref += stride;
                vis_mul8x16au(REF_S2, CONST_256, TMP12);
                vis_pmerge(ZERO, REF_S2_1, TMP14);

                vis_alignaddr_g0((void *)off);

                vis_ld64_2(ref, stride, TMP4);
                vis_faligndata(TMP0, TMP2, REF_S4);

                vis_ld64_2(ref, stride_8, TMP6);
                ref += stride;

                /* Load current destination rows for the averaging blend. */
                vis_ld64(dest[0], DST_0);
                vis_faligndata(TMP4, TMP6, REF_S0);

                vis_ld64_2(dest, stride, DST_2);

                if (off != 0x7) {
                        vis_alignaddr_g0((void *)off_plus_1);
                        vis_faligndata(TMP0, TMP2, REF_S6);
                        vis_faligndata(TMP4, TMP6, REF_S2);
                } else {
                        vis_src1(TMP2, REF_S6);
                        vis_src1(TMP6, REF_S2);
                }

                /* Widen dest (CONST_1024) and the four reference
                 * neighbours (CONST_256) into 16-bit lanes, add the
                 * CONST_6 bias, then pack back down to bytes. */
                vis_mul8x16al(DST_0,   CONST_1024, TMP30);
                vis_pmerge(ZERO, REF_S4, TMP22);

                vis_mul8x16al(DST_1,   CONST_1024, TMP32);
                vis_pmerge(ZERO, REF_S4_1, TMP24);

                vis_mul8x16au(REF_S6, CONST_256, TMP26);
                vis_pmerge(ZERO, REF_S6_1, TMP28);

                vis_mul8x16au(REF_S0, CONST_256, REF_S4);
                vis_padd16(TMP22, CONST_6, TMP22);

                vis_mul8x16au(REF_S0_1, CONST_256, REF_S6);
                vis_padd16(TMP24, CONST_6, TMP24);

                vis_mul8x16al(DST_2,   CONST_1024, REF_0);
                vis_padd16(TMP22, TMP26, TMP22);

                vis_mul8x16al(DST_3,   CONST_1024, REF_2);
                vis_padd16(TMP24, TMP28, TMP24);

                vis_mul8x16au(REF_S2, CONST_256, TMP26);
                vis_padd16(TMP8, TMP22, TMP8);

                vis_mul8x16au(REF_S2_1, CONST_256, TMP28);
                vis_padd16(TMP10, TMP24, TMP10);

                vis_padd16(TMP8, TMP12, TMP8);

                vis_padd16(TMP10, TMP14, TMP10);

                vis_padd16(TMP8, TMP30, TMP8);

                vis_padd16(TMP10, TMP32, TMP10);
                vis_pack16(TMP8, DST_0);

                vis_pack16(TMP10, DST_1);
                vis_st64(DST_0, dest[0]);
                dest += stride;

                /* Second output row of the pair. */
                vis_padd16(REF_S4, TMP22, TMP12);

                vis_padd16(REF_S6, TMP24, TMP14);

                vis_padd16(TMP12, TMP26, TMP12);

                vis_padd16(TMP14, TMP28, TMP14);

                vis_padd16(TMP12, REF_0, TMP12);

                vis_padd16(TMP14, REF_2, TMP14);
                vis_pack16(TMP12, DST_2);

                vis_pack16(TMP14, DST_3);
                vis_st64(DST_2, dest[0]);
                dest += stride;
        } while (--height);
}
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3987 |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3988 /* End of no rounding code */ |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
3989 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
3990 static sigjmp_buf jmpbuf; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
3991 static volatile sig_atomic_t canjump = 0; |
2967 | 3992 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
3993 static void sigill_handler (int sig) |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
3994 { |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
3995 if (!canjump) { |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
3996 signal (sig, SIG_DFL); |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
3997 raise (sig); |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
3998 } |
2361
8616fd2dd2ef
whitespace cleanup patch by (James A. Morrison <ja2morri>@<csclub>dot<uwaterloo>point<ca>)
michael
parents:
2136
diff
changeset
|
3999 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4000 canjump = 0; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4001 siglongjmp (jmpbuf, 1); |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4002 } |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4003 |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4004 #define ACCEL_SPARC_VIS 1 |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4005 #define ACCEL_SPARC_VIS2 2 |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4006 |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4007 static int vis_level () |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4008 { |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4009 int accel = 0; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4010 |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4011 signal (SIGILL, sigill_handler); |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4012 if (sigsetjmp (jmpbuf, 1)) { |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4013 signal (SIGILL, SIG_DFL); |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4014 return accel; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4015 } |
2967 | 4016 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4017 canjump = 1; |
2967 | 4018 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4019 /* pdist %f0, %f0, %f0 */ |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4020 __asm__ __volatile__(".word\t0x81b007c0"); |
2361
8616fd2dd2ef
whitespace cleanup patch by (James A. Morrison <ja2morri>@<csclub>dot<uwaterloo>point<ca>)
michael
parents:
2136
diff
changeset
|
4021 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4022 canjump = 0; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4023 accel |= ACCEL_SPARC_VIS; |
2361
8616fd2dd2ef
whitespace cleanup patch by (James A. Morrison <ja2morri>@<csclub>dot<uwaterloo>point<ca>)
michael
parents:
2136
diff
changeset
|
4024 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4025 if (sigsetjmp (jmpbuf, 1)) { |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4026 signal (SIGILL, SIG_DFL); |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4027 return accel; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4028 } |
2361
8616fd2dd2ef
whitespace cleanup patch by (James A. Morrison <ja2morri>@<csclub>dot<uwaterloo>point<ca>)
michael
parents:
2136
diff
changeset
|
4029 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4030 canjump = 1; |
2361
8616fd2dd2ef
whitespace cleanup patch by (James A. Morrison <ja2morri>@<csclub>dot<uwaterloo>point<ca>)
michael
parents:
2136
diff
changeset
|
4031 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4032 /* edge8n %g0, %g0, %g0 */ |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4033 __asm__ __volatile__(".word\t0x81b00020"); |
2361
8616fd2dd2ef
whitespace cleanup patch by (James A. Morrison <ja2morri>@<csclub>dot<uwaterloo>point<ca>)
michael
parents:
2136
diff
changeset
|
4034 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4035 canjump = 0; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4036 accel |= ACCEL_SPARC_VIS2; |
2361
8616fd2dd2ef
whitespace cleanup patch by (James A. Morrison <ja2morri>@<csclub>dot<uwaterloo>point<ca>)
michael
parents:
2136
diff
changeset
|
4037 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4038 signal (SIGILL, SIG_DFL); |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4039 |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4040 return accel; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4041 } |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4042 |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
4043 /* libavcodec initialization code */ |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
4044 void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx) |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
4045 { |
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
4046 /* VIS specific optimisations */ |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4047 int accel = vis_level (); |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4048 |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4049 if (accel & ACCEL_SPARC_VIS) { |
5618 | 4050 if(avctx->idct_algo==FF_IDCT_SIMPLEVIS){ |
4051 c->idct_put = ff_simple_idct_put_vis; | |
4052 c->idct_add = ff_simple_idct_add_vis; | |
4053 c->idct = ff_simple_idct_vis; | |
4054 c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; | |
4055 } | |
4056 | |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4057 c->put_pixels_tab[0][0] = MC_put_o_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4058 c->put_pixels_tab[0][1] = MC_put_x_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4059 c->put_pixels_tab[0][2] = MC_put_y_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4060 c->put_pixels_tab[0][3] = MC_put_xy_16_vis; |
2967 | 4061 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4062 c->put_pixels_tab[1][0] = MC_put_o_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4063 c->put_pixels_tab[1][1] = MC_put_x_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4064 c->put_pixels_tab[1][2] = MC_put_y_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4065 c->put_pixels_tab[1][3] = MC_put_xy_8_vis; |
2967 | 4066 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4067 c->avg_pixels_tab[0][0] = MC_avg_o_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4068 c->avg_pixels_tab[0][1] = MC_avg_x_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4069 c->avg_pixels_tab[0][2] = MC_avg_y_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4070 c->avg_pixels_tab[0][3] = MC_avg_xy_16_vis; |
2967 | 4071 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4072 c->avg_pixels_tab[1][0] = MC_avg_o_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4073 c->avg_pixels_tab[1][1] = MC_avg_x_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4074 c->avg_pixels_tab[1][2] = MC_avg_y_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4075 c->avg_pixels_tab[1][3] = MC_avg_xy_8_vis; |
2967 | 4076 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4077 c->put_no_rnd_pixels_tab[0][0] = MC_put_no_round_o_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4078 c->put_no_rnd_pixels_tab[0][1] = MC_put_no_round_x_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4079 c->put_no_rnd_pixels_tab[0][2] = MC_put_no_round_y_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4080 c->put_no_rnd_pixels_tab[0][3] = MC_put_no_round_xy_16_vis; |
2967 | 4081 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4082 c->put_no_rnd_pixels_tab[1][0] = MC_put_no_round_o_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4083 c->put_no_rnd_pixels_tab[1][1] = MC_put_no_round_x_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4084 c->put_no_rnd_pixels_tab[1][2] = MC_put_no_round_y_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4085 c->put_no_rnd_pixels_tab[1][3] = MC_put_no_round_xy_8_vis; |
2967 | 4086 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4087 c->avg_no_rnd_pixels_tab[0][0] = MC_avg_no_round_o_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4088 c->avg_no_rnd_pixels_tab[0][1] = MC_avg_no_round_x_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4089 c->avg_no_rnd_pixels_tab[0][2] = MC_avg_no_round_y_16_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4090 c->avg_no_rnd_pixels_tab[0][3] = MC_avg_no_round_xy_16_vis; |
2967 | 4091 |
1966
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4092 c->avg_no_rnd_pixels_tab[1][0] = MC_avg_no_round_o_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4093 c->avg_no_rnd_pixels_tab[1][1] = MC_avg_no_round_x_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4094 c->avg_no_rnd_pixels_tab[1][2] = MC_avg_no_round_y_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4095 c->avg_no_rnd_pixels_tab[1][3] = MC_avg_no_round_xy_8_vis; |
e1fc7c598558
License change and cpu detection patch by (James Morrison <ja2morri at csclub dot uwaterloo dot ca>)
michael
parents:
1959
diff
changeset
|
4096 } |
1959
55b7435c59b8
VIS optimized motion compensation code. by (David S. Miller <davem at redhat dot com>)
michael
parents:
diff
changeset
|
4097 } |