/*
 * MMX and SSE2 optimized snow DSP utils
 * Copyright (c) 2005-2006 Robert Edele <yartrebo@earthlink.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/snow.h"

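/* Inverse horizontal 9/7 lifting for one line of IDWTELEMs. On entry the
 * low-pass coefficients occupy b[0..w2) and the high-pass coefficients
 * b[w2..width); four lifting passes (Lift 0..3) undo the transform and a
 * final pass interleaves the two halves back into b. The scalar while loop
 * in ff_snow_vertical_compose97i_sse2() below spells out the same four
 * lifting steps in C. */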
void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
    const int w2= (width+1)>>1;
    DECLARE_ALIGNED_16(IDWTELEM, temp[width>>1]);
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        IDWTELEM * const ref = b + w2 - 1;
        IDWTELEM b_0 = b[0]; // By allowing the first entry in b[0] to be calculated twice
        // (the first time erroneously), we allow the SSE2 code to run an extra pass.
        // The savings in code and time are well worth having to store this value and
        // calculate b[0] correctly afterwards.

        i = 0;
        __asm__ volatile(
            "pcmpeqd %%xmm7, %%xmm7       \n\t"
            "pcmpeqd %%xmm3, %%xmm3       \n\t"
            "psllw       $1, %%xmm3       \n\t"
            "paddw   %%xmm7, %%xmm3       \n\t"
            "psllw      $13, %%xmm3       \n\t"
        ::);
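        /* xmm7 now holds -1 in every 16-bit word and xmm3 holds -3<<13
         * (built as ((-1<<1) + -1) << 13), so the pmulhw below computes
         * (x * (-3<<13)) >> 16 == (-3*x) >> 3 per word -- the W_DM/W_DS
         * scaling that the scalar lead-out applies explicitly. */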
        for(; i<w_l-15; i+=16){
            __asm__ volatile(
                "movdqu   (%1), %%xmm1    \n\t"
                "movdqu 16(%1), %%xmm5    \n\t"
                "movdqu  2(%1), %%xmm2    \n\t"
                "movdqu 18(%1), %%xmm6    \n\t"
                "paddw  %%xmm1, %%xmm2    \n\t"
                "paddw  %%xmm5, %%xmm6    \n\t"
                "paddw  %%xmm7, %%xmm2    \n\t"
                "paddw  %%xmm7, %%xmm6    \n\t"
                "pmulhw %%xmm3, %%xmm2    \n\t"
                "pmulhw %%xmm3, %%xmm6    \n\t"
                "paddw    (%0), %%xmm2    \n\t"
                "paddw  16(%0), %%xmm6    \n\t"
                "movdqa %%xmm2, (%0)      \n\t"
                "movdqa %%xmm6, 16(%0)    \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    { // Lift 1
        IDWTELEM * const dst = b+w2;

        i = 0;
        for(; (((x86_reg)&dst[i]) & 0x1F) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }
        for(; i<w_r-15; i+=16){
            __asm__ volatile(
                "movdqu   (%1), %%xmm1    \n\t"
                "movdqu 16(%1), %%xmm5    \n\t"
                "movdqu  2(%1), %%xmm2    \n\t"
                "movdqu 18(%1), %%xmm6    \n\t"
                "paddw  %%xmm1, %%xmm2    \n\t"
                "paddw  %%xmm5, %%xmm6    \n\t"
                "movdqa   (%0), %%xmm0    \n\t"
                "movdqa 16(%0), %%xmm4    \n\t"
                "psubw  %%xmm2, %%xmm0    \n\t"
                "psubw  %%xmm6, %%xmm4    \n\t"
                "movdqa %%xmm0, (%0)      \n\t"
                "movdqa %%xmm4, 16(%0)    \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        IDWTELEM * const ref = b+w2 - 1;
        IDWTELEM b_0 = b[0];

        i = 0;
        __asm__ volatile(
            "psllw      $15, %%xmm7       \n\t"
            "pcmpeqw %%xmm6, %%xmm6       \n\t"
            "psrlw      $13, %%xmm6       \n\t"
            "paddw   %%xmm7, %%xmm6       \n\t"
        ::);
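        /* Note: this relies on xmm7 still holding all-ones from the Lift 0
         * block above. After these shifts xmm7 = 0x8000 per word (a bias
         * that maps signed words into unsigned range for pavgw below) and
         * xmm6 = 0x8007 (the same bias plus a rounding constant of 7). */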
        for(; i<w_l-15; i+=16){
            __asm__ volatile(
                "movdqu   (%1), %%xmm0    \n\t"
                "movdqu 16(%1), %%xmm4    \n\t"
                "movdqu  2(%1), %%xmm1    \n\t"
                "movdqu 18(%1), %%xmm5    \n\t" //FIXME try aligned reads and shifts
                "paddw  %%xmm6, %%xmm0    \n\t"
                "paddw  %%xmm6, %%xmm4    \n\t"
                "paddw  %%xmm7, %%xmm1    \n\t"
                "paddw  %%xmm7, %%xmm5    \n\t"
                "pavgw  %%xmm1, %%xmm0    \n\t"
                "pavgw  %%xmm5, %%xmm4    \n\t"
                "psubw  %%xmm7, %%xmm0    \n\t"
                "psubw  %%xmm7, %%xmm4    \n\t"
                "psraw      $1, %%xmm0    \n\t"
                "psraw      $1, %%xmm4    \n\t"
                "movdqa   (%0), %%xmm1    \n\t"
                "movdqa 16(%0), %%xmm5    \n\t"
                "paddw  %%xmm1, %%xmm0    \n\t"
                "paddw  %%xmm5, %%xmm4    \n\t"
                "psraw      $2, %%xmm0    \n\t"
                "psraw      $2, %%xmm4    \n\t"
                "paddw  %%xmm1, %%xmm0    \n\t"
                "paddw  %%xmm5, %%xmm4    \n\t"
                "movdqa %%xmm0, (%0)      \n\t"
                "movdqa %%xmm4, 16(%0)    \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 + ((2 * ref[1] + W_BO-1 + 4 * b_0) >> W_BS);
    }

    { // Lift 3
        IDWTELEM * const src = b+w2;

        i = 0;
        for(; (((x86_reg)&temp[i]) & 0x1F) && i<w_r; i++){
            temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS);
        }
        for(; i<w_r-15; i+=16){
            __asm__ volatile(
                "movdqu  2(%1), %%xmm2    \n\t"
                "movdqu 18(%1), %%xmm6    \n\t"
                "paddw    (%1), %%xmm2    \n\t"
                "paddw  16(%1), %%xmm6    \n\t"
                "movdqu   (%0), %%xmm0    \n\t"
                "movdqu 16(%0), %%xmm4    \n\t"
                "paddw  %%xmm2, %%xmm0    \n\t"
                "paddw  %%xmm6, %%xmm4    \n\t"
                "psraw      $1, %%xmm2    \n\t"
                "psraw      $1, %%xmm6    \n\t"
                "paddw  %%xmm0, %%xmm2    \n\t"
                "paddw  %%xmm4, %%xmm6    \n\t"
                "movdqa %%xmm2, (%2)      \n\t"
                "movdqa %%xmm6, 16(%2)    \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }

    {
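        // Final pass: the low half of the coefficients is in b and the high
        // half in temp; interleave them so b[2k] comes from the low half and
        // b[2k+1] from temp[k]. The loops run backwards so b can be expanded
        // in place.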
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x3E) != 0x3E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=62; i>=0; i-=64){
            __asm__ volatile(
                "movdqa      (%1), %%xmm0 \n\t"
                "movdqa    16(%1), %%xmm2 \n\t"
                "movdqa    32(%1), %%xmm4 \n\t"
                "movdqa    48(%1), %%xmm6 \n\t"
                "movdqa      (%1), %%xmm1 \n\t"
                "movdqa    16(%1), %%xmm3 \n\t"
                "movdqa    32(%1), %%xmm5 \n\t"
                "movdqa    48(%1), %%xmm7 \n\t"
                "punpcklwd   (%2), %%xmm0 \n\t"
                "punpcklwd 16(%2), %%xmm2 \n\t"
                "punpcklwd 32(%2), %%xmm4 \n\t"
                "punpcklwd 48(%2), %%xmm6 \n\t"
                "movdqa %%xmm0, (%0)      \n\t"
                "movdqa %%xmm2, 32(%0)    \n\t"
                "movdqa %%xmm4, 64(%0)    \n\t"
                "movdqa %%xmm6, 96(%0)    \n\t"
                "punpckhwd   (%2), %%xmm1 \n\t"
                "punpckhwd 16(%2), %%xmm3 \n\t"
                "punpckhwd 32(%2), %%xmm5 \n\t"
                "punpckhwd 48(%2), %%xmm7 \n\t"
                "movdqa %%xmm1, 16(%0)    \n\t"
                "movdqa %%xmm3, 48(%0)    \n\t"
                "movdqa %%xmm5, 80(%0)    \n\t"
                "movdqa %%xmm7, 112(%0)   \n\t"
                :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1])
                : "memory"
            );
        }
    }
}

void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width){
    const int w2= (width+1)>>1;
    IDWTELEM temp[width >> 1];
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        IDWTELEM * const ref = b + w2 - 1;

        i = 1;
        b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7         \n\t"
            "pcmpeqw %%mm3, %%mm3         \n\t"
            "psllw      $1, %%mm3         \n\t"
            "paddw   %%mm7, %%mm3         \n\t"
            "psllw     $13, %%mm3         \n\t"
        ::);
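        /* Same constants as in the SSE2 version: mm7 = -1 per word and
         * mm3 = -3<<13. Here b[0] is computed up front, so the vector loop
         * starts at i = 1. */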
        for(; i<w_l-7; i+=8){
            __asm__ volatile(
                "movq    (%1), %%mm2      \n\t"
                "movq   8(%1), %%mm6      \n\t"
                "paddw  2(%1), %%mm2      \n\t"
                "paddw 10(%1), %%mm6      \n\t"
                "paddw  %%mm7, %%mm2      \n\t"
                "paddw  %%mm7, %%mm6      \n\t"
                "pmulhw %%mm3, %%mm2      \n\t"
                "pmulhw %%mm3, %%mm6      \n\t"
                "paddw   (%0), %%mm2      \n\t"
                "paddw  8(%0), %%mm6      \n\t"
                "movq   %%mm2, (%0)       \n\t"
                "movq   %%mm6, 8(%0)      \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
    }

    { // Lift 1
        IDWTELEM * const dst = b+w2;

        i = 0;
        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movq    (%1), %%mm2      \n\t"
                "movq   8(%1), %%mm6      \n\t"
                "paddw  2(%1), %%mm2      \n\t"
                "paddw 10(%1), %%mm6      \n\t"
                "movq    (%0), %%mm0      \n\t"
                "movq   8(%0), %%mm4      \n\t"
                "psubw  %%mm2, %%mm0      \n\t"
                "psubw  %%mm6, %%mm4      \n\t"
                "movq   %%mm0, (%0)       \n\t"
                "movq   %%mm4, 8(%0)      \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        IDWTELEM * const ref = b+w2 - 1;

        i = 1;
        b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS);
        __asm__ volatile(
            "psllw    $15, %%mm7          \n\t"
            "pcmpeqw %%mm6, %%mm6         \n\t"
            "psrlw    $13, %%mm6          \n\t"
            "paddw  %%mm7, %%mm6          \n\t"
        ::);
        for(; i<w_l-7; i+=8){
            __asm__ volatile(
                "movq    (%1), %%mm0      \n\t"
                "movq   8(%1), %%mm4      \n\t"
                "movq   2(%1), %%mm1      \n\t"
                "movq  10(%1), %%mm5      \n\t"
                "paddw  %%mm6, %%mm0      \n\t"
                "paddw  %%mm6, %%mm4      \n\t"
                "paddw  %%mm7, %%mm1      \n\t"
                "paddw  %%mm7, %%mm5      \n\t"
                "pavgw  %%mm1, %%mm0      \n\t"
                "pavgw  %%mm5, %%mm4      \n\t"
                "psubw  %%mm7, %%mm0      \n\t"
                "psubw  %%mm7, %%mm4      \n\t"
                "psraw     $1, %%mm0      \n\t"
                "psraw     $1, %%mm4      \n\t"
                "movq    (%0), %%mm1      \n\t"
                "movq   8(%0), %%mm5      \n\t"
                "paddw  %%mm1, %%mm0      \n\t"
                "paddw  %%mm5, %%mm4      \n\t"
                "psraw     $2, %%mm0      \n\t"
                "psraw     $2, %%mm4      \n\t"
                "paddw  %%mm1, %%mm0      \n\t"
                "paddw  %%mm5, %%mm4      \n\t"
                "movq   %%mm0, (%0)       \n\t"
                "movq   %%mm4, 8(%0)      \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
    }

    { // Lift 3
        IDWTELEM * const src = b+w2;
        i = 0;

        for(; i<w_r-7; i+=8){
            __asm__ volatile(
                "movq   2(%1), %%mm2      \n\t"
                "movq  10(%1), %%mm6      \n\t"
                "paddw   (%1), %%mm2      \n\t"
                "paddw  8(%1), %%mm6      \n\t"
                "movq    (%0), %%mm0      \n\t"
                "movq   8(%0), %%mm4      \n\t"
                "paddw  %%mm2, %%mm0      \n\t"
                "paddw  %%mm6, %%mm4      \n\t"
                "psraw     $1, %%mm2      \n\t"
                "psraw     $1, %%mm6      \n\t"
                "paddw  %%mm0, %%mm2      \n\t"
                "paddw  %%mm4, %%mm6      \n\t"
                "movq   %%mm2, (%2)       \n\t"
                "movq   %%mm6, 8(%2)      \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x1E) != 0x1E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=30; i>=0; i-=32){
            __asm__ volatile(
                "movq        (%1), %%mm0  \n\t"
                "movq       8(%1), %%mm2  \n\t"
                "movq      16(%1), %%mm4  \n\t"
                "movq      24(%1), %%mm6  \n\t"
                "movq        (%1), %%mm1  \n\t"
                "movq       8(%1), %%mm3  \n\t"
                "movq      16(%1), %%mm5  \n\t"
                "movq      24(%1), %%mm7  \n\t"
                "punpcklwd   (%2), %%mm0  \n\t"
                "punpcklwd  8(%2), %%mm2  \n\t"
                "punpcklwd 16(%2), %%mm4  \n\t"
                "punpcklwd 24(%2), %%mm6  \n\t"
                "movq   %%mm0, (%0)       \n\t"
                "movq   %%mm2, 16(%0)     \n\t"
                "movq   %%mm4, 32(%0)     \n\t"
                "movq   %%mm6, 48(%0)     \n\t"
                "punpckhwd   (%2), %%mm1  \n\t"
                "punpckhwd  8(%2), %%mm3  \n\t"
                "punpckhwd 16(%2), %%mm5  \n\t"
                "punpckhwd 24(%2), %%mm7  \n\t"
                "movq   %%mm1, 8(%0)      \n\t"
                "movq   %%mm3, 24(%0)     \n\t"
                "movq   %%mm5, 40(%0)     \n\t"
                "movq   %%mm7, 56(%0)     \n\t"
                :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1])
                : "memory"
            );
        }
    }
}

#if HAVE_7REGS
#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
        ""op" ("r",%%"REG_d"), %%"t0"    \n\t"\
        ""op" 16("r",%%"REG_d"), %%"t1"  \n\t"\
        ""op" 32("r",%%"REG_d"), %%"t2"  \n\t"\
        ""op" 48("r",%%"REG_d"), %%"t3"  \n\t"

#define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3)

#define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_load_add("paddw",r,t0,t1,t2,t3)

#define snow_vertical_compose_r2r_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
        "psubw %%"s0", %%"t0" \n\t"\
        "psubw %%"s1", %%"t1" \n\t"\
        "psubw %%"s2", %%"t2" \n\t"\
        "psubw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\
        "movdqa %%"s0", ("w",%%"REG_d")   \n\t"\
        "movdqa %%"s1", 16("w",%%"REG_d") \n\t"\
        "movdqa %%"s2", 32("w",%%"REG_d") \n\t"\
        "movdqa %%"s3", 48("w",%%"REG_d") \n\t"

#define snow_vertical_compose_sra(n,t0,t1,t2,t3)\
        "psraw $"n", %%"t0" \n\t"\
        "psraw $"n", %%"t1" \n\t"\
        "psraw $"n", %%"t2" \n\t"\
        "psraw $"n", %%"t3" \n\t"

#define snow_vertical_compose_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
        "paddw %%"s0", %%"t0" \n\t"\
        "paddw %%"s1", %%"t1" \n\t"\
        "paddw %%"s2", %%"t2" \n\t"\
        "paddw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_r2r_pmulhw(s0,s1,s2,s3,t0,t1,t2,t3)\
        "pmulhw %%"s0", %%"t0" \n\t"\
        "pmulhw %%"s1", %%"t1" \n\t"\
        "pmulhw %%"s2", %%"t2" \n\t"\
        "pmulhw %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\
        "movdqa %%"s0", %%"t0" \n\t"\
        "movdqa %%"s1", %%"t1" \n\t"\
        "movdqa %%"s2", %%"t2" \n\t"\
        "movdqa %%"s3", %%"t3" \n\t"

void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){
    x86_reg i = width;

    while(i & 0x1F)
    {
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
    i+=i;
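    /* The scalar loop above peels elements until i is a multiple of 32, so
     * the asm below can step through all six lines in aligned 64-byte
     * (32-coefficient) passes; i+=i turns the element count into the byte
     * offset that REG_d counts down. */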

    __asm__ volatile (
        "jmp 2f                          \n\t"
        "1:                              \n\t"
        snow_vertical_compose_sse2_load("%4","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_add("%6","xmm1","xmm3","xmm5","xmm7")
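        /* xmm1/xmm3/xmm5/xmm7 now hold b3[i]+b5[i]; xmm0 and xmm2 are free
         * to build the -3<<13 multiplier used by the pmulhw step below. The
         * MMX version further down follows the same register pattern. */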
        "pcmpeqw %%xmm0, %%xmm0          \n\t"
        "pcmpeqw %%xmm2, %%xmm2          \n\t"
        "paddw   %%xmm2, %%xmm2          \n\t"
        "paddw   %%xmm0, %%xmm2          \n\t"
        "psllw      $13, %%xmm2          \n\t"
        snow_vertical_compose_r2r_add("xmm0","xmm0","xmm0","xmm0","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_r2r_pmulhw("xmm2","xmm2","xmm2","xmm2","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_add("%5","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_store("%5","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add("%3","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_r2r_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_store("%4","xmm0","xmm2","xmm4","xmm6")
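        /* The W_D result has now been stored through %5 (b4) and the W_C
         * step through %4 (b3); the plain psubw form of the W_C step
         * assumes W_CM == 1 with W_CO == 0 and W_CS == 0, matching the
         * scalar loop above. */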

        "pcmpeqw %%xmm7, %%xmm7          \n\t"
        "pcmpeqw %%xmm5, %%xmm5          \n\t"
        "psllw      $15, %%xmm7          \n\t"
        "psrlw      $13, %%xmm5          \n\t"
        "paddw   %%xmm7, %%xmm5          \n\t"
        snow_vertical_compose_r2r_add("xmm5","xmm5","xmm5","xmm5","xmm0","xmm2","xmm4","xmm6")
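        /* xmm7 = 0x8000 and xmm5 = 0x8007 per word: biasing both operands
         * by 0x8000 lets the unsigned pavgw average signed coefficients,
         * and the extra 7 in xmm5 provides the rounding of the W_B step. */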
        "movdqa   (%2,%%"REG_d"), %%xmm1 \n\t"
        "movdqa 16(%2,%%"REG_d"), %%xmm3 \n\t"
        "paddw   %%xmm7, %%xmm1          \n\t"
        "paddw   %%xmm7, %%xmm3          \n\t"
        "pavgw   %%xmm1, %%xmm0          \n\t"
        "pavgw   %%xmm3, %%xmm2          \n\t"
        "movdqa 32(%2,%%"REG_d"), %%xmm1 \n\t"
        "movdqa 48(%2,%%"REG_d"), %%xmm3 \n\t"
        "paddw   %%xmm7, %%xmm1          \n\t"
        "paddw   %%xmm7, %%xmm3          \n\t"
        "pavgw   %%xmm1, %%xmm4          \n\t"
        "pavgw   %%xmm3, %%xmm6          \n\t"
        snow_vertical_compose_r2r_sub("xmm7","xmm7","xmm7","xmm7","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6")

        snow_vertical_compose_sra("2","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_store("%3","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add("%1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add("%2","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_store("%2","xmm0","xmm2","xmm4","xmm6")

        "2:                              \n\t"
        "sub        $64, %%"REG_d"       \n\t"
        "jge         1b                  \n\t"
        :"+d"(i)
        :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
}

#define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\
        ""op" ("r",%%"REG_d"), %%"t0"   \n\t"\
        ""op" 8("r",%%"REG_d"), %%"t1"  \n\t"\
        ""op" 16("r",%%"REG_d"), %%"t2" \n\t"\
        ""op" 24("r",%%"REG_d"), %%"t3" \n\t"

#define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\
        snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\
        snow_vertical_compose_mmx_load_add("paddw",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\
        "movq %%"s0", ("w",%%"REG_d")   \n\t"\
        "movq %%"s1", 8("w",%%"REG_d")  \n\t"\
        "movq %%"s2", 16("w",%%"REG_d") \n\t"\
        "movq %%"s3", 24("w",%%"REG_d") \n\t"

#define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\
        "movq %%"s0", %%"t0" \n\t"\
        "movq %%"s1", %%"t1" \n\t"\
        "movq %%"s2", %%"t2" \n\t"\
        "movq %%"s3", %%"t3" \n\t"


void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){
    x86_reg i = width;
    while(i & 15)
    {
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
    i+=i;
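    /* Same structure as the SSE2 version above, but with 32-byte
     * (16-coefficient) passes, so i only needs to reach a multiple of 16. */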
    __asm__ volatile(
        "jmp 2f                          \n\t"
        "1:                              \n\t"

        snow_vertical_compose_mmx_load("%4","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_add("%6","mm1","mm3","mm5","mm7")
        "pcmpeqw %%mm0, %%mm0            \n\t"
        "pcmpeqw %%mm2, %%mm2            \n\t"
        "paddw   %%mm2, %%mm2            \n\t"
        "paddw   %%mm0, %%mm2            \n\t"
        "psllw     $13, %%mm2            \n\t"
        snow_vertical_compose_r2r_add("mm0","mm0","mm0","mm0","mm1","mm3","mm5","mm7")
        snow_vertical_compose_r2r_pmulhw("mm2","mm2","mm2","mm2","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_add("%5","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_store("%5","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_load("%4","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%3","mm1","mm3","mm5","mm7")
        snow_vertical_compose_r2r_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store("%4","mm0","mm2","mm4","mm6")
        "pcmpeqw %%mm7, %%mm7            \n\t"
        "pcmpeqw %%mm5, %%mm5            \n\t"
        "psllw     $15, %%mm7            \n\t"
        "psrlw     $13, %%mm5            \n\t"
        "paddw   %%mm7, %%mm5            \n\t"
        snow_vertical_compose_r2r_add("mm5","mm5","mm5","mm5","mm0","mm2","mm4","mm6")
        "movq   (%2,%%"REG_d"), %%mm1    \n\t"
        "movq  8(%2,%%"REG_d"), %%mm3    \n\t"
        "paddw   %%mm7, %%mm1            \n\t"
        "paddw   %%mm7, %%mm3            \n\t"
        "pavgw   %%mm1, %%mm0            \n\t"
        "pavgw   %%mm3, %%mm2            \n\t"
        "movq 16(%2,%%"REG_d"), %%mm1    \n\t"
        "movq 24(%2,%%"REG_d"), %%mm3    \n\t"
        "paddw   %%mm7, %%mm1            \n\t"
        "paddw   %%mm7, %%mm3            \n\t"
        "pavgw   %%mm1, %%mm4            \n\t"
        "pavgw   %%mm3, %%mm6            \n\t"
        snow_vertical_compose_r2r_sub("mm7","mm7","mm7","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6")

        snow_vertical_compose_sra("2","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store("%3","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
        snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add("%2","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store("%2","mm0","mm2","mm4","mm6")

        "2:                              \n\t"
        "sub        $32, %%"REG_d"       \n\t"
        "jge         1b                  \n\t"
        :"+d"(i)
        :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
}
#endif //HAVE_7REGS
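/* OBMC accumulation: every output pixel is covered by four overlapping
 * predictions, each weighted by an obmc window. The macros below walk a
 * block line by line, sum the four weighted contributions, add the IDWT
 * residual from the slice buffer (sb->line), round and clip, and write the
 * result to dst8. xmm7 is kept at zero for unpacking and xmm3 holds the
 * rounding constant built in the header. */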
#define snow_inner_add_yblock_sse2_header \
    IDWTELEM * * dst_array = sb->line + src_y;\
    x86_reg tmp;\
    __asm__ volatile(\
             "mov  %7, %%"REG_c"             \n\t"\
             "mov  %6, %2                    \n\t"\
             "mov  %4, %%"REG_S"             \n\t"\
             "pxor %%xmm7, %%xmm7            \n\t" /* 0 */\
             "pcmpeqd %%xmm3, %%xmm3         \n\t"\
             "psllw $15, %%xmm3              \n\t"\
             "psrlw $12, %%xmm3              \n\t" /* FRAC_BITS >> 1 */\
             "1:                             \n\t"\
             "mov %1, %%"REG_D"              \n\t"\
             "mov (%%"REG_D"), %%"REG_D"     \n\t"\
             "add %3, %%"REG_D"              \n\t"

#define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\
             "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
             "movq (%%"REG_d"), %%"out_reg1" \n\t"\
             "movq (%%"REG_d", %%"REG_c"), %%"out_reg2" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
             "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
             "movq "s_offset"+16(%%"REG_S"), %%xmm4 \n\t"\
             "punpcklbw %%xmm7, %%xmm0       \n\t"\
             "punpcklbw %%xmm7, %%xmm4       \n\t"\
             "pmullw %%xmm0, %%"out_reg1"    \n\t"\
             "pmullw %%xmm4, %%"out_reg2"    \n\t"

#define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\
             "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
             "movq (%%"REG_d"), %%"out_reg1" \n\t"\
             "movq 8(%%"REG_d"), %%"out_reg2" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
             "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
             "movq "s_offset"+8(%%"REG_S"), %%xmm4 \n\t"\
             "punpcklbw %%xmm7, %%xmm0       \n\t"\
             "punpcklbw %%xmm7, %%xmm4       \n\t"\
             "pmullw %%xmm0, %%"out_reg1"    \n\t"\
             "pmullw %%xmm4, %%"out_reg2"    \n\t"

#define snow_inner_add_yblock_sse2_accum_8(ptr_offset, s_offset) \
             snow_inner_add_yblock_sse2_start_8("xmm2", "xmm6", ptr_offset, s_offset)\
             "paddusw %%xmm2, %%xmm1         \n\t"\
             "paddusw %%xmm6, %%xmm5         \n\t"

#define snow_inner_add_yblock_sse2_accum_16(ptr_offset, s_offset) \
             snow_inner_add_yblock_sse2_start_16("xmm2", "xmm6", ptr_offset, s_offset)\
             "paddusw %%xmm2, %%xmm1         \n\t"\
             "paddusw %%xmm6, %%xmm5         \n\t"

#define snow_inner_add_yblock_sse2_end_common1\
             "add $32, %%"REG_S"             \n\t"\
             "add %%"REG_c", %0              \n\t"\
             "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
             "add %%"REG_c", (%%"REG_a")     \n\t"

#define snow_inner_add_yblock_sse2_end_common2\
             "jnz 1b                         \n\t"\
             :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
             :\
             "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"((x86_reg)b_h),"m"((x86_reg)src_stride):\
             "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");

#define snow_inner_add_yblock_sse2_end_8\
             "sal $1, %%"REG_c"              \n\t"\
             "add $"PTR_SIZE"*2, %1          \n\t"\
             snow_inner_add_yblock_sse2_end_common1\
             "sar $1, %%"REG_c"              \n\t"\
             "sub $2, %2                     \n\t"\
             snow_inner_add_yblock_sse2_end_common2

#define snow_inner_add_yblock_sse2_end_16\
             "add $"PTR_SIZE"*1, %1          \n\t"\
             snow_inner_add_yblock_sse2_end_common1\
             "dec %2                         \n\t"\
             snow_inner_add_yblock_sse2_end_common2

static void inner_add_yblock_bw_8_obmc_16_bh_even_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                                       int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_sse2_header
    snow_inner_add_yblock_sse2_start_8("xmm1", "xmm5", "3", "0")
    snow_inner_add_yblock_sse2_accum_8("2", "8")
    snow_inner_add_yblock_sse2_accum_8("1", "128")
    snow_inner_add_yblock_sse2_accum_8("0", "136")

             "mov %0, %%"REG_d"              \n\t"
             "movdqa (%%"REG_D"), %%xmm0     \n\t"
             "movdqa %%xmm1, %%xmm2          \n\t"

             "punpckhwd %%xmm7, %%xmm1       \n\t"
             "punpcklwd %%xmm7, %%xmm2       \n\t"
             "paddd %%xmm2, %%xmm0           \n\t"
             "movdqa 16(%%"REG_D"), %%xmm2   \n\t"
             "paddd %%xmm1, %%xmm2           \n\t"
             "paddd %%xmm3, %%xmm0           \n\t"
             "paddd %%xmm3, %%xmm2           \n\t"

             "mov %1, %%"REG_D"              \n\t"
             "mov "PTR_SIZE"(%%"REG_D"), %%"REG_D";\n\t"
             "add %3, %%"REG_D"              \n\t"

             "movdqa (%%"REG_D"), %%xmm4     \n\t"
             "movdqa %%xmm5, %%xmm6          \n\t"
             "punpckhwd %%xmm7, %%xmm5       \n\t"
             "punpcklwd %%xmm7, %%xmm6       \n\t"
             "paddd %%xmm6, %%xmm4           \n\t"
             "movdqa 16(%%"REG_D"), %%xmm6   \n\t"
             "paddd %%xmm5, %%xmm6           \n\t"
             "paddd %%xmm3, %%xmm4           \n\t"
             "paddd %%xmm3, %%xmm6           \n\t"

             "psrad $8, %%xmm0               \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm2               \n\t" /* FRAC_BITS. */
             "packssdw %%xmm2, %%xmm0        \n\t"
             "packuswb %%xmm7, %%xmm0        \n\t"
             "movq %%xmm0, (%%"REG_d")       \n\t"

             "psrad $8, %%xmm4               \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm6               \n\t" /* FRAC_BITS. */
             "packssdw %%xmm6, %%xmm4        \n\t"
             "packuswb %%xmm7, %%xmm4        \n\t"
             "movq %%xmm4, (%%"REG_d",%%"REG_c");\n\t"
    snow_inner_add_yblock_sse2_end_8
}

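/* Both block widths accumulate the weighted predictions with saturating
 * paddusw; they differ in the final mix with the residual. This 16-wide
 * path pre-scales with psrlw $4 and stays in 16 bits, while the 8-wide
 * path above unpacks to 32 bits and uses paddd before shifting down by the
 * full FRAC_BITS. */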
static void inner_add_yblock_bw_16_obmc_32_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                                int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_sse2_header
    snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0")
    snow_inner_add_yblock_sse2_accum_16("2", "16")
    snow_inner_add_yblock_sse2_accum_16("1", "512")
    snow_inner_add_yblock_sse2_accum_16("0", "528")

             "mov %0, %%"REG_d"              \n\t"
             "psrlw $4, %%xmm1               \n\t"
             "psrlw $4, %%xmm5               \n\t"
             "paddw (%%"REG_D"), %%xmm1      \n\t"
             "paddw 16(%%"REG_D"), %%xmm5    \n\t"
             "paddw %%xmm3, %%xmm1           \n\t"
             "paddw %%xmm3, %%xmm5           \n\t"
             "psraw $4, %%xmm1               \n\t" /* FRAC_BITS. */
             "psraw $4, %%xmm5               \n\t" /* FRAC_BITS. */
             "packuswb %%xmm5, %%xmm1        \n\t"

             "movdqu %%xmm1, (%%"REG_d")     \n\t"

    snow_inner_add_yblock_sse2_end_16
}

#define snow_inner_add_yblock_mmx_header \
    IDWTELEM * * dst_array = sb->line + src_y;\
    x86_reg tmp;\
    __asm__ volatile(\
             "mov  %7, %%"REG_c"             \n\t"\
             "mov  %6, %2                    \n\t"\
             "mov  %4, %%"REG_S"             \n\t"\
             "pxor %%mm7, %%mm7              \n\t" /* 0 */\
             "pcmpeqd %%mm3, %%mm3           \n\t"\
             "psllw $15, %%mm3               \n\t"\
             "psrlw $12, %%mm3               \n\t" /* FRAC_BITS >> 1 */\
             "1:                             \n\t"\
             "mov %1, %%"REG_D"              \n\t"\
             "mov (%%"REG_D"), %%"REG_D"     \n\t"\
             "add %3, %%"REG_D"              \n\t"

#define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\
             "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
             "movd "d_offset"(%%"REG_d"), %%"out_reg1" \n\t"\
             "movd "d_offset"+4(%%"REG_d"), %%"out_reg2" \n\t"\
             "punpcklbw %%mm7, %%"out_reg1"  \n\t"\
             "punpcklbw %%mm7, %%"out_reg2"  \n\t"\
             "movd "s_offset"(%%"REG_S"), %%mm0 \n\t"\
             "movd "s_offset"+4(%%"REG_S"), %%mm4 \n\t"\
             "punpcklbw %%mm7, %%mm0         \n\t"\
             "punpcklbw %%mm7, %%mm4         \n\t"\
             "pmullw %%mm0, %%"out_reg1"     \n\t"\
             "pmullw %%mm4, %%"out_reg2"     \n\t"

#define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \
             snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\
             "paddusw %%mm2, %%mm1           \n\t"\
             "paddusw %%mm6, %%mm5           \n\t"

#define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\
             "mov %0, %%"REG_d"              \n\t"\
             "psrlw $4, %%mm1                \n\t"\
             "psrlw $4, %%mm5                \n\t"\
             "paddw "read_offset"(%%"REG_D"), %%mm1 \n\t"\
             "paddw "read_offset"+8(%%"REG_D"), %%mm5 \n\t"\
             "paddw %%mm3, %%mm1             \n\t"\
             "paddw %%mm3, %%mm5             \n\t"\
             "psraw $4, %%mm1                \n\t"\
             "psraw $4, %%mm5                \n\t"\
             "packuswb %%mm5, %%mm1          \n\t"\
             "movq %%mm1, "write_offset"(%%"REG_d") \n\t"

#define snow_inner_add_yblock_mmx_end(s_step)\
             "add $"s_step", %%"REG_S"       \n\t"\
             "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
             "add %%"REG_c", (%%"REG_a")     \n\t"\
             "add $"PTR_SIZE"*1, %1          \n\t"\
             "add %%"REG_c", %0              \n\t"\
             "dec %2                         \n\t"\
             "jnz 1b                         \n\t"\
             :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
             :\
             "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"((x86_reg)b_h),"m"((x86_reg)src_stride):\
             "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");

static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                              int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_mmx_header
    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
    snow_inner_add_yblock_mmx_accum("2", "8", "0")
    snow_inner_add_yblock_mmx_accum("1", "128", "0")
    snow_inner_add_yblock_mmx_accum("0", "136", "0")
    snow_inner_add_yblock_mmx_mix("0", "0")
    snow_inner_add_yblock_mmx_end("16")
}

static void inner_add_yblock_bw_16_obmc_32_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h,
                                               int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    snow_inner_add_yblock_mmx_header
    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
    snow_inner_add_yblock_mmx_accum("2", "16", "0")
    snow_inner_add_yblock_mmx_accum("1", "512", "0")
    snow_inner_add_yblock_mmx_accum("0", "528", "0")
    snow_inner_add_yblock_mmx_mix("0", "0")

    snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "8", "8")
    snow_inner_add_yblock_mmx_accum("2", "24", "8")
    snow_inner_add_yblock_mmx_accum("1", "520", "8")
    snow_inner_add_yblock_mmx_accum("0", "536", "8")
    snow_inner_add_yblock_mmx_mix("16", "8")
    snow_inner_add_yblock_mmx_end("32")
}

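/* Dispatchers: route each block to the widest kernel that fits its
 * geometry. The SSE2 8-wide kernel emits two rows per pass, so odd block
 * heights fall back to the MMX kernel, and anything else goes to the C
 * fallback ff_snow_inner_add_yblock(). */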
void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                   int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){

    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16) {
        if (!(b_h & 1))
            inner_add_yblock_bw_8_obmc_16_bh_even_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
        else
            inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    } else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}

void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                                  int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16)
        inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}