comparison i386/snowdsp_mmx.c @ 3210:81cafbc23b8d libavcodec
snow mmx+sse2 optimizations, part 4
Patch by Robert Edele, yartrebo <<at>> earthlink <<dot>> net
author    corey
date      Tue, 21 Mar 2006 21:51:07 +0000
parents   33110c1008a4
children  b77b5e7072d6
comparing 3209:cc86ebc32143 with 3210:81cafbc23b8d
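This changeset adds SSE2 and MMX versions of the horizontal pass of Snow's 9/7 integer wavelet compose (inverse transform). For orientation, every lift pass below applies the same scalar update that the C lead-out helpers use; a minimal sketch follows (the name lift_step_scalar is illustrative only; DWTELEM and the W_?M/W_?O/W_?S lifting constants come from ../snow.h). Lift 2 ("liftS") differs in that it also subtracts 4*src[i] inside the shifted term, which is handled by snow_horizontal_compose_liftS_lead_out below.

    /* One horizontal lifting step in plain C.  Each SIMD loop in this file
     * performs this same update on 8 (SSE2) or 4 (MMX) coefficients per
     * iteration. */
    static void lift_step_scalar(DWTELEM *dst, DWTELEM *src, DWTELEM *ref,
                                 int w, int mul, int add, int shift){
        int i;
        for(i = 0; i < w; i++)
            dst[i] = src[i] - ((mul * (ref[i] + ref[i + 1]) + add) >> shift);
    }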

#include "../avcodec.h"
#include "../snow.h"
#include "mmx.h"

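/* Sets up the index for the final interleave pass: starts at width-2 and,
 * when width is odd, moves the last low-band coefficient straight to its
 * interleaved slot so the main loops only have to handle an even count. */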
static void always_inline snow_interleave_line_header(int * i, int width, DWTELEM * low, DWTELEM * high){
    (*i) = (width) - 2;

    if (width & 1){
        low[(*i)+1] = low[((*i)+1)>>1];
        (*i)--;
    }
}

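/* Scalar tail for a generic lifting step: finishes the coefficients the SIMD
 * loops leave over (i..w-1) and, when (width^lift_high) is odd, applies the
 * mirrored update to the edge element dst[w]. */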
static void always_inline snow_horizontal_compose_lift_lead_out(int i, DWTELEM * dst, DWTELEM * src, DWTELEM * ref, int width, int w, int lift_high, int mul, int add, int shift){
    for(; i<w; i++){
        dst[i] = src[i] - ((mul * (ref[i] + ref[i + 1]) + add) >> shift);
    }

    if((width^lift_high)&1){
        dst[w] = src[w] - ((mul * 2 * ref[w] + add) >> shift);
    }
}

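/* Scalar tail for the "liftS" step (Lift 2), whose update also folds in a
 * -4*src[i] term before the shift; the odd-width edge case mirrors ref[w]. */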
static void always_inline snow_horizontal_compose_liftS_lead_out(int i, DWTELEM * dst, DWTELEM * src, DWTELEM * ref, int width, int w){
    for(; i<w; i++){
        dst[i] = src[i] - (((-(ref[i] + ref[(i+1)])+W_BO) - 4 * src[i]) >> W_BS);
    }

    if(width&1){
        dst[w] = src[w] - (((-2 * ref[w] + W_BO) - 4 * src[w]) >> W_BS);
    }
}

void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width){
    const int w2= (width+1)>>1;
    // SSE2 code runs faster with pointers aligned on a 32-byte boundary.
    DWTELEM temp_buf[(width>>1) + 4];
    DWTELEM * const temp = temp_buf + 4 - (((int)temp_buf & 0xF) >> 2);
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

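    /* The horizontal inverse transform runs as four lifting passes over the
     * low half (b[0..w_l-1]) and high half (b[w2..]) of the line, then
     * interleaves the two halves back into b[].  Everything is updated in
     * place except Lift 3, whose results go to temp until the interleave. */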
    { // Lift 0
        DWTELEM * const ref = b + w2 - 1;
        DWTELEM b_0 = b[0]; //By allowing the first entry in b[0] to be calculated twice
        // (the first time erroneously), we allow the SSE2 code to run an extra pass.
        // The savings in code and time are well worth having to store this value and
        // calculate b[0] correctly afterwards.

        i = 0;
        asm volatile(
            "pcmpeqd %%xmm7, %%xmm7 \n\t"
            "pslld $31, %%xmm7 \n\t"
            "psrld $29, %%xmm7 \n\t"
        ::);
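        /* xmm7 now holds four copies of the rounding constant 4 (all-ones,
         * shifted left by 31 then right by 29).  The loop below computes
         * b[i] -= (3*(ref[i] + ref[i+1]) + 4) >> 3 for 8 coefficients per
         * iteration: the unaligned loads fetch ref[i..] and ref[i+1..], and
         * the three paddd-s of the sum give the factor of 3. */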
        for(; i<w_l-7; i+=8){
            asm volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 4(%1), %%xmm2 \n\t"
                "movdqu 20(%1), %%xmm6 \n\t"
                "paddd %%xmm1, %%xmm2 \n\t"
                "paddd %%xmm5, %%xmm6 \n\t"
                "movdqa %%xmm2, %%xmm0 \n\t"
                "movdqa %%xmm6, %%xmm4 \n\t"
                "paddd %%xmm2, %%xmm2 \n\t"
                "paddd %%xmm6, %%xmm6 \n\t"
                "paddd %%xmm0, %%xmm2 \n\t"
                "paddd %%xmm4, %%xmm6 \n\t"
                "paddd %%xmm7, %%xmm2 \n\t"
                "paddd %%xmm7, %%xmm6 \n\t"
                "psrad $3, %%xmm2 \n\t"
                "psrad $3, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubd %%xmm2, %%xmm0 \n\t"
                "psubd %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
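        /* Peel off elements until &dst[i] reaches a 16-byte boundary so the
         * movdqa loads/stores in the main loop hit aligned addresses; the
         * update itself is simply dst[i] -= b[i] + b[i+1]. */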
        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }
        for(; i<w_r-7; i+=8){
            asm volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 4(%1), %%xmm2 \n\t"
                "movdqu 20(%1), %%xmm6 \n\t"
                "paddd %%xmm1, %%xmm2 \n\t"
                "paddd %%xmm5, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubd %%xmm2, %%xmm0 \n\t"
                "psubd %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;
        DWTELEM b_0 = b[0];

        i = 0;
        asm volatile(
            "pslld $1, %%xmm7 \n\t" /* xmm7 already holds a '4' from 2 lifts ago. */
        ::);
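        /* Doubling the old rounding constant turns xmm7 into 8 (the W_BO
         * bias).  The loop below computes
         * b[i] -= (8 - (ref[i] + ref[i+1]) - 4*b[i]) >> 4 for 8 coefficients
         * per iteration, matching the liftS lead-out helper above. */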
        for(; i<w_l-7; i+=8){
            asm volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 4(%1), %%xmm0 \n\t"
                "movdqu 20(%1), %%xmm4 \n\t"
                "paddd %%xmm1, %%xmm0 \n\t"
                "paddd %%xmm5, %%xmm4 \n\t"
                "movdqa %%xmm7, %%xmm1 \n\t"
                "movdqa %%xmm7, %%xmm5 \n\t"
                "psubd %%xmm0, %%xmm1 \n\t"
                "psubd %%xmm4, %%xmm5 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "pslld $2, %%xmm0 \n\t"
                "pslld $2, %%xmm4 \n\t"
                "psubd %%xmm0, %%xmm1 \n\t"
                "psubd %%xmm4, %%xmm5 \n\t"
                "psrad $4, %%xmm1 \n\t"
                "psrad $4, %%xmm5 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubd %%xmm1, %%xmm0 \n\t"
                "psubd %%xmm5, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
    }

    { // Lift 3
        DWTELEM * const src = b+w2;

        i = 0;
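        /* Align &temp[i] to 16 bytes for the movdqa stores, then compute
         * temp[i] = src[i] - ((-3*(b[i] + b[i+1])) >> 1) eight coefficients
         * at a time: the sum minus four times the sum gives the -3 factor. */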
        for(; (((long)&temp[i]) & 0xF) && i<w_r; i++){
            temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS);
        }
        for(; i<w_r-7; i+=8){
            asm volatile(
                "movdqu 4(%1), %%xmm2 \n\t"
                "movdqu 20(%1), %%xmm6 \n\t"
                "paddd (%1), %%xmm2 \n\t"
                "paddd 16(%1), %%xmm6 \n\t"
                "movdqa %%xmm2, %%xmm0 \n\t"
                "movdqa %%xmm6, %%xmm4 \n\t"
                "pslld $2, %%xmm2 \n\t"
                "pslld $2, %%xmm6 \n\t"
                "psubd %%xmm2, %%xmm0 \n\t"
                "psubd %%xmm6, %%xmm4 \n\t"
                "psrad $1, %%xmm0 \n\t"
                "psrad $1, %%xmm4 \n\t"
                "movdqu (%0), %%xmm2 \n\t"
                "movdqu 16(%0), %%xmm6 \n\t"
                "psubd %%xmm0, %%xmm2 \n\t"
                "psubd %%xmm4, %%xmm6 \n\t"
                "movdqa %%xmm2, (%2) \n\t"
                "movdqa %%xmm6, 16(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

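        /* Interleave the two half-length bands back into b[]: b[2*k] takes
         * the low-band value b[k] and b[2*k+1] the high-band value temp[k],
         * working from the end of the line backwards so nothing is
         * overwritten before it is read.  The scalar loop peels pairs until
         * the SSE2 loop can handle whole 32-coefficient blocks with
         * punpckldq/punpckhdq. */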
        for (; (i & 0x1E) != 0x1E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=30; i>=0; i-=32){
            asm volatile(
                "movdqa (%1), %%xmm0 \n\t"
                "movdqa 16(%1), %%xmm2 \n\t"
                "movdqa 32(%1), %%xmm4 \n\t"
                "movdqa 48(%1), %%xmm6 \n\t"
                "movdqa (%1), %%xmm1 \n\t"
                "movdqa 16(%1), %%xmm3 \n\t"
                "movdqa 32(%1), %%xmm5 \n\t"
                "movdqa 48(%1), %%xmm7 \n\t"
                "punpckldq (%2), %%xmm0 \n\t"
                "punpckldq 16(%2), %%xmm2 \n\t"
                "punpckldq 32(%2), %%xmm4 \n\t"
                "punpckldq 48(%2), %%xmm6 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm2, 32(%0) \n\t"
                "movdqa %%xmm4, 64(%0) \n\t"
                "movdqa %%xmm6, 96(%0) \n\t"
                "punpckhdq (%2), %%xmm1 \n\t"
                "punpckhdq 16(%2), %%xmm3 \n\t"
                "punpckhdq 32(%2), %%xmm5 \n\t"
                "punpckhdq 48(%2), %%xmm7 \n\t"
                "movdqa %%xmm1, 16(%0) \n\t"
                "movdqa %%xmm3, 48(%0) \n\t"
                "movdqa %%xmm5, 80(%0) \n\t"
                "movdqa %%xmm7, 112(%0) \n\t"
                :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1])
                : "memory"
            );
        }
    }
}

void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width){
    const int w2= (width+1)>>1;
    DWTELEM temp[width >> 1];
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

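    /* Same four lifting passes and interleave as the SSE2 version above, but
     * with 64-bit MMX registers: 4 coefficients per iteration, movq instead
     * of movdqa/movdqu, and no alignment prologues since movq has no
     * alignment requirement.  The b[0] edge case is handled up front here,
     * so the vector loops start at i = 1. */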
    { // Lift 0
        DWTELEM * const ref = b + w2 - 1;

        i = 1;
        b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
        asm volatile(
            "pcmpeqd %%mm7, %%mm7 \n\t"
            "pslld $31, %%mm7 \n\t"
            "psrld $29, %%mm7 \n\t"
        ::);
        for(; i<w_l-3; i+=4){
            asm volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddd 4(%1), %%mm2 \n\t"
                "paddd 12(%1), %%mm6 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm6, %%mm4 \n\t"
                "paddd %%mm2, %%mm2 \n\t"
                "paddd %%mm6, %%mm6 \n\t"
                "paddd %%mm0, %%mm2 \n\t"
                "paddd %%mm4, %%mm6 \n\t"
                "paddd %%mm7, %%mm2 \n\t"
                "paddd %%mm7, %%mm6 \n\t"
                "psrad $3, %%mm2 \n\t"
                "psrad $3, %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubd %%mm2, %%mm0 \n\t"
                "psubd %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
    }

    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
        for(; i<w_r-3; i+=4){
            asm volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddd 4(%1), %%mm2 \n\t"
                "paddd 12(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubd %%mm2, %%mm0 \n\t"
                "psubd %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&dst[i]), "r"(&b[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;

        i = 1;
        b[0] = b[0] - (((-2 * ref[1] + W_BO) - 4 * b[0]) >> W_BS);
        asm volatile(
326 "pslld $1, %%mm7 \n\t" /* xmm7 already holds a '4' from 2 lifts ago. */ | |
        ::);
        for(; i<w_l-3; i+=4){
            asm volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm4 \n\t"
                "paddd 4(%1), %%mm0 \n\t"
                "paddd 12(%1), %%mm4 \n\t"
                "movq %%mm7, %%mm1 \n\t"
                "movq %%mm7, %%mm5 \n\t"
                "psubd %%mm0, %%mm1 \n\t"
                "psubd %%mm4, %%mm5 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "pslld $2, %%mm0 \n\t"
                "pslld $2, %%mm4 \n\t"
                "psubd %%mm0, %%mm1 \n\t"
                "psubd %%mm4, %%mm5 \n\t"
                "psrad $4, %%mm1 \n\t"
                "psrad $4, %%mm5 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubd %%mm1, %%mm0 \n\t"
                "psubd %%mm5, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
                :: "r"(&b[i]), "r"(&ref[i])
                : "memory"
            );
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
    }

    { // Lift 3
        DWTELEM * const src = b+w2;
        i = 0;

        for(; i<w_r-3; i+=4){
            asm volatile(
                "movq 4(%1), %%mm2 \n\t"
                "movq 12(%1), %%mm6 \n\t"
                "paddd (%1), %%mm2 \n\t"
                "paddd 8(%1), %%mm6 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm6, %%mm4 \n\t"
                "pslld $2, %%mm2 \n\t"
                "pslld $2, %%mm6 \n\t"
                "psubd %%mm2, %%mm0 \n\t"
                "psubd %%mm6, %%mm4 \n\t"
                "psrad $1, %%mm0 \n\t"
                "psrad $1, %%mm4 \n\t"
                "movq (%0), %%mm2 \n\t"
                "movq 8(%0), %%mm6 \n\t"
                "psubd %%mm0, %%mm2 \n\t"
                "psubd %%mm4, %%mm6 \n\t"
                "movq %%mm2, (%2) \n\t"
                "movq %%mm6, 8(%2) \n\t"
                :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
                : "memory"
            );
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0xE) != 0xE; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=14; i>=0; i-=16){
            asm volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm2 \n\t"
                "movq 16(%1), %%mm4 \n\t"
                "movq 24(%1), %%mm6 \n\t"
                "movq (%1), %%mm1 \n\t"
                "movq 8(%1), %%mm3 \n\t"
                "movq 16(%1), %%mm5 \n\t"
                "movq 24(%1), %%mm7 \n\t"
                "punpckldq (%2), %%mm0 \n\t"
                "punpckldq 8(%2), %%mm2 \n\t"
                "punpckldq 16(%2), %%mm4 \n\t"
                "punpckldq 24(%2), %%mm6 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm2, 16(%0) \n\t"
                "movq %%mm4, 32(%0) \n\t"
                "movq %%mm6, 48(%0) \n\t"
                "punpckhdq (%2), %%mm1 \n\t"
                "punpckhdq 8(%2), %%mm3 \n\t"
                "punpckhdq 16(%2), %%mm5 \n\t"
                "punpckhdq 24(%2), %%mm7 \n\t"
                "movq %%mm1, 8(%0) \n\t"
                "movq %%mm3, 24(%0) \n\t"
                "movq %%mm5, 40(%0) \n\t"
                "movq %%mm7, 56(%0) \n\t"
                :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1])
                : "memory"
            );
        }
    }
}

#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
        ""op" (%%"r",%%"REG_d",4), %%"t0" \n\t"\
        ""op" 16(%%"r",%%"REG_d",4), %%"t1" \n\t"\
        ""op" 32(%%"r",%%"REG_d",4), %%"t2" \n\t"\
        ""op" 48(%%"r",%%"REG_d",4), %%"t3" \n\t"