annotate libmpeg2/idct_mmx.c @ 8278:1cbee32a0627
if there is no subwindow ... some menu entries are disabled :)
| author | pontscho |
| --- | --- |
| date | Mon, 25 Nov 2002 16:52:12 +0000 |
| parents | 4fa90be8da03 |
| children | 47984e3f54ce |
/*
 * idct_mmx.c
 * Copyright (C) 1999-2001 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
 *
 * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
 *
 * mpeg2dec is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * mpeg2dec is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "config.h"

#ifdef ARCH_X86

#include <inttypes.h>

#include "mpeg2_internal.h"
#include "attributes.h"
#include "mmx.h"

#define ROW_SHIFT 11
#define COL_SHIFT 6

#define round(bias) ((int)(((bias)+0.5) * (1<<ROW_SHIFT)))
#define rounder(bias) {round (bias), round (bias)}

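/* For reference: with ROW_SHIFT == 11, round() expresses a bias in units of
 * 1/2048, e.g. round (-0.25) == (int)(0.25 * 2048) == 512 and round (0) ==
 * 1024, i.e. half an output LSB of the row pass; rounder() duplicates the
 * value into both 32-bit lanes of an MMX qword so it can be added with a
 * single paddd. */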

#if 0
/* C row IDCT - it's just here to document the MMXEXT and MMX versions */
static inline void idct_row (int16_t * row, int offset,
                             int16_t * table, int32_t * rounder)
{
    int C1, C2, C3, C4, C5, C6, C7;
    int a0, a1, a2, a3, b0, b1, b2, b3;

    row += offset;

    C1 = table[1];
    C2 = table[2];
    C3 = table[3];
    C4 = table[4];
    C5 = table[5];
    C6 = table[6];
    C7 = table[7];

    a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + *rounder;
    a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + *rounder;
    a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + *rounder;
    a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + *rounder;

    b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7];
    b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7];
    b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7];
    b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7];

    row[0] = (a0 + b0) >> ROW_SHIFT;
    row[1] = (a1 + b1) >> ROW_SHIFT;
    row[2] = (a2 + b2) >> ROW_SHIFT;
    row[3] = (a3 + b3) >> ROW_SHIFT;
    row[4] = (a3 - b3) >> ROW_SHIFT;
    row[5] = (a2 - b2) >> ROW_SHIFT;
    row[6] = (a1 - b1) >> ROW_SHIFT;
    row[7] = (a0 - b0) >> ROW_SHIFT;
}
#endif

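/* Note that a0..a3 come from the even input coefficients and b0..b3 from the
 * odd ones; the MMX/MMXEXT rows below compute the same sums, but with the
 * constants pre-interleaved in the tables so that each pmaddwd yields two
 * 32-bit partial sums (e.g. C4*x0+C2*x2 and -C4*x4-C2*x6) that are then
 * combined with paddd. */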

/* MMXEXT row IDCT */

#define mmxext_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, -c4, -c2,  \
                                             c4, c6, c4, c6,    \
                                             c1, c3, -c1, -c5,  \
                                             c5, c7, c3, -c7,   \
                                             c4, -c6, c4, -c6,  \
                                             -c4, c2, c4, -c2,  \
                                             c5, -c1, c3, -c1,  \
                                             c7, c3, c7, -c5 }

static inline void mmxext_row_head (int16_t * row, int offset, int16_t * table)
{
    movq_m2r (*(row+offset), mm2);      // mm2 = x6 x4 x2 x0

    movq_m2r (*(row+offset+4), mm5);    // mm5 = x7 x5 x3 x1
    movq_r2r (mm2, mm0);                // mm0 = x6 x4 x2 x0

    movq_m2r (*table, mm3);             // mm3 = -C2 -C4 C2 C4
    movq_r2r (mm5, mm6);                // mm6 = x7 x5 x3 x1

    movq_m2r (*(table+4), mm4);         // mm4 = C6 C4 C6 C4
    pmaddwd_r2r (mm0, mm3);             // mm3 = -C4*x4-C2*x6 C4*x0+C2*x2

    pshufw_r2r (mm2, mm2, 0x4e);        // mm2 = x2 x0 x6 x4
}

static inline void mmxext_row (int16_t * table, int32_t * rounder)
{
    movq_m2r (*(table+8), mm1);         // mm1 = -C5 -C1 C3 C1
    pmaddwd_r2r (mm2, mm4);             // mm4 = C4*x0+C6*x2 C4*x4+C6*x6

    pmaddwd_m2r (*(table+16), mm0);     // mm0 = C4*x4-C6*x6 C4*x0-C6*x2
    pshufw_r2r (mm6, mm6, 0x4e);        // mm6 = x3 x1 x7 x5

    movq_m2r (*(table+12), mm7);        // mm7 = -C7 C3 C7 C5
    pmaddwd_r2r (mm5, mm1);             // mm1 = -C1*x5-C5*x7 C1*x1+C3*x3

    paddd_m2r (*rounder, mm3);          // mm3 += rounder
    pmaddwd_r2r (mm6, mm7);             // mm7 = C3*x1-C7*x3 C5*x5+C7*x7

    pmaddwd_m2r (*(table+20), mm2);     // mm2 = C4*x0-C2*x2 -C4*x4+C2*x6
    paddd_r2r (mm4, mm3);               // mm3 = a1 a0 + rounder

    pmaddwd_m2r (*(table+24), mm5);     // mm5 = C3*x5-C1*x7 C5*x1-C1*x3
    movq_r2r (mm3, mm4);                // mm4 = a1 a0 + rounder

    pmaddwd_m2r (*(table+28), mm6);     // mm6 = C7*x1-C5*x3 C7*x5+C3*x7
    paddd_r2r (mm7, mm1);               // mm1 = b1 b0

    paddd_m2r (*rounder, mm0);          // mm0 += rounder
    psubd_r2r (mm1, mm3);               // mm3 = a1-b1 a0-b0 + rounder

    psrad_i2r (ROW_SHIFT, mm3);         // mm3 = y6 y7
    paddd_r2r (mm4, mm1);               // mm1 = a1+b1 a0+b0 + rounder

    paddd_r2r (mm2, mm0);               // mm0 = a3 a2 + rounder
    psrad_i2r (ROW_SHIFT, mm1);         // mm1 = y1 y0

    paddd_r2r (mm6, mm5);               // mm5 = b3 b2
    movq_r2r (mm0, mm4);                // mm4 = a3 a2 + rounder

    paddd_r2r (mm5, mm0);               // mm0 = a3+b3 a2+b2 + rounder
    psubd_r2r (mm5, mm4);               // mm4 = a3-b3 a2-b2 + rounder
}

static inline void mmxext_row_tail (int16_t * row, int store)
{
    psrad_i2r (ROW_SHIFT, mm0);         // mm0 = y3 y2

    psrad_i2r (ROW_SHIFT, mm4);         // mm4 = y4 y5

    packssdw_r2r (mm0, mm1);            // mm1 = y3 y2 y1 y0

    packssdw_r2r (mm3, mm4);            // mm4 = y6 y7 y4 y5

    movq_r2m (mm1, *(row+store));       // save y3 y2 y1 y0
    pshufw_r2r (mm4, mm4, 0xb1);        // mm4 = y7 y6 y5 y4

    /* slot */

    movq_r2m (mm4, *(row+store+4));     // save y7 y6 y5 y4
}

static inline void mmxext_row_mid (int16_t * row, int store,
                                   int offset, int16_t * table)
{
    movq_m2r (*(row+offset), mm2);      // mm2 = x6 x4 x2 x0
    psrad_i2r (ROW_SHIFT, mm0);         // mm0 = y3 y2

    movq_m2r (*(row+offset+4), mm5);    // mm5 = x7 x5 x3 x1
    psrad_i2r (ROW_SHIFT, mm4);         // mm4 = y4 y5

    packssdw_r2r (mm0, mm1);            // mm1 = y3 y2 y1 y0
    movq_r2r (mm5, mm6);                // mm6 = x7 x5 x3 x1

    packssdw_r2r (mm3, mm4);            // mm4 = y6 y7 y4 y5
    movq_r2r (mm2, mm0);                // mm0 = x6 x4 x2 x0

    movq_r2m (mm1, *(row+store));       // save y3 y2 y1 y0
    pshufw_r2r (mm4, mm4, 0xb1);        // mm4 = y7 y6 y5 y4

    movq_m2r (*table, mm3);             // mm3 = -C2 -C4 C2 C4
    movq_r2m (mm4, *(row+store+4));     // save y7 y6 y5 y4

    pmaddwd_r2r (mm0, mm3);             // mm3 = -C4*x4-C2*x6 C4*x0+C2*x2

    movq_m2r (*(table+4), mm4);         // mm4 = C6 C4 C6 C4
    pshufw_r2r (mm2, mm2, 0x4e);        // mm2 = x2 x0 x6 x4
}


/* MMX row IDCT */

#define mmx_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, c4, c6,     \
                                          c4, c6, -c4, -c2,   \
                                          c1, c3, c3, -c7,    \
                                          c5, c7, -c1, -c5,   \
                                          c4, -c6, c4, -c2,   \
                                          -c4, c2, c4, -c6,   \
                                          c5, -c1, c7, -c5,   \
                                          c7, c3, c3, -c1 }

static inline void mmx_row_head (int16_t * row, int offset, int16_t * table)
{
    movq_m2r (*(row+offset), mm2);      // mm2 = x6 x4 x2 x0

    movq_m2r (*(row+offset+4), mm5);    // mm5 = x7 x5 x3 x1
    movq_r2r (mm2, mm0);                // mm0 = x6 x4 x2 x0

    movq_m2r (*table, mm3);             // mm3 = C6 C4 C2 C4
    movq_r2r (mm5, mm6);                // mm6 = x7 x5 x3 x1

    punpckldq_r2r (mm0, mm0);           // mm0 = x2 x0 x2 x0

    movq_m2r (*(table+4), mm4);         // mm4 = -C2 -C4 C6 C4
    pmaddwd_r2r (mm0, mm3);             // mm3 = C4*x0+C6*x2 C4*x0+C2*x2

    movq_m2r (*(table+8), mm1);         // mm1 = -C7 C3 C3 C1
    punpckhdq_r2r (mm2, mm2);           // mm2 = x6 x4 x6 x4
}

static inline void mmx_row (int16_t * table, int32_t * rounder)
{
    pmaddwd_r2r (mm2, mm4);             // mm4 = -C4*x4-C2*x6 C4*x4+C6*x6
    punpckldq_r2r (mm5, mm5);           // mm5 = x3 x1 x3 x1

    pmaddwd_m2r (*(table+16), mm0);     // mm0 = C4*x0-C2*x2 C4*x0-C6*x2
    punpckhdq_r2r (mm6, mm6);           // mm6 = x7 x5 x7 x5

    movq_m2r (*(table+12), mm7);        // mm7 = -C5 -C1 C7 C5
    pmaddwd_r2r (mm5, mm1);             // mm1 = C3*x1-C7*x3 C1*x1+C3*x3

    paddd_m2r (*rounder, mm3);          // mm3 += rounder
    pmaddwd_r2r (mm6, mm7);             // mm7 = -C1*x5-C5*x7 C5*x5+C7*x7

    pmaddwd_m2r (*(table+20), mm2);     // mm2 = C4*x4-C6*x6 -C4*x4+C2*x6
    paddd_r2r (mm4, mm3);               // mm3 = a1 a0 + rounder

    pmaddwd_m2r (*(table+24), mm5);     // mm5 = C7*x1-C5*x3 C5*x1-C1*x3
    movq_r2r (mm3, mm4);                // mm4 = a1 a0 + rounder

    pmaddwd_m2r (*(table+28), mm6);     // mm6 = C3*x5-C1*x7 C7*x5+C3*x7
    paddd_r2r (mm7, mm1);               // mm1 = b1 b0

    paddd_m2r (*rounder, mm0);          // mm0 += rounder
    psubd_r2r (mm1, mm3);               // mm3 = a1-b1 a0-b0 + rounder

    psrad_i2r (ROW_SHIFT, mm3);         // mm3 = y6 y7
    paddd_r2r (mm4, mm1);               // mm1 = a1+b1 a0+b0 + rounder

    paddd_r2r (mm2, mm0);               // mm0 = a3 a2 + rounder
    psrad_i2r (ROW_SHIFT, mm1);         // mm1 = y1 y0

    paddd_r2r (mm6, mm5);               // mm5 = b3 b2
    movq_r2r (mm0, mm7);                // mm7 = a3 a2 + rounder

    paddd_r2r (mm5, mm0);               // mm0 = a3+b3 a2+b2 + rounder
    psubd_r2r (mm5, mm7);               // mm7 = a3-b3 a2-b2 + rounder
}

static inline void mmx_row_tail (int16_t * row, int store)
{
    psrad_i2r (ROW_SHIFT, mm0);         // mm0 = y3 y2

    psrad_i2r (ROW_SHIFT, mm7);         // mm7 = y4 y5

    packssdw_r2r (mm0, mm1);            // mm1 = y3 y2 y1 y0

    packssdw_r2r (mm3, mm7);            // mm7 = y6 y7 y4 y5

    movq_r2m (mm1, *(row+store));       // save y3 y2 y1 y0
    movq_r2r (mm7, mm4);                // mm4 = y6 y7 y4 y5

    pslld_i2r (16, mm7);                // mm7 = y7 0 y5 0

    psrld_i2r (16, mm4);                // mm4 = 0 y6 0 y4

    por_r2r (mm4, mm7);                 // mm7 = y7 y6 y5 y4

    /* slot */

    movq_r2m (mm7, *(row+store+4));     // save y7 y6 y5 y4
}

static inline void mmx_row_mid (int16_t * row, int store,
                                int offset, int16_t * table)
{
    movq_m2r (*(row+offset), mm2);      // mm2 = x6 x4 x2 x0
    psrad_i2r (ROW_SHIFT, mm0);         // mm0 = y3 y2

    movq_m2r (*(row+offset+4), mm5);    // mm5 = x7 x5 x3 x1
    psrad_i2r (ROW_SHIFT, mm7);         // mm7 = y4 y5

    packssdw_r2r (mm0, mm1);            // mm1 = y3 y2 y1 y0
    movq_r2r (mm5, mm6);                // mm6 = x7 x5 x3 x1

    packssdw_r2r (mm3, mm7);            // mm7 = y6 y7 y4 y5
    movq_r2r (mm2, mm0);                // mm0 = x6 x4 x2 x0

    movq_r2m (mm1, *(row+store));       // save y3 y2 y1 y0
    movq_r2r (mm7, mm1);                // mm1 = y6 y7 y4 y5

    punpckldq_r2r (mm0, mm0);           // mm0 = x2 x0 x2 x0
    psrld_i2r (16, mm7);                // mm7 = 0 y6 0 y4

    movq_m2r (*table, mm3);             // mm3 = C6 C4 C2 C4
    pslld_i2r (16, mm1);                // mm1 = y7 0 y5 0

    movq_m2r (*(table+4), mm4);         // mm4 = -C2 -C4 C6 C4
    por_r2r (mm1, mm7);                 // mm7 = y7 y6 y5 y4

    movq_m2r (*(table+8), mm1);         // mm1 = -C7 C3 C3 C1
    punpckhdq_r2r (mm2, mm2);           // mm2 = x6 x4 x6 x4

    movq_r2m (mm7, *(row+store+4));     // save y7 y6 y5 y4
    pmaddwd_r2r (mm0, mm3);             // mm3 = C4*x0+C6*x2 C4*x0+C2*x2
}


#if 0
// C column IDCT - it's just here to document the MMXEXT and MMX versions
static inline void idct_col (int16_t * col, int offset)
{
/* multiplication - as implemented on mmx */
#define F(c,x) (((c) * (x)) >> 16)

/* saturation - it helps us handle torture test cases */
#define S(x) (((x)>32767) ? 32767 : ((x)<-32768) ? -32768 : (x))

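/* For reference: F() matches what pmulhw computes in the MMX version (a
 * signed 16x16 multiply keeping only the high 16 bits), and S() models the
 * word saturation performed by paddsw/psubsw, so this C code follows the
 * fixed-point arithmetic of the MMX column code closely. */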
    int16_t x0, x1, x2, x3, x4, x5, x6, x7;
    int16_t y0, y1, y2, y3, y4, y5, y6, y7;
    int16_t a0, a1, a2, a3, b0, b1, b2, b3;
    int16_t u04, v04, u26, v26, u17, v17, u35, v35, u12, v12;

    col += offset;

    x0 = col[0*8];
    x1 = col[1*8];
    x2 = col[2*8];
    x3 = col[3*8];
    x4 = col[4*8];
    x5 = col[5*8];
    x6 = col[6*8];
    x7 = col[7*8];

    u04 = S (x0 + x4);
    v04 = S (x0 - x4);
    u26 = S (F (T2, x6) + x2);
    v26 = S (F (T2, x2) - x6);

    a0 = S (u04 + u26);
    a1 = S (v04 + v26);
    a2 = S (v04 - v26);
    a3 = S (u04 - u26);

    u17 = S (F (T1, x7) + x1);
    v17 = S (F (T1, x1) - x7);
    u35 = S (F (T3, x5) + x3);
    v35 = S (F (T3, x3) - x5);

    b0 = S (u17 + u35);
    b3 = S (v17 - v35);
    u12 = S (u17 - u35);
    v12 = S (v17 + v35);
    u12 = S (2 * F (C4, u12));
    v12 = S (2 * F (C4, v12));
    b1 = S (u12 + v12);
    b2 = S (u12 - v12);

    y0 = S (a0 + b0) >> COL_SHIFT;
    y1 = S (a1 + b1) >> COL_SHIFT;
    y2 = S (a2 + b2) >> COL_SHIFT;
    y3 = S (a3 + b3) >> COL_SHIFT;

    y4 = S (a3 - b3) >> COL_SHIFT;
    y5 = S (a2 - b2) >> COL_SHIFT;
    y6 = S (a1 - b1) >> COL_SHIFT;
    y7 = S (a0 - b0) >> COL_SHIFT;

    col[0*8] = y0;
    col[1*8] = y1;
    col[2*8] = y2;
    col[3*8] = y3;
    col[4*8] = y4;
    col[5*8] = y5;
    col[6*8] = y6;
    col[7*8] = y7;
}
#endif


// MMX column IDCT
static inline void idct_col (int16_t * col, int offset)
{
#define T1 13036
#define T2 27146
#define T3 43790
#define C4 23170

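/* T1, T2 and T3 are tan(pi/16), tan(2*pi/16) and tan(3*pi/16) in 16.16 fixed
 * point (e.g. tan(pi/16) * 65536 ~= 13036), and C4 is cos(pi/4) in 1.15 fixed
 * point. Because pmulhw keeps only the high 16 bits, multiplying by C4 gives
 * half the product, hence the b1/2 and b2/2 values doubled with paddsw below;
 * T3 does not fit in a signed 16-bit word, which is what the (T3-1) comments
 * and the extra paddsw of x3/x5 compensate for. */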
    static short _T1[] ATTR_ALIGN(8) = {T1,T1,T1,T1};
    static short _T2[] ATTR_ALIGN(8) = {T2,T2,T2,T2};
    static short _T3[] ATTR_ALIGN(8) = {T3,T3,T3,T3};
    static short _C4[] ATTR_ALIGN(8) = {C4,C4,C4,C4};

    /* column code adapted from Peter Gubanov */
    /* http://www.elecard.com/peter/idct.shtml */

    movq_m2r (*_T1, mm0);                // mm0 = T1

    movq_m2r (*(col+offset+1*8), mm1);   // mm1 = x1
    movq_r2r (mm0, mm2);                 // mm2 = T1

    movq_m2r (*(col+offset+7*8), mm4);   // mm4 = x7
    pmulhw_r2r (mm1, mm0);               // mm0 = T1*x1

    movq_m2r (*_T3, mm5);                // mm5 = T3
    pmulhw_r2r (mm4, mm2);               // mm2 = T1*x7

    movq_m2r (*(col+offset+5*8), mm6);   // mm6 = x5
    movq_r2r (mm5, mm7);                 // mm7 = T3-1

    movq_m2r (*(col+offset+3*8), mm3);   // mm3 = x3
    psubsw_r2r (mm4, mm0);               // mm0 = v17

    movq_m2r (*_T2, mm4);                // mm4 = T2
    pmulhw_r2r (mm3, mm5);               // mm5 = (T3-1)*x3

    paddsw_r2r (mm2, mm1);               // mm1 = u17
    pmulhw_r2r (mm6, mm7);               // mm7 = (T3-1)*x5

    /* slot */

    movq_r2r (mm4, mm2);                 // mm2 = T2
    paddsw_r2r (mm3, mm5);               // mm5 = T3*x3

    pmulhw_m2r (*(col+offset+2*8), mm4); // mm4 = T2*x2
    paddsw_r2r (mm6, mm7);               // mm7 = T3*x5

    psubsw_r2r (mm6, mm5);               // mm5 = v35
    paddsw_r2r (mm3, mm7);               // mm7 = u35

    movq_m2r (*(col+offset+6*8), mm3);   // mm3 = x6
    movq_r2r (mm0, mm6);                 // mm6 = v17

    pmulhw_r2r (mm3, mm2);               // mm2 = T2*x6
    psubsw_r2r (mm5, mm0);               // mm0 = b3

    psubsw_r2r (mm3, mm4);               // mm4 = v26
    paddsw_r2r (mm6, mm5);               // mm5 = v12

    movq_r2m (mm0, *(col+offset+3*8));   // save b3 in scratch0
    movq_r2r (mm1, mm6);                 // mm6 = u17

    paddsw_m2r (*(col+offset+2*8), mm2); // mm2 = u26
    paddsw_r2r (mm7, mm6);               // mm6 = b0

    psubsw_r2r (mm7, mm1);               // mm1 = u12
    movq_r2r (mm1, mm7);                 // mm7 = u12

    movq_m2r (*(col+offset+0*8), mm3);   // mm3 = x0
    paddsw_r2r (mm5, mm1);               // mm1 = u12+v12

    movq_m2r (*_C4, mm0);                // mm0 = C4/2
    psubsw_r2r (mm5, mm7);               // mm7 = u12-v12

    movq_r2m (mm6, *(col+offset+5*8));   // save b0 in scratch1
    pmulhw_r2r (mm0, mm1);               // mm1 = b1/2

    movq_r2r (mm4, mm6);                 // mm6 = v26
    pmulhw_r2r (mm0, mm7);               // mm7 = b2/2

    movq_m2r (*(col+offset+4*8), mm5);   // mm5 = x4
    movq_r2r (mm3, mm0);                 // mm0 = x0

    psubsw_r2r (mm5, mm3);               // mm3 = v04
    paddsw_r2r (mm5, mm0);               // mm0 = u04

    paddsw_r2r (mm3, mm4);               // mm4 = a1
    movq_r2r (mm0, mm5);                 // mm5 = u04

    psubsw_r2r (mm6, mm3);               // mm3 = a2
    paddsw_r2r (mm2, mm5);               // mm5 = a0

    paddsw_r2r (mm1, mm1);               // mm1 = b1
    psubsw_r2r (mm2, mm0);               // mm0 = a3

    paddsw_r2r (mm7, mm7);               // mm7 = b2
    movq_r2r (mm3, mm2);                 // mm2 = a2

    movq_r2r (mm4, mm6);                 // mm6 = a1
    paddsw_r2r (mm7, mm3);               // mm3 = a2+b2

    psraw_i2r (COL_SHIFT, mm3);          // mm3 = y2
    paddsw_r2r (mm1, mm4);               // mm4 = a1+b1

    psraw_i2r (COL_SHIFT, mm4);          // mm4 = y1
    psubsw_r2r (mm1, mm6);               // mm6 = a1-b1

    movq_m2r (*(col+offset+5*8), mm1);   // mm1 = b0
    psubsw_r2r (mm7, mm2);               // mm2 = a2-b2

    psraw_i2r (COL_SHIFT, mm6);          // mm6 = y6
    movq_r2r (mm5, mm7);                 // mm7 = a0

    movq_r2m (mm4, *(col+offset+1*8));   // save y1
    psraw_i2r (COL_SHIFT, mm2);          // mm2 = y5

    movq_r2m (mm3, *(col+offset+2*8));   // save y2
    paddsw_r2r (mm1, mm5);               // mm5 = a0+b0

    movq_m2r (*(col+offset+3*8), mm4);   // mm4 = b3
    psubsw_r2r (mm1, mm7);               // mm7 = a0-b0

    psraw_i2r (COL_SHIFT, mm5);          // mm5 = y0
    movq_r2r (mm0, mm3);                 // mm3 = a3

    movq_r2m (mm2, *(col+offset+5*8));   // save y5
    psubsw_r2r (mm4, mm3);               // mm3 = a3-b3

    psraw_i2r (COL_SHIFT, mm7);          // mm7 = y7
    paddsw_r2r (mm0, mm4);               // mm4 = a3+b3

    movq_r2m (mm5, *(col+offset+0*8));   // save y0
    psraw_i2r (COL_SHIFT, mm3);          // mm3 = y4

    movq_r2m (mm6, *(col+offset+6*8));   // save y6
    psraw_i2r (COL_SHIFT, mm4);          // mm4 = y3

    movq_r2m (mm7, *(col+offset+7*8));   // save y7

    movq_r2m (mm3, *(col+offset+4*8));   // save y4

    movq_r2m (mm4, *(col+offset+3*8));   // save y3
}


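/* In the bias comments below, Ck denotes cos(k*pi/16); for example
 * C1*(C1/C4+C1+C7)/2 = 0.980785*(1.387040+0.980785+0.195090)/2 ~= 1.256835,
 * which is the rounder1 value. */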
static int32_t rounder0[] ATTR_ALIGN(8) =
    rounder ((1 << (COL_SHIFT - 1)) - 0.5);
static int32_t rounder4[] ATTR_ALIGN(8) = rounder (0);
static int32_t rounder1[] ATTR_ALIGN(8) =
    rounder (1.25683487303);      /* C1*(C1/C4+C1+C7)/2 */
static int32_t rounder7[] ATTR_ALIGN(8) =
    rounder (-0.25);              /* C1*(C7/C4+C7-C1)/2 */
static int32_t rounder2[] ATTR_ALIGN(8) =
    rounder (0.60355339059);      /* C2 * (C6+C2)/2 */
static int32_t rounder6[] ATTR_ALIGN(8) =
    rounder (-0.25);              /* C2 * (C6-C2)/2 */
static int32_t rounder3[] ATTR_ALIGN(8) =
    rounder (0.087788325588);     /* C3*(-C3/C4+C3+C5)/2 */
static int32_t rounder5[] ATTR_ALIGN(8) =
    rounder (-0.441341716183);    /* C3*(-C5/C4+C5-C3)/2 */


#define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \
inline void idct (int16_t * block) \
{ \
    static int16_t table04[] ATTR_ALIGN(16) = \
        table (22725, 21407, 19266, 16384, 12873, 8867, 4520); \
    static int16_t table17[] ATTR_ALIGN(16) = \
        table (31521, 29692, 26722, 22725, 17855, 12299, 6270); \
    static int16_t table26[] ATTR_ALIGN(16) = \
        table (29692, 27969, 25172, 21407, 16819, 11585, 5906); \
    static int16_t table35[] ATTR_ALIGN(16) = \
        table (26722, 25172, 22654, 19266, 15137, 10426, 5315); \
 \
    idct_row_head (block, 0*8, table04); \
    idct_row (table04, rounder0); \
    idct_row_mid (block, 0*8, 4*8, table04); \
    idct_row (table04, rounder4); \
    idct_row_mid (block, 4*8, 1*8, table17); \
    idct_row (table17, rounder1); \
    idct_row_mid (block, 1*8, 7*8, table17); \
    idct_row (table17, rounder7); \
    idct_row_mid (block, 7*8, 2*8, table26); \
    idct_row (table26, rounder2); \
    idct_row_mid (block, 2*8, 6*8, table26); \
    idct_row (table26, rounder6); \
    idct_row_mid (block, 6*8, 3*8, table35); \
    idct_row (table35, rounder3); \
    idct_row_mid (block, 3*8, 5*8, table35); \
    idct_row (table35, rounder5); \
    idct_row_tail (block, 5*8); \
 \
    idct_col (block, 0); \
    idct_col (block, 4); \
}

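/* Note the row ordering above: rows are processed as 0, 4, 1, 7, 2, 6, 3, 5 so
 * that each table/rounder pair serves two consecutive passes, and each
 * idct_row_mid call stores the previous row's result while already loading the
 * next row's input. */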

#define COPY_MMX(offset,r0,r1,r2) \
do { \
    movq_m2r (*(block+offset), r0); \
    dest += stride; \
    movq_m2r (*(block+offset+4), r1); \
    movq_r2m (r2, *dest); \
    packuswb_r2r (r1, r0); \
} while (0)

static void block_copy (int16_t * block, uint8_t * dest, int stride)
{
    movq_m2r (*(block+0*8), mm0);
    movq_m2r (*(block+0*8+4), mm1);
    movq_m2r (*(block+1*8), mm2);
    packuswb_r2r (mm1, mm0);
    movq_m2r (*(block+1*8+4), mm3);
    movq_r2m (mm0, *dest);
    packuswb_r2r (mm3, mm2);
    COPY_MMX (2*8, mm0, mm1, mm2);
    COPY_MMX (3*8, mm2, mm3, mm0);
    COPY_MMX (4*8, mm0, mm1, mm2);
    COPY_MMX (5*8, mm2, mm3, mm0);
    COPY_MMX (6*8, mm0, mm1, mm2);
    COPY_MMX (7*8, mm2, mm3, mm0);
    movq_r2m (mm2, *(dest+stride));
}

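/* COPY_MMX is software-pipelined: each invocation stores the row packed
 * (clamped to 0..255 by packuswb) on the previous step while loading and
 * packing the next one, which is why block_copy sets up the first two rows by
 * hand and issues the final store after the last macro. */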

#define ADD_MMX(offset,r1,r2,r3,r4) \
do { \
    movq_m2r (*(dest+2*stride), r1); \
    packuswb_r2r (r4, r3); \
    movq_r2r (r1, r2); \
    dest += stride; \
    movq_r2m (r3, *dest); \
    punpcklbw_r2r (mm0, r1); \
    paddsw_m2r (*(block+offset), r1); \
    punpckhbw_r2r (mm0, r2); \
    paddsw_m2r (*(block+offset+4), r2); \
} while (0)

static void block_add (int16_t * block, uint8_t * dest, int stride)
{
    movq_m2r (*dest, mm1);
    pxor_r2r (mm0, mm0);
    movq_m2r (*(dest+stride), mm3);
    movq_r2r (mm1, mm2);
    punpcklbw_r2r (mm0, mm1);
    movq_r2r (mm3, mm4);
    paddsw_m2r (*(block+0*8), mm1);
    punpckhbw_r2r (mm0, mm2);
    paddsw_m2r (*(block+0*8+4), mm2);
    punpcklbw_r2r (mm0, mm3);
    paddsw_m2r (*(block+1*8), mm3);
    packuswb_r2r (mm2, mm1);
    punpckhbw_r2r (mm0, mm4);
    movq_r2m (mm1, *dest);
    paddsw_m2r (*(block+1*8+4), mm4);
    ADD_MMX (2*8, mm1, mm2, mm3, mm4);
    ADD_MMX (3*8, mm3, mm4, mm1, mm2);
    ADD_MMX (4*8, mm1, mm2, mm3, mm4);
    ADD_MMX (5*8, mm3, mm4, mm1, mm2);
    ADD_MMX (6*8, mm1, mm2, mm3, mm4);
    ADD_MMX (7*8, mm3, mm4, mm1, mm2);
    packuswb_r2r (mm4, mm3);
    movq_r2m (mm3, *(dest+stride));
}


declare_idct (mmxext_idct, mmxext_table,
              mmxext_row_head, mmxext_row, mmxext_row_tail, mmxext_row_mid)

void idct_block_copy_mmxext (int16_t * block, uint8_t * dest, int stride)
{
    mmxext_idct (block);
    block_copy (block, dest, stride);
}

void idct_block_add_mmxext (int16_t * block, uint8_t * dest, int stride)
{
    mmxext_idct (block);
    block_add (block, dest, stride);
}


declare_idct (mmx_idct, mmx_table,
              mmx_row_head, mmx_row, mmx_row_tail, mmx_row_mid)

void idct_block_copy_mmx (int16_t * block, uint8_t * dest, int stride)
{
    mmx_idct (block);
    block_copy (block, dest, stride);
}

void idct_block_add_mmx (int16_t * block, uint8_t * dest, int stride)
{
    mmx_idct (block);
    block_add (block, dest, stride);
}


void idct_mmx_init (void)
{
    extern uint8_t scan_norm[64];
    extern uint8_t scan_alt[64];
    int i, j;

    /* the mmx/mmxext idct uses a reordered input, so we patch scan tables */

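    /* The fixup only touches the column (low three) bits of each scan entry:
     * even columns 0,2,4,6 map to positions 0,1,2,3 and odd columns 1,3,5,7 to
     * 4,5,6,7 (e.g. j = 3 -> (3&6)>>1 | (3&1)<<2 = 5), matching the
     * x0 x2 x4 x6 / x1 x3 x5 x7 order in which the row code loads each line. */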
    for (i = 0; i < 64; i++) {
        j = scan_norm[i];
        scan_norm[i] = (j & 0x38) | ((j & 6) >> 1) | ((j & 1) << 2);
        j = scan_alt[i];
        scan_alt[i] = (j & 0x38) | ((j & 6) >> 1) | ((j & 1) << 2);
    }
}

#endif