annotate libmpcodecs/native/rtjpegn.c @ 28347:1f4b3aaefcd8
Avoid a division by 0 when using -oac mp3lame but no audio data actually is encoded.
author | reimar |
---|---|
date | Wed, 28 Jan 2009 12:46:05 +0000 |
parents | a7124a264ea6 |
children | 87b59e8d3c26 |
rev | line source |
---|---|
3802 | 1 /* |
2 RTjpeg (C) Justin Schoeman 1998 (justin@suntiger.ee.up.ac.za) | |
3 | |
4 With modifications by: | |
5 (c) 1998, 1999 by Joerg Walter <trouble@moes.pmnet.uni-oldenburg.de> | |
6 and | |
7 (c) 1999 by Wim Taymans <wim.taymans@tvd.be> | |
8 | |
9 This program is free software; you can redistribute it and/or modify | |
10 it under the terms of the GNU General Public License as published by | |
11 the Free Software Foundation; either version 2 of the License, or | |
12 (at your option) any later version. | |
13 | |
14 This program is distributed in the hope that it will be useful, | |
15 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 GNU General Public License for more details. | |
18 | |
19 You should have received a copy of the GNU General Public License | |
20 along with this program; if not, write to the Free Software | |
21977 | 21 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
3802 | 22 */ |
23 | |
24 #include <stdio.h> | |
25 #include <stdlib.h> | |
26 #include <string.h> | |
3805 | 27 |
28 #include "config.h" | |
29 | |
21507 | 30 #include "mpbswap.h" |
26304 | 31 #include "rtjpegn.h" |
3802 | 32 |
28298 | 33 #if HAVE_MMX |
3802 | 34 #include "mmx.h" |
35 #endif | |
36 | |
37 //#define SHOWBLOCK 1 | |
38 #define BETTERCOMPRESSION 1 | |
39 | |
40 static const unsigned char RTjpeg_ZZ[64]={ | |
41 0, | |
42 8, 1, | |
43 2, 9, 16, | |
44 24, 17, 10, 3, | |
45 4, 11, 18, 25, 32, | |
46 40, 33, 26, 19, 12, 5, | |
47 6, 13, 20, 27, 34, 41, 48, | |
48 56, 49, 42, 35, 28, 21, 14, 7, | |
49 15, 22, 29, 36, 43, 50, 57, | |
50 58, 51, 44, 37, 30, 23, | |
51 31, 38, 45, 52, 59, | |
52 60, 53, 46, 39, | |
53 47, 54, 61, | |
54 62, 55, | |
55 63 }; | |
56 | |
57 static const __u64 RTjpeg_aan_tab[64]={ | |
58 4294967296ULL, 5957222912ULL, 5611718144ULL, 5050464768ULL, 4294967296ULL, 3374581504ULL, 2324432128ULL, 1184891264ULL, | |
59 5957222912ULL, 8263040512ULL, 7783580160ULL, 7005009920ULL, 5957222912ULL, 4680582144ULL, 3224107520ULL, 1643641088ULL, | |
60 5611718144ULL, 7783580160ULL, 7331904512ULL, 6598688768ULL, 5611718144ULL, 4408998912ULL, 3036936960ULL, 1548224000ULL, | |
61 5050464768ULL, 7005009920ULL, 6598688768ULL, 5938608128ULL, 5050464768ULL, 3968072960ULL, 2733115392ULL, 1393296000ULL, | |
62 4294967296ULL, 5957222912ULL, 5611718144ULL, 5050464768ULL, 4294967296ULL, 3374581504ULL, 2324432128ULL, 1184891264ULL, | |
63 3374581504ULL, 4680582144ULL, 4408998912ULL, 3968072960ULL, 3374581504ULL, 2651326208ULL, 1826357504ULL, 931136000ULL, | |
64 2324432128ULL, 3224107520ULL, 3036936960ULL, 2733115392ULL, 2324432128ULL, 1826357504ULL, 1258030336ULL, 641204288ULL, | |
65 1184891264ULL, 1643641088ULL, 1548224000ULL, 1393296000ULL, 1184891264ULL, 931136000ULL, 641204288ULL, 326894240ULL, | |
66 }; | |
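/* Note on the two tables above: RTjpeg_ZZ is the standard JPEG zig-zag scan
   order, mapping scan position to raster index within an 8x8 block.
   RTjpeg_aan_tab appears to be the outer product of the AAN DCT scale factors
   in 32.32 fixed point (entry 0 is exactly 2^32, i.e. 1.0); RTjpeg_dct_init()
   and RTjpeg_idct_init() below use it to fold the DCT/IDCT scaling into the
   quantisation tables. */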
67 | |
28298 | 68 #if !HAVE_MMX |
3802 | 69 static __s32 RTjpeg_ws[64+31]; |
70 #endif | |
71 __u8 RTjpeg_alldata[2*64+4*64+4*64+4*64+4*64+32]; | |
72 | |
3835 | 73 static __s16 *block; // rh |
74 static __s16 *RTjpeg_block; | |
75 static __s32 *RTjpeg_lqt; | |
76 static __s32 *RTjpeg_cqt; | |
77 static __u32 *RTjpeg_liqt; | |
78 static __u32 *RTjpeg_ciqt; | |
79 | |
80 static unsigned char RTjpeg_lb8; | |
81 static unsigned char RTjpeg_cb8; | |
82 static int RTjpeg_width, RTjpeg_height; | |
83 static int RTjpeg_Ywidth, RTjpeg_Cwidth; | |
84 static int RTjpeg_Ysize, RTjpeg_Csize; | |
85 | |
86 static __s16 *RTjpeg_old=NULL; | |
3802 | 87 |
28298 | 88 #if HAVE_MMX |
3802 | 89 mmx_t RTjpeg_lmask; |
90 mmx_t RTjpeg_cmask; | |
91 #else | |
92 __u16 RTjpeg_lmask; | |
93 __u16 RTjpeg_cmask; | |
94 #endif | |
95 int RTjpeg_mtest=0; | |
96 | |
97 static const unsigned char RTjpeg_lum_quant_tbl[64] = { | |
98 16, 11, 10, 16, 24, 40, 51, 61, | |
99 12, 12, 14, 19, 26, 58, 60, 55, | |
100 14, 13, 16, 24, 40, 57, 69, 56, | |
101 14, 17, 22, 29, 51, 87, 80, 62, | |
102 18, 22, 37, 56, 68, 109, 103, 77, | |
103 24, 35, 55, 64, 81, 104, 113, 92, | |
104 49, 64, 78, 87, 103, 121, 120, 101, | |
105 72, 92, 95, 98, 112, 100, 103, 99 | |
106 }; | |
107 | |
108 static const unsigned char RTjpeg_chrom_quant_tbl[64] = { | |
109 17, 18, 24, 47, 99, 99, 99, 99, | |
110 18, 21, 26, 66, 99, 99, 99, 99, | |
111 24, 26, 56, 99, 99, 99, 99, 99, | |
112 47, 66, 99, 99, 99, 99, 99, 99, | |
113 99, 99, 99, 99, 99, 99, 99, 99, | |
114 99, 99, 99, 99, 99, 99, 99, 99, | |
115 99, 99, 99, 99, 99, 99, 99, 99, | |
116 99, 99, 99, 99, 99, 99, 99, 99 | |
117 }; | |
118 | |
119 #ifdef BETTERCOMPRESSION | |
120 | |
121 /*--------------------------------------------------*/ | |
122 /* better encoding, but needs a lot more cpu time */ | |
123 /* seems to be more effective than old method +lzo */ | |
124 /* with this encoding lzo isn't efficient anymore */ | |
125 /* there is still more potential for better */ | |
126 /* encoding but that would need even more cputime */ | |
127 /* anyway your mileage may vary */ | |
128 /* */ | |
129 /* written by Martin BIELY and Roman HOCHLEITNER */ | |
130 /*--------------------------------------------------*/ | |
131 | |
132 /* +++++++++++++++++++++++++++++++++++++++++++++++++++*/ | |
133 /* Block to Stream (encoding) */ | |
134 /* */ | |
135 | |
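/* Layout of the stream written below (and parsed again by RTjpeg_s2b):
   strm[0]   DC coefficient data[RTjpeg_ZZ[0]], clamped to 0..254
   strm[1]   top 6 bits: zig-zag index of the last non-zero AC coefficient,
             low 2 bits: the first 2-bit code
   Walking from that last coefficient back towards index 1, each coefficient
   gets a 2-bit code, packed MSB-first into the following bytes:
   00 = zero, 01 = +1, 11 = -1, 10 = switch to 4-bit signed nibbles (-7..7,
   where nibble 0x8 escapes again to one saturated signed byte per remaining
   coefficient).  The return value is the number of bytes written. */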
136 int RTjpeg_b2s(__s16 *data, __s8 *strm, __u8 bt8) | |
137 { | |
138 register int ci, co=1; | |
139 register __s16 ZZvalue; | |
140 register unsigned char bitten; | |
141 register unsigned char bitoff; | |
142 | |
143 #ifdef SHOWBLOCK | |
144 | |
145 int ii; | |
146 for (ii=0; ii < 64; ii++) { | |
147 fprintf(stdout, "%d ", data[RTjpeg_ZZ[ii]]); | |
148 } | |
149 fprintf(stdout, "\n\n"); | |
150 | |
151 #endif | |
152 | |
153 // *strm++ = 0x10; | |
154 // *strm = 0x00; | |
155 // | |
156 // return 2; | |
157 | |
158 // first byte always written | |
12378 | 159 ((__u8*)strm)[0]= |
3802 | 160 (__u8)(data[RTjpeg_ZZ[0]]>254) ? 254:((data[RTjpeg_ZZ[0]]<0)?0:data[RTjpeg_ZZ[0]]); |
161 | |
162 | |
163 ci=63; | |
164 while (data[RTjpeg_ZZ[ci]]==0 && ci>0) ci--; | |
165 | |
166 bitten = ((unsigned char)ci) << 2; | |
167 | |
168 if (ci==0) { | |
12378 | 169 ((__u8*)strm)[1]= bitten; |
3802 | 170 co = 2; |
171 return (int)co; | |
172 } | |
173 | |
174 /* bitoff=0 because the high 6bit contain first non zero position */ | |
175 bitoff = 0; | |
176 co = 1; | |
177 | |
178 for(; ci>0; ci--) { | |
179 | |
180 ZZvalue = data[RTjpeg_ZZ[ci]]; | |
181 | |
182 switch(ZZvalue) { | |
183 case 0: | |
184 break; | |
185 case 1: | |
186 bitten |= (0x01<<bitoff); | |
187 break; | |
188 case -1: | |
189 bitten |= (0x03<<bitoff); | |
190 break; | |
191 default: | |
192 bitten |= (0x02<<bitoff); | |
193 goto HERZWEH; | |
194 break; | |
195 } | |
196 | |
197 if( bitoff == 0 ) { | |
12378 | 198 ((__u8*)strm)[co]= bitten; |
3802 | 199 bitten = 0; |
200 bitoff = 8; | |
201 co++; | |
202 } /* "fall through" */ | |
203 bitoff-=2; | |
204 | |
205 } | |
206 | |
207 /* ci must be 0 */ | |
208 if(bitoff != 6) { | |
209 | |
12378 | 210 ((__u8*)strm)[co]= bitten; |
3802 | 211 co++; |
212 | |
213 } | |
214 goto BAUCHWEH; | |
215 | |
216 HERZWEH: | |
217 /* ci cannot be 0 */ | |
218 /* correct bitoff to nibble boundaries */ | |
219 | |
220 switch(bitoff){ | |
221 case 4: | |
222 case 6: | |
223 bitoff = 0; | |
224 break; | |
225 case 2: | |
226 case 0: | |
12378 | 227 ((__u8*)strm)[co]= bitten; |
3802 | 228 bitoff = 4; |
229 co++; | |
230 bitten = 0; // clear half nibble values in bitten | |
231 break; | |
232 default: | |
233 break; | |
234 } | |
235 | |
236 for(; ci>0; ci--) { | |
237 | |
238 ZZvalue = data[RTjpeg_ZZ[ci]]; | |
239 | |
240 if( (ZZvalue > 7) || (ZZvalue < -7) ) { | |
241 bitten |= (0x08<<bitoff); | |
242 goto HIRNWEH; | |
243 } | |
244 | |
245 bitten |= (ZZvalue&0xf)<<bitoff; | |
246 | |
247 if( bitoff == 0 ) { | |
12378 | 248 ((__u8*)strm)[co]= bitten; |
3802 | 249 bitten = 0; |
250 bitoff = 8; | |
251 co++; | |
252 } /* "fall thru" */ | |
253 bitoff-=4; | |
254 } | |
255 | |
256 /* ci must be 0 */ | |
257 if( bitoff == 0 ) { | |
12378 | 258 ((__u8*)strm)[co]= bitten; |
3802 | 259 co++; |
260 } | |
261 goto BAUCHWEH; | |
262 | |
263 HIRNWEH: | |
264 | |
12378 | 265 ((__u8*)strm)[co]= bitten; |
3802 | 266 co++; |
267 | |
268 | |
269 /* bitting is over now we bite */ | |
270 for(; ci>0; ci--) { | |
271 | |
272 ZZvalue = data[RTjpeg_ZZ[ci]]; | |
273 | |
274 if(ZZvalue>0) | |
275 { | |
276 strm[co++]=(__s8)(ZZvalue>127)?127:ZZvalue; | |
277 } | |
278 else | |
279 { | |
280 strm[co++]=(__s8)(ZZvalue<-128)?-128:ZZvalue; | |
281 } | |
282 | |
283 } | |
284 | |
285 | |
286 BAUCHWEH: | |
287 /* we gotoo much now we are ill */ | |
288 #ifdef SHOWBLOCK | |
289 { | |
290 int i; | |
291 fprintf(stdout, "\nco = '%d'\n", co); | |
292 for (i=0; i < co+2; i++) { | |
293 fprintf(stdout, "%d ", strm[i]); | |
294 } | |
295 fprintf(stdout, "\n\n"); | |
296 } | |
297 #endif | |
298 | |
299 return (int)co; | |
300 } | |
301 | |
302 /* +++++++++++++++++++++++++++++++++++++++++++++++++++*/ | |
303 /* Stream to Block (decoding) */ | |
304 /* */ | |
305 | |
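/* Inverse of RTjpeg_b2s above: read the DC byte, take the index of the last
   non-zero coefficient from the top 6 bits of strm[1], zero the tail of the
   block, then walk the 2-bit / 4-bit / byte-wide tiers in the same order,
   dequantising each value with qtbl.  Returns the number of stream bytes
   consumed. */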
306 int RTjpeg_s2b(__s16 *data, __s8 *strm, __u8 bt8, __u32 *qtbl) | |
307 { | |
308 int ci; | |
309 register int co; | |
310 register int i; | |
311 register unsigned char bitten; | |
312 register unsigned char bitoff; | |
313 | |
314 /* first byte always read */ | |
315 i=RTjpeg_ZZ[0]; | |
316 data[i]=((__u8)strm[0])*qtbl[i]; | |
317 | |
318 /* we start from the back end */ | |
319 | |
320 bitten = ((unsigned char)strm[1]) >> 2; | |
321 co = 63; | |
322 for(; co > bitten; co--) { | |
323 | |
324 data[RTjpeg_ZZ[co]] = 0; | |
325 | |
326 } | |
327 | |
328 if (co==0) { | |
329 ci = 2; | |
330 goto AUTOBAHN; | |
331 } | |
332 | |
333 /* we have to read the last 2 bits of the second byte */ | |
334 ci=1; | |
335 bitoff = 0; | |
336 | |
337 for(; co>0; co--) { | |
338 | |
339 bitten = ((unsigned char)strm[ci]) >> bitoff; | |
340 bitten &= 0x03; | |
341 | |
342 i=RTjpeg_ZZ[co]; | |
343 | |
344 switch( bitten ) { | |
345 case 0x03: | |
346 data[i]= -qtbl[i]; | |
347 break; | |
348 case 0x02: | |
349 goto FUSSWEG; | |
350 break; | |
351 case 0x01: | |
352 data[i]= qtbl[i]; | |
353 break; | |
354 case 0x00: | |
355 data[i]= 0; | |
356 break; | |
357 default: | |
6335 | 358 break; |
3802 | 359 } |
360 | |
361 if( bitoff == 0 ) { | |
362 bitoff = 8; | |
363 ci++; | |
364 } | |
365 bitoff -= 2; | |
366 } | |
367 /* co is 0 now */ | |
368 /* data is written properly */ | |
369 | |
370 /* if bitoff!=6 then ci is the index, but should be the byte count, so we increment by 1 */ | |
371 if (bitoff!=6) ci++; | |
372 | |
373 goto AUTOBAHN; | |
374 | |
375 | |
376 FUSSWEG: | |
377 /* correct bitoff to nibble */ | |
378 switch(bitoff){ | |
379 case 4: | |
380 case 6: | |
381 bitoff = 0; | |
382 break; | |
383 case 2: | |
384 case 0: | |
385 /* we have to read from the next byte */ | |
386 ci++; | |
387 bitoff = 4; | |
388 break; | |
389 default: | |
390 break; | |
391 } | |
392 | |
393 for(; co>0; co--) { | |
394 | |
395 bitten = ((unsigned char)strm[ci]) >> bitoff; | |
396 bitten &= 0x0f; | |
397 | |
398 i=RTjpeg_ZZ[co]; | |
399 | |
400 if( bitten == 0x08 ) { | |
401 goto STRASSE; | |
402 } | |
403 | |
404 /* the compiler cannot do sign extension for signed nibbles */ | |
405 if( bitten & 0x08 ) { | |
406 bitten |= 0xf0; | |
407 } | |
408 /* the unsigned char bitten now is a valid signed char */ | |
409 | |
410 data[i]=((signed char)bitten)*qtbl[i]; | |
411 | |
412 if( bitoff == 0 ) { | |
413 bitoff = 8; | |
414 ci++; | |
415 } | |
416 bitoff -= 4; | |
417 } | |
418 /* co is 0 */ | |
419 | |
420 /* if bitoff!=4 then ci is the index, but should be the byte count, so we increment by 1 */ | |
421 if (bitoff!=4) ci++; | |
422 | |
423 goto AUTOBAHN; | |
424 | |
425 STRASSE: | |
426 ci++; | |
427 | |
428 for(; co>0; co--) { | |
429 i=RTjpeg_ZZ[co]; | |
430 data[i]=strm[ci++]*qtbl[i]; | |
431 } | |
432 | |
433 /* ci now is the count, because it points to next element => no incrementing */ | |
434 | |
435 AUTOBAHN: | |
436 | |
437 #ifdef SHOWBLOCK | |
438 fprintf(stdout, "\nci = '%d'\n", ci); | |
439 for (i=0; i < 64; i++) { | |
440 fprintf(stdout, "%d ", data[RTjpeg_ZZ[i]]); | |
441 } | |
442 fprintf(stdout, "\n\n"); | |
443 #endif | |
444 | |
445 return ci; | |
446 } | |
447 | |
448 #else | |
449 | |
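/* Fallback coder used when BETTERCOMPRESSION is not defined: the DC byte is
   followed by the first bt8 AC coefficients stored as saturated signed bytes,
   and the remaining ones as bytes in -64..63, with each run of zeros
   collapsed into a single byte holding 63 + run length. */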
450 int RTjpeg_b2s(__s16 *data, __s8 *strm, __u8 bt8) | |
451 { | |
452 register int ci, co=1, tmp; | |
453 register __s16 ZZvalue; | |
454 | |
455 #ifdef SHOWBLOCK | |
456 | |
457 int ii; | |
458 for (ii=0; ii < 64; ii++) { | |
459 fprintf(stdout, "%d ", data[RTjpeg_ZZ[ii]]); | |
460 } | |
461 fprintf(stdout, "\n\n"); | |
462 | |
463 #endif | |
464 | |
465 (__u8)strm[0]=(__u8)(data[RTjpeg_ZZ[0]]>254) ? 254:((data[RTjpeg_ZZ[0]]<0)?0:data[RTjpeg_ZZ[0]]); | |
466 | |
467 for(ci=1; ci<=bt8; ci++) | |
468 { | |
469 ZZvalue = data[RTjpeg_ZZ[ci]]; | |
470 | |
471 if(ZZvalue>0) | |
472 { | |
473 strm[co++]=(__s8)(ZZvalue>127)?127:ZZvalue; | |
474 } | |
475 else | |
476 { | |
477 strm[co++]=(__s8)(ZZvalue<-128)?-128:ZZvalue; | |
478 } | |
479 } | |
480 | |
481 for(; ci<64; ci++) | |
482 { | |
483 ZZvalue = data[RTjpeg_ZZ[ci]]; | |
484 | |
485 if(ZZvalue>0) | |
486 { | |
487 strm[co++]=(__s8)(ZZvalue>63)?63:ZZvalue; | |
488 } | |
489 else if(ZZvalue<0) | |
490 { | |
491 strm[co++]=(__s8)(ZZvalue<-64)?-64:ZZvalue; | |
492 } | |
493 else /* compress zeros */ | |
494 { | |
495 tmp=ci; | |
496 do | |
497 { | |
498 ci++; | |
499 } | |
500 while((ci<64)&&(data[RTjpeg_ZZ[ci]]==0)); | |
501 | |
502 strm[co++]=(__s8)(63+(ci-tmp)); | |
503 ci--; | |
504 } | |
505 } | |
506 return (int)co; | |
507 } | |
508 | |
509 int RTjpeg_s2b(__s16 *data, __s8 *strm, __u8 bt8, __u32 *qtbl) | |
510 { | |
511 int ci=1, co=1, tmp; | |
512 register int i; | |
513 | |
514 i=RTjpeg_ZZ[0]; | |
515 data[i]=((__u8)strm[0])*qtbl[i]; | |
516 | |
517 for(co=1; co<=bt8; co++) | |
518 { | |
519 i=RTjpeg_ZZ[co]; | |
520 data[i]=strm[ci++]*qtbl[i]; | |
521 } | |
522 | |
523 for(; co<64; co++) | |
524 { | |
525 if(strm[ci]>63) | |
526 { | |
527 tmp=co+strm[ci]-63; | |
528 for(; co<tmp; co++)data[RTjpeg_ZZ[co]]=0; | |
529 co--; | |
530 } else | |
531 { | |
532 i=RTjpeg_ZZ[co]; | |
533 data[i]=strm[ci]*qtbl[i]; | |
534 } | |
535 ci++; | |
536 } | |
537 return (int)ci; | |
538 } | |
539 #endif | |
540 | |
28298 | 541 #if HAVE_MMX |
3802 | 542 void RTjpeg_quant_init(void) |
543 { | |
544 int i; | |
545 __s16 *qtbl; | |
546 | |
547 qtbl=(__s16 *)RTjpeg_lqt; | |
548 for(i=0; i<64; i++)qtbl[i]=(__s16)RTjpeg_lqt[i]; | |
549 | |
550 qtbl=(__s16 *)RTjpeg_cqt; | |
551 for(i=0; i<64; i++)qtbl[i]=(__s16)RTjpeg_cqt[i]; | |
552 } | |
553 | |
12928 | 554 static mmx_t RTjpeg_ones={0x0001000100010001LL}; |
555 static mmx_t RTjpeg_half={0x7fff7fff7fff7fffLL}; | |
3802 | 556 |
557 void RTjpeg_quant(__s16 *block, __s32 *qtbl) | |
558 { | |
559 int i; | |
560 mmx_t *bl, *ql; | |
561 | |
562 ql=(mmx_t *)qtbl; | |
563 bl=(mmx_t *)block; | |
564 | |
565 movq_m2r(RTjpeg_ones, mm6); | |
566 movq_m2r(RTjpeg_half, mm7); | |
567 | |
568 for(i=16; i; i--) | |
569 { | |
570 movq_m2r(*(ql++), mm0); /* quant vals (4) */ | |
571 movq_m2r(*bl, mm2); /* block vals (4) */ | |
572 movq_r2r(mm0, mm1); | |
573 movq_r2r(mm2, mm3); | |
574 | |
575 punpcklwd_r2r(mm6, mm0); /* 1 qb 1 qa */ | |
576 punpckhwd_r2r(mm6, mm1); /* 1 qd 1 qc */ | |
577 | |
578 punpcklwd_r2r(mm7, mm2); /* 32767 bb 32767 ba */ | |
579 punpckhwd_r2r(mm7, mm3); /* 32767 bd 32767 bc */ | |
580 | |
581 pmaddwd_r2r(mm2, mm0); /* 32767+bb*qb 32767+ba*qa */ | |
582 pmaddwd_r2r(mm3, mm1); /* 32767+bd*qd 32767+bc*qc */ | |
583 | |
584 psrad_i2r(16, mm0); | |
585 psrad_i2r(16, mm1); | |
586 | |
587 packssdw_r2r(mm1, mm0); | |
588 | |
589 movq_r2m(mm0, *(bl++)); | |
590 | |
591 } | |
592 } | |
593 #else | |
594 void RTjpeg_quant_init(void) | |
595 { | |
596 } | |
597 | |
598 void RTjpeg_quant(__s16 *block, __s32 *qtbl) | |
599 { | |
600 int i; | |
601 | |
602 for(i=0; i<64; i++) | |
603 block[i]=(__s16)((block[i]*qtbl[i]+32767)>>16); | |
604 } | |
605 #endif | |
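/* In both variants above, "quantisation" is a single fixed-point multiply:
   block[i]*qtbl[i] is treated as a 16.16 product and rounded back to 16 bits
   (+32767 before the >>16), so the tables prepared elsewhere in this file are
   presumably reciprocal quantiser steps pre-scaled by 2^16.  The MMX path
   computes the same thing four coefficients at a time, using pmaddwd with
   0x7fff as the rounding constant. */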
606 | |
607 /* | |
608 * Perform the forward DCT on one block of samples. | |
609 */ | |
28298 | 610 #if HAVE_MMX |
12928 | 611 static mmx_t RTjpeg_C4 ={0x2D412D412D412D41LL}; |
612 static mmx_t RTjpeg_C6 ={0x187E187E187E187ELL}; | |
613 static mmx_t RTjpeg_C2mC6={0x22A322A322A322A3LL}; | |
614 static mmx_t RTjpeg_C2pC6={0x539F539F539F539FLL}; | |
615 static mmx_t RTjpeg_zero ={0x0000000000000000LL}; | |
3802 | 616 |
617 #else | |
618 | |
619 #define FIX_0_382683433 ((__s32) 98) /* FIX(0.382683433) */ | |
620 #define FIX_0_541196100 ((__s32) 139) /* FIX(0.541196100) */ | |
621 #define FIX_0_707106781 ((__s32) 181) /* FIX(0.707106781) */ | |
622 #define FIX_1_306562965 ((__s32) 334) /* FIX(1.306562965) */ | |
623 | |
624 #define DESCALE10(x) (__s16)( ((x)+128) >> 8) | |
625 #define DESCALE20(x) (__s16)(((x)+32768) >> 16) | |
626 #define D_MULTIPLY(var,const) ((__s32) ((var) * (const))) | |
627 #endif | |
628 | |
629 void RTjpeg_dct_init(void) | |
630 { | |
631 int i; | |
632 | |
633 for(i=0; i<64; i++) | |
634 { | |
635 RTjpeg_lqt[i]=(((__u64)RTjpeg_lqt[i]<<32)/RTjpeg_aan_tab[i]); | |
636 RTjpeg_cqt[i]=(((__u64)RTjpeg_cqt[i]<<32)/RTjpeg_aan_tab[i]); | |
637 } | |
638 } | |
639 | |
640 void RTjpeg_dctY(__u8 *idata, __s16 *odata, int rskip) | |
641 { | |
28298 | 642 #if !HAVE_MMX |
3802 | 643 __s32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; |
644 __s32 tmp10, tmp11, tmp12, tmp13; | |
645 __s32 z1, z2, z3, z4, z5, z11, z13; | |
646 __u8 *idataptr; | |
647 __s16 *odataptr; | |
648 __s32 *wsptr; | |
649 int ctr; | |
650 | |
651 idataptr = idata; | |
652 wsptr = RTjpeg_ws; | |
653 for (ctr = 7; ctr >= 0; ctr--) { | |
654 tmp0 = idataptr[0] + idataptr[7]; | |
655 tmp7 = idataptr[0] - idataptr[7]; | |
656 tmp1 = idataptr[1] + idataptr[6]; | |
657 tmp6 = idataptr[1] - idataptr[6]; | |
658 tmp2 = idataptr[2] + idataptr[5]; | |
659 tmp5 = idataptr[2] - idataptr[5]; | |
660 tmp3 = idataptr[3] + idataptr[4]; | |
661 tmp4 = idataptr[3] - idataptr[4]; | |
662 | |
663 tmp10 = (tmp0 + tmp3); /* phase 2 */ | |
664 tmp13 = tmp0 - tmp3; | |
665 tmp11 = (tmp1 + tmp2); | |
666 tmp12 = tmp1 - tmp2; | |
667 | |
668 wsptr[0] = (tmp10 + tmp11)<<8; /* phase 3 */ | |
669 wsptr[4] = (tmp10 - tmp11)<<8; | |
670 | |
671 z1 = D_MULTIPLY(tmp12 + tmp13, FIX_0_707106781); /* c4 */ | |
672 wsptr[2] = (tmp13<<8) + z1; /* phase 5 */ | |
673 wsptr[6] = (tmp13<<8) - z1; | |
674 | |
675 tmp10 = tmp4 + tmp5; /* phase 2 */ | |
676 tmp11 = tmp5 + tmp6; | |
677 tmp12 = tmp6 + tmp7; | |
678 | |
679 z5 = D_MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */ | |
680 z2 = D_MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */ | |
681 z4 = D_MULTIPLY(tmp12, FIX_1_306562965) + z5; /* c2+c6 */ | |
682 z3 = D_MULTIPLY(tmp11, FIX_0_707106781); /* c4 */ | |
683 | |
684 z11 = (tmp7<<8) + z3; /* phase 5 */ | |
685 z13 = (tmp7<<8) - z3; | |
686 | |
687 wsptr[5] = z13 + z2; /* phase 6 */ | |
688 wsptr[3] = z13 - z2; | |
689 wsptr[1] = z11 + z4; | |
690 wsptr[7] = z11 - z4; | |
691 | |
692 idataptr += rskip<<3; /* advance pointer to next row */ | |
693 wsptr += 8; | |
694 } | |
695 | |
696 wsptr = RTjpeg_ws; | |
697 odataptr=odata; | |
698 for (ctr = 7; ctr >= 0; ctr--) { | |
699 tmp0 = wsptr[0] + wsptr[56]; | |
700 tmp7 = wsptr[0] - wsptr[56]; | |
701 tmp1 = wsptr[8] + wsptr[48]; | |
702 tmp6 = wsptr[8] - wsptr[48]; | |
703 tmp2 = wsptr[16] + wsptr[40]; | |
704 tmp5 = wsptr[16] - wsptr[40]; | |
705 tmp3 = wsptr[24] + wsptr[32]; | |
706 tmp4 = wsptr[24] - wsptr[32]; | |
707 | |
708 tmp10 = tmp0 + tmp3; /* phase 2 */ | |
709 tmp13 = tmp0 - tmp3; | |
710 tmp11 = tmp1 + tmp2; | |
711 tmp12 = tmp1 - tmp2; | |
712 | |
713 odataptr[0] = DESCALE10(tmp10 + tmp11); /* phase 3 */ | |
714 odataptr[32] = DESCALE10(tmp10 - tmp11); | |
715 | |
716 z1 = D_MULTIPLY(tmp12 + tmp13, FIX_0_707106781); /* c4 */ | |
717 odataptr[16] = DESCALE20((tmp13<<8) + z1); /* phase 5 */ | |
718 odataptr[48] = DESCALE20((tmp13<<8) - z1); | |
719 | |
720 tmp10 = tmp4 + tmp5; /* phase 2 */ | |
721 tmp11 = tmp5 + tmp6; | |
722 tmp12 = tmp6 + tmp7; | |
723 | |
724 z5 = D_MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */ | |
725 z2 = D_MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */ | |
726 z4 = D_MULTIPLY(tmp12, FIX_1_306562965) + z5; /* c2+c6 */ | |
727 z3 = D_MULTIPLY(tmp11, FIX_0_707106781); /* c4 */ | |
728 | |
729 z11 = (tmp7<<8) + z3; /* phase 5 */ | |
730 z13 = (tmp7<<8) - z3; | |
731 | |
732 odataptr[40] = DESCALE20(z13 + z2); /* phase 6 */ | |
733 odataptr[24] = DESCALE20(z13 - z2); | |
734 odataptr[8] = DESCALE20(z11 + z4); | |
735 odataptr[56] = DESCALE20(z11 - z4); | |
736 | |
737 odataptr++; /* advance pointer to next column */ | |
738 wsptr++; | |
739 } | |
740 #else | |
741 volatile mmx_t tmp6, tmp7; | |
742 register mmx_t *dataptr = (mmx_t *)odata; | |
743 mmx_t *idata2 = (mmx_t *)idata; | |
744 | |
745 // first copy the input 8 bit to the destination 16 bits | |
746 | |
747 movq_m2r(RTjpeg_zero, mm2); | |
748 | |
749 | |
750 movq_m2r(*idata2, mm0); | |
751 movq_r2r(mm0, mm1); | |
752 | |
753 punpcklbw_r2r(mm2, mm0); | |
754 movq_r2m(mm0, *(dataptr)); | |
755 | |
756 punpckhbw_r2r(mm2, mm1); | |
757 movq_r2m(mm1, *(dataptr+1)); | |
758 | |
759 idata2 += rskip; | |
760 | |
761 movq_m2r(*idata2, mm0); | |
762 movq_r2r(mm0, mm1); | |
763 | |
764 punpcklbw_r2r(mm2, mm0); | |
765 movq_r2m(mm0, *(dataptr+2)); | |
766 | |
767 punpckhbw_r2r(mm2, mm1); | |
768 movq_r2m(mm1, *(dataptr+3)); | |
769 | |
770 idata2 += rskip; | |
771 | |
772 movq_m2r(*idata2, mm0); | |
773 movq_r2r(mm0, mm1); | |
774 | |
775 punpcklbw_r2r(mm2, mm0); | |
776 movq_r2m(mm0, *(dataptr+4)); | |
777 | |
778 punpckhbw_r2r(mm2, mm1); | |
779 movq_r2m(mm1, *(dataptr+5)); | |
780 | |
781 idata2 += rskip; | |
782 | |
783 movq_m2r(*idata2, mm0); | |
784 movq_r2r(mm0, mm1); | |
785 | |
786 punpcklbw_r2r(mm2, mm0); | |
787 movq_r2m(mm0, *(dataptr+6)); | |
788 | |
789 punpckhbw_r2r(mm2, mm1); | |
790 movq_r2m(mm1, *(dataptr+7)); | |
791 | |
792 idata2 += rskip; | |
793 | |
794 movq_m2r(*idata2, mm0); | |
795 movq_r2r(mm0, mm1); | |
796 | |
797 punpcklbw_r2r(mm2, mm0); | |
798 movq_r2m(mm0, *(dataptr+8)); | |
799 | |
800 punpckhbw_r2r(mm2, mm1); | |
801 movq_r2m(mm1, *(dataptr+9)); | |
802 | |
803 idata2 += rskip; | |
804 | |
805 movq_m2r(*idata2, mm0); | |
806 movq_r2r(mm0, mm1); | |
807 | |
808 punpcklbw_r2r(mm2, mm0); | |
809 movq_r2m(mm0, *(dataptr+10)); | |
810 | |
811 punpckhbw_r2r(mm2, mm1); | |
812 movq_r2m(mm1, *(dataptr+11)); | |
813 | |
814 idata2 += rskip; | |
815 | |
816 movq_m2r(*idata2, mm0); | |
817 movq_r2r(mm0, mm1); | |
818 | |
819 punpcklbw_r2r(mm2, mm0); | |
820 movq_r2m(mm0, *(dataptr+12)); | |
821 | |
822 punpckhbw_r2r(mm2, mm1); | |
823 movq_r2m(mm1, *(dataptr+13)); | |
824 | |
825 idata2 += rskip; | |
826 | |
827 movq_m2r(*idata2, mm0); | |
828 movq_r2r(mm0, mm1); | |
829 | |
830 punpcklbw_r2r(mm2, mm0); | |
831 movq_r2m(mm0, *(dataptr+14)); | |
832 | |
833 punpckhbw_r2r(mm2, mm1); | |
834 movq_r2m(mm1, *(dataptr+15)); | |
835 | |
836 /* Start Transpose to do calculations on rows */ | |
837 | |
838 movq_m2r(*(dataptr+9), mm7); // m03:m02|m01:m00 - first line (line 4)and copy into m5 | |
839 | |
840 movq_m2r(*(dataptr+13), mm6); // m23:m22|m21:m20 - third line (line 6)and copy into m2 | |
841 movq_r2r(mm7, mm5); | |
842 | |
843 punpcklwd_m2r(*(dataptr+11), mm7); // m11:m01|m10:m00 - interleave first and second lines | |
844 movq_r2r(mm6, mm2); | |
845 | |
846 punpcklwd_m2r(*(dataptr+15), mm6); // m31:m21|m30:m20 - interleave third and fourth lines | |
847 movq_r2r(mm7, mm1); | |
848 | |
849 movq_m2r(*(dataptr+11), mm3); // m13:m12|m11:m10 - second line | |
850 punpckldq_r2r(mm6, mm7); // m30:m20|m10:m00 - interleave to produce result 1 | |
851 | |
852 movq_m2r(*(dataptr+15), mm0); // m33:m32|m31:m30 - fourth line | |
853 punpckhdq_r2r(mm6, mm1); // m31:m21|m11:m01 - interleave to produce result 2 | |
854 | |
855 movq_r2m(mm7,*(dataptr+9)); // write result 1 | |
856 punpckhwd_r2r(mm3, mm5); // m13:m03|m12:m02 - interleave first and second lines | |
857 | |
858 movq_r2m(mm1,*(dataptr+11)); // write result 2 | |
859 punpckhwd_r2r(mm0, mm2); // m33:m23|m32:m22 - interleave third and fourth lines | |
860 | |
861 movq_r2r(mm5, mm1); | |
862 punpckldq_r2r(mm2, mm5); // m32:m22|m12:m02 - interleave to produce result 3 | |
863 | |
864 movq_m2r(*(dataptr+1), mm0); // m03:m02|m01:m00 - first line, 4x4 | |
865 punpckhdq_r2r(mm2, mm1); // m33:m23|m13:m03 - interleave to produce result 4 | |
866 | |
867 movq_r2m(mm5,*(dataptr+13)); // write result 3 | |
868 | |
869 // last 4x4 done | |
870 | |
871 movq_r2m(mm1, *(dataptr+15)); // write result 4, last 4x4 | |
872 | |
873 movq_m2r(*(dataptr+5), mm2); // m23:m22|m21:m20 - third line | |
874 movq_r2r(mm0, mm6); | |
875 | |
876 punpcklwd_m2r(*(dataptr+3), mm0); // m11:m01|m10:m00 - interleave first and second lines | |
877 movq_r2r(mm2, mm7); | |
878 | |
879 punpcklwd_m2r(*(dataptr+7), mm2); // m31:m21|m30:m20 - interleave third and fourth lines | |
880 movq_r2r(mm0, mm4); | |
881 | |
882 // | |
883 movq_m2r(*(dataptr+8), mm1); // n03:n02|n01:n00 - first line | |
884 punpckldq_r2r(mm2, mm0); // m30:m20|m10:m00 - interleave to produce first result | |
885 | |
886 movq_m2r(*(dataptr+12), mm3); // n23:n22|n21:n20 - third line | |
887 punpckhdq_r2r(mm2, mm4); // m31:m21|m11:m01 - interleave to produce second result | |
888 | |
889 punpckhwd_m2r(*(dataptr+3), mm6); // m13:m03|m12:m02 - interleave first and second lines | |
890 movq_r2r(mm1, mm2); // copy first line | |
891 | |
892 punpckhwd_m2r(*(dataptr+7), mm7); // m33:m23|m32:m22 - interleave third and fourth lines | |
893 movq_r2r(mm6, mm5); // copy first intermediate result | |
894 | |
895 movq_r2m(mm0, *(dataptr+8)); // write result 1 | |
896 punpckhdq_r2r(mm7, mm5); // m33:m23|m13:m03 - produce third result | |
897 | |
898 punpcklwd_m2r(*(dataptr+10), mm1); // n11:n01|n10:n00 - interleave first and second lines | |
899 movq_r2r(mm3, mm0); // copy third line | |
900 | |
901 punpckhwd_m2r(*(dataptr+10), mm2); // n13:n03|n12:n02 - interleave first and second lines | |
902 | |
903 movq_r2m(mm4, *(dataptr+10)); // write result 2 out | |
904 punpckldq_r2r(mm7, mm6); // m32:m22|m12:m02 - produce fourth result | |
905 | |
906 punpcklwd_m2r(*(dataptr+14), mm3); // n31:n21|n30:n20 - interleave third and fourth lines | |
907 movq_r2r(mm1, mm4); | |
908 | |
909 movq_r2m(mm6, *(dataptr+12)); // write result 3 out | |
910 punpckldq_r2r(mm3, mm1); // n30:n20|n10:n00 - produce first result | |
911 | |
912 punpckhwd_m2r(*(dataptr+14), mm0); // n33:n23|n32:n22 - interleave third and fourth lines | |
913 movq_r2r(mm2, mm6); | |
914 | |
915 movq_r2m(mm5, *(dataptr+14)); // write result 4 out | |
916 punpckhdq_r2r(mm3, mm4); // n31:n21|n11:n01- produce second result | |
917 | |
918 movq_r2m(mm1, *(dataptr+1)); // write result 5 out - (first result for other 4 x 4 block) | |
919 punpckldq_r2r(mm0, mm2); // n32:n22|n12:n02- produce third result | |
920 | |
921 movq_r2m(mm4, *(dataptr+3)); // write result 6 out | |
922 punpckhdq_r2r(mm0, mm6); // n33:n23|n13:n03 - produce fourth result | |
923 | |
924 movq_r2m(mm2, *(dataptr+5)); // write result 7 out | |
925 | |
926 movq_m2r(*dataptr, mm0); // m03:m02|m01:m00 - first line, first 4x4 | |
927 | |
928 movq_r2m(mm6, *(dataptr+7)); // write result 8 out | |
929 | |
930 | |
931 // Do first 4x4 quadrant, which is used in the beginning of the DCT: | |
932 | |
933 movq_m2r(*(dataptr+4), mm7); // m23:m22|m21:m20 - third line | |
934 movq_r2r(mm0, mm2); | |
935 | |
936 punpcklwd_m2r(*(dataptr+2), mm0); // m11:m01|m10:m00 - interleave first and second lines | |
937 movq_r2r(mm7, mm4); | |
938 | |
939 punpcklwd_m2r(*(dataptr+6), mm7); // m31:m21|m30:m20 - interleave third and fourth lines | |
940 movq_r2r(mm0, mm1); | |
941 | |
942 movq_m2r(*(dataptr+2), mm6); // m13:m12|m11:m10 - second line | |
943 punpckldq_r2r(mm7, mm0); // m30:m20|m10:m00 - interleave to produce result 1 | |
944 | |
945 movq_m2r(*(dataptr+6), mm5); // m33:m32|m31:m30 - fourth line | |
946 punpckhdq_r2r(mm7, mm1); // m31:m21|m11:m01 - interleave to produce result 2 | |
947 | |
948 movq_r2r(mm0, mm7); // write result 1 | |
949 punpckhwd_r2r(mm6, mm2); // m13:m03|m12:m02 - interleave first and second lines | |
950 | |
951 psubw_m2r(*(dataptr+14), mm7); // tmp07=x0-x7 /* Stage 1 */ | |
952 movq_r2r(mm1, mm6); // write result 2 | |
953 | |
954 paddw_m2r(*(dataptr+14), mm0); // tmp00=x0+x7 /* Stage 1 */ | |
955 punpckhwd_r2r(mm5, mm4); // m33:m23|m32:m22 - interleave third and fourth lines | |
956 | |
957 paddw_m2r(*(dataptr+12), mm1); // tmp01=x1+x6 /* Stage 1 */ | |
958 movq_r2r(mm2, mm3); // copy first intermediate result | |
959 | |
960 psubw_m2r(*(dataptr+12), mm6); // tmp06=x1-x6 /* Stage 1 */ | |
961 punpckldq_r2r(mm4, mm2); // m32:m22|m12:m02 - interleave to produce result 3 | |
962 | |
963 movq_r2m(mm7, tmp7); | |
964 movq_r2r(mm2, mm5); // write result 3 | |
965 | |
966 movq_r2m(mm6, tmp6); | |
967 punpckhdq_r2r(mm4, mm3); // m33:m23|m13:m03 - interleave to produce result 4 | |
968 | |
969 paddw_m2r(*(dataptr+10), mm2); // tmp02=x2+x5 /* Stage 1 */ | |
970 movq_r2r(mm3, mm4); // write result 4 | |
971 | |
972 /************************************************************************************************ | |
973 End of Transpose | |
974 ************************************************************************************************/ | |
975 | |
976 | |
977 paddw_m2r(*(dataptr+8), mm3); // tmp03=x3+x4 /* stage 1*/ | |
978 movq_r2r(mm0, mm7); | |
979 | |
980 psubw_m2r(*(dataptr+8), mm4); // tmp04=x3-x4 /* stage 1*/ | |
981 movq_r2r(mm1, mm6); | |
982 | |
983 paddw_r2r(mm3, mm0); // tmp10 = tmp00 + tmp03 /* even 2 */ | |
984 psubw_r2r(mm3, mm7); // tmp13 = tmp00 - tmp03 /* even 2 */ | |
985 | |
986 psubw_r2r(mm2, mm6); // tmp12 = tmp01 - tmp02 /* even 2 */ | |
987 paddw_r2r(mm2, mm1); // tmp11 = tmp01 + tmp02 /* even 2 */ | |
988 | |
989 psubw_m2r(*(dataptr+10), mm5); // tmp05=x2-x5 /* stage 1*/ | |
990 paddw_r2r(mm7, mm6); // tmp12 + tmp13 | |
991 | |
992 /* stage 3 */ | |
993 | |
994 movq_m2r(tmp6, mm2); | |
995 movq_r2r(mm0, mm3); | |
996 | |
997 psllw_i2r(2, mm6); // m8 * 2^2 | |
998 paddw_r2r(mm1, mm0); | |
999 | |
1000 pmulhw_m2r(RTjpeg_C4, mm6); // z1 | |
1001 psubw_r2r(mm1, mm3); | |
1002 | |
1003 movq_r2m(mm0, *dataptr); | |
1004 movq_r2r(mm7, mm0); | |
1005 | |
1006 /* Odd part */ | |
1007 movq_r2m(mm3, *(dataptr+8)); | |
1008 paddw_r2r(mm5, mm4); // tmp10 | |
1009 | |
1010 movq_m2r(tmp7, mm3); | |
1011 paddw_r2r(mm6, mm0); // tmp32 | |
1012 | |
1013 paddw_r2r(mm2, mm5); // tmp11 | |
1014 psubw_r2r(mm6, mm7); // tmp33 | |
1015 | |
1016 movq_r2m(mm0, *(dataptr+4)); | |
1017 paddw_r2r(mm3, mm2); // tmp12 | |
1018 | |
1019 /* stage 4 */ | |
1020 | |
1021 movq_r2m(mm7, *(dataptr+12)); | |
1022 movq_r2r(mm4, mm1); // copy of tmp10 | |
1023 | |
1024 psubw_r2r(mm2, mm1); // tmp10 - tmp12 | |
1025 psllw_i2r(2, mm4); // m8 * 2^2 | |
1026 | |
1027 movq_m2r(RTjpeg_C2mC6, mm0); | |
1028 psllw_i2r(2, mm1); | |
1029 | |
1030 pmulhw_m2r(RTjpeg_C6, mm1); // z5 | |
1031 psllw_i2r(2, mm2); | |
1032 | |
1033 pmulhw_r2r(mm0, mm4); // z5 | |
1034 | |
1035 /* stage 5 */ | |
1036 | |
1037 pmulhw_m2r(RTjpeg_C2pC6, mm2); | |
1038 psllw_i2r(2, mm5); | |
1039 | |
1040 pmulhw_m2r(RTjpeg_C4, mm5); // z3 | |
1041 movq_r2r(mm3, mm0); // copy tmp7 | |
1042 | |
1043 movq_m2r(*(dataptr+1), mm7); | |
1044 paddw_r2r(mm1, mm4); // z2 | |
1045 | |
1046 paddw_r2r(mm1, mm2); // z4 | |
1047 | |
1048 paddw_r2r(mm5, mm0); // z11 | |
1049 psubw_r2r(mm5, mm3); // z13 | |
1050 | |
1051 /* stage 6 */ | |
1052 | |
1053 movq_r2r(mm3, mm5); // copy z13 | |
1054 psubw_r2r(mm4, mm3); // y3=z13 - z2 | |
1055 | |
1056 paddw_r2r(mm4, mm5); // y5=z13 + z2 | |
1057 movq_r2r(mm0, mm6); // copy z11 | |
1058 | |
1059 movq_r2m(mm3, *(dataptr+6)); //save y3 | |
1060 psubw_r2r(mm2, mm0); // y7=z11 - z4 | |
1061 | |
1062 movq_r2m(mm5, *(dataptr+10)); //save y5 | |
1063 paddw_r2r(mm2, mm6); // y1=z11 + z4 | |
1064 | |
1065 movq_r2m(mm0, *(dataptr+14)); //save y7 | |
1066 | |
1067 /************************************************ | |
1068 * End of 1st 4 rows | |
1069 ************************************************/ | |
1070 | |
1071 movq_m2r(*(dataptr+3), mm1); // load x1 /* stage 1 */ | |
1072 movq_r2r(mm7, mm0); // copy x0 | |
1073 | |
1074 movq_r2m(mm6, *(dataptr+2)); //save y1 | |
1075 | |
1076 movq_m2r(*(dataptr+5), mm2); // load x2 /* stage 1 */ | |
1077 movq_r2r(mm1, mm6); // copy x1 | |
1078 | |
1079 paddw_m2r(*(dataptr+15), mm0); // tmp00 = x0 + x7 | |
1080 | |
1081 movq_m2r(*(dataptr+7), mm3); // load x3 /* stage 1 */ | |
1082 movq_r2r(mm2, mm5); // copy x2 | |
1083 | |
1084 psubw_m2r(*(dataptr+15), mm7); // tmp07 = x0 - x7 | |
1085 movq_r2r(mm3, mm4); // copy x3 | |
1086 | |
1087 paddw_m2r(*(dataptr+13), mm1); // tmp01 = x1 + x6 | |
1088 | |
1089 movq_r2m(mm7, tmp7); // save tmp07 | |
1090 movq_r2r(mm0, mm7); // copy tmp00 | |
1091 | |
1092 psubw_m2r(*(dataptr+13), mm6); // tmp06 = x1 - x6 | |
1093 | |
1094 /* stage 2, Even Part */ | |
1095 | |
1096 paddw_m2r(*(dataptr+9), mm3); // tmp03 = x3 + x4 | |
1097 | |
1098 movq_r2m(mm6, tmp6); // save tmp07 | |
1099 movq_r2r(mm1, mm6); // copy tmp01 | |
1100 | |
1101 paddw_m2r(*(dataptr+11), mm2); // tmp02 = x2 + x5 | |
1102 paddw_r2r(mm3, mm0); // tmp10 = tmp00 + tmp03 | |
1103 | |
1104 psubw_r2r(mm3, mm7); // tmp13 = tmp00 - tmp03 | |
1105 | |
1106 psubw_m2r(*(dataptr+9), mm4); // tmp04 = x3 - x4 | |
1107 psubw_r2r(mm2, mm6); // tmp12 = tmp01 - tmp02 | |
1108 | |
1109 paddw_r2r(mm2, mm1); // tmp11 = tmp01 + tmp02 | |
1110 | |
1111 psubw_m2r(*(dataptr+11), mm5); // tmp05 = x2 - x5 | |
1112 paddw_r2r(mm7, mm6); // tmp12 + tmp13 | |
1113 | |
1114 /* stage 3, Even and stage 4 & 5 even */ | |
1115 | |
1116 movq_m2r(tmp6, mm2); // load tmp6 | |
1117 movq_r2r(mm0, mm3); // copy tmp10 | |
1118 | |
1119 psllw_i2r(2, mm6); // shift z1 | |
1120 paddw_r2r(mm1, mm0); // y0=tmp10 + tmp11 | |
1121 | |
1122 pmulhw_m2r(RTjpeg_C4, mm6); // z1 | |
1123 psubw_r2r(mm1, mm3); // y4=tmp10 - tmp11 | |
1124 | |
1125 movq_r2m(mm0, *(dataptr+1)); //save y0 | |
1126 movq_r2r(mm7, mm0); // copy tmp13 | |
1127 | |
1128 /* odd part */ | |
1129 | |
1130 movq_r2m(mm3, *(dataptr+9)); //save y4 | |
1131 paddw_r2r(mm5, mm4); // tmp10 = tmp4 + tmp5 | |
1132 | |
1133 movq_m2r(tmp7, mm3); // load tmp7 | |
1134 paddw_r2r(mm6, mm0); // tmp32 = tmp13 + z1 | |
1135 | |
1136 paddw_r2r(mm2, mm5); // tmp11 = tmp5 + tmp6 | |
1137 psubw_r2r(mm6, mm7); // tmp33 = tmp13 - z1 | |
1138 | |
1139 movq_r2m(mm0, *(dataptr+5)); //save y2 | |
1140 paddw_r2r(mm3, mm2); // tmp12 = tmp6 + tmp7 | |
1141 | |
1142 /* stage 4 */ | |
1143 | |
1144 movq_r2m(mm7, *(dataptr+13)); //save y6 | |
1145 movq_r2r(mm4, mm1); // copy tmp10 | |
1146 | |
1147 psubw_r2r(mm2, mm1); // tmp10 - tmp12 | |
1148 psllw_i2r(2, mm4); // shift tmp10 | |
1149 | |
1150 movq_m2r(RTjpeg_C2mC6, mm0); // load C2mC6 | |
1151 psllw_i2r(2, mm1); // shift (tmp10-tmp12) | |
1152 | |
1153 pmulhw_m2r(RTjpeg_C6, mm1); // z5 | |
1154 psllw_i2r(2, mm5); // prepare for multiply | |
1155 | |
1156 pmulhw_r2r(mm0, mm4); // multiply by converted real | |
1157 | |
1158 /* stage 5 */ | |
1159 | |
1160 pmulhw_m2r(RTjpeg_C4, mm5); // z3 | |
1161 psllw_i2r(2, mm2); // prepare for multiply | |
1162 | |
1163 pmulhw_m2r(RTjpeg_C2pC6, mm2); // multiply | |
1164 movq_r2r(mm3, mm0); // copy tmp7 | |
1165 | |
1166 movq_m2r(*(dataptr+9), mm7); // m03:m02|m01:m00 - first line (line 4)and copy into mm7 | |
1167 paddw_r2r(mm1, mm4); // z2 | |
1168 | |
1169 paddw_r2r(mm5, mm0); // z11 | |
1170 psubw_r2r(mm5, mm3); // z13 | |
1171 | |
1172 /* stage 6 */ | |
1173 | |
1174 movq_r2r(mm3, mm5); // copy z13 | |
1175 paddw_r2r(mm1, mm2); // z4 | |
1176 | |
1177 movq_r2r(mm0, mm6); // copy z11 | |
1178 psubw_r2r(mm4, mm5); // y3 | |
1179 | |
1180 paddw_r2r(mm2, mm6); // y1 | |
1181 paddw_r2r(mm4, mm3); // y5 | |
1182 | |
1183 movq_r2m(mm5, *(dataptr+7)); //save y3 | |
1184 | |
1185 movq_r2m(mm6, *(dataptr+3)); //save y1 | |
1186 psubw_r2r(mm2, mm0); // y7 | |
1187 | |
1188 /************************************************************************************************ | |
1189 Start of Transpose | |
1190 ************************************************************************************************/ | |
1191 | |
1192 movq_m2r(*(dataptr+13), mm6); // m23:m22|m21:m20 - third line (line 6)and copy into m2 | |
1193 movq_r2r(mm7, mm5); // copy first line | |
1194 | |
1195 punpcklwd_r2r(mm3, mm7); // m11:m01|m10:m00 - interleave first and second lines | |
1196 movq_r2r(mm6, mm2); // copy third line | |
1197 | |
1198 punpcklwd_r2r(mm0, mm6); // m31:m21|m30:m20 - interleave third and fourth lines | |
1199 movq_r2r(mm7, mm1); // copy first intermediate result | |
1200 | |
1201 punpckldq_r2r(mm6, mm7); // m30:m20|m10:m00 - interleave to produce result 1 | |
1202 | |
1203 punpckhdq_r2r(mm6, mm1); // m31:m21|m11:m01 - interleave to produce result 2 | |
1204 | |
1205 movq_r2m(mm7, *(dataptr+9)); // write result 1 | |
1206 punpckhwd_r2r(mm3, mm5); // m13:m03|m12:m02 - interleave first and second lines | |
1207 | |
1208 movq_r2m(mm1, *(dataptr+11)); // write result 2 | |
1209 punpckhwd_r2r(mm0, mm2); // m33:m23|m32:m22 - interleave third and fourth lines | |
1210 | |
1211 movq_r2r(mm5, mm1); // copy first intermediate result | |
1212 punpckldq_r2r(mm2, mm5); // m32:m22|m12:m02 - interleave to produce result 3 | |
1213 | |
1214 movq_m2r(*(dataptr+1), mm0); // m03:m02|m01:m00 - first line, 4x4 | |
1215 punpckhdq_r2r(mm2, mm1); // m33:m23|m13:m03 - interleave to produce result 4 | |
1216 | |
1217 movq_r2m(mm5, *(dataptr+13)); // write result 3 | |
1218 | |
1219 /****** last 4x4 done */ | |
1220 | |
1221 movq_r2m(mm1, *(dataptr+15)); // write result 4, last 4x4 | |
1222 | |
1223 movq_m2r(*(dataptr+5), mm2); // m23:m22|m21:m20 - third line | |
1224 movq_r2r(mm0, mm6); // copy first line | |
1225 | |
1226 punpcklwd_m2r(*(dataptr+3), mm0); // m11:m01|m10:m00 - interleave first and second lines | |
1227 movq_r2r(mm2, mm7); // copy third line | |
1228 | |
1229 punpcklwd_m2r(*(dataptr+7), mm2); // m31:m21|m30:m20 - interleave third and fourth lines | |
1230 movq_r2r(mm0, mm4); // copy first intermediate result | |
1231 | |
1232 | |
1233 | |
1234 movq_m2r(*(dataptr+8), mm1); // n03:n02|n01:n00 - first line | |
1235 punpckldq_r2r(mm2, mm0); // m30:m20|m10:m00 - interleave to produce first result | |
1236 | |
1237 movq_m2r(*(dataptr+12), mm3); // n23:n22|n21:n20 - third line | |
1238 punpckhdq_r2r(mm2, mm4); // m31:m21|m11:m01 - interleave to produce second result | |
1239 | |
1240 punpckhwd_m2r(*(dataptr+3), mm6); // m13:m03|m12:m02 - interleave first and second lines | |
1241 movq_r2r(mm1, mm2); // copy first line | |
1242 | |
1243 punpckhwd_m2r(*(dataptr+7), mm7); // m33:m23|m32:m22 - interleave third and fourth lines | |
1244 movq_r2r(mm6, mm5); // copy first intermediate result | |
1245 | |
1246 movq_r2m(mm0, *(dataptr+8)); // write result 1 | |
1247 punpckhdq_r2r(mm7, mm5); // m33:m23|m13:m03 - produce third result | |
1248 | |
1249 punpcklwd_m2r(*(dataptr+10), mm1); // n11:n01|n10:n00 - interleave first and second lines | |
1250 movq_r2r(mm3, mm0); // copy third line | |
1251 | |
1252 punpckhwd_m2r(*(dataptr+10), mm2); // n13:n03|n12:n02 - interleave first and second lines | |
1253 | |
1254 movq_r2m(mm4, *(dataptr+10)); // write result 2 out | |
1255 punpckldq_r2r(mm7, mm6); // m32:m22|m12:m02 - produce fourth result | |
1256 | |
1257 punpcklwd_m2r(*(dataptr+14), mm3); // n31:n21|n30:n20 - interleave third and fourth lines | |
1258 movq_r2r(mm1, mm4); // copy second intermediate result | |
1259 | |
1260 movq_r2m(mm6, *(dataptr+12)); // write result 3 out | |
1261 punpckldq_r2r(mm3, mm1); // n30:n20|n10:n00 - produce first result | |
1262 | |
1263 punpckhwd_m2r(*(dataptr+14), mm0); // n33:n23|n32:n22 - interleave third and fourth lines | |
1264 movq_r2r(mm2, mm6); // copy second intermediate result | |
1265 | |
1266 movq_r2m(mm5, *(dataptr+14)); // write result 4 out | |
1267 punpckhdq_r2r(mm3, mm4); // n31:n21|n11:n01- produce second result | |
1268 | |
1269 movq_r2m(mm1, *(dataptr+1)); // write result 5 out - (first result for other 4 x 4 block) | |
1270 punpckldq_r2r(mm0, mm2); // n32:n22|n12:n02- produce third result | |
1271 | |
1272 movq_r2m(mm4, *(dataptr+3)); // write result 6 out | |
1273 punpckhdq_r2r(mm0, mm6); // n33:n23|n13:n03 - produce fourth result | |
1274 | |
1275 movq_r2m(mm2, *(dataptr+5)); // write result 7 out | |
1276 | |
1277 movq_m2r(*dataptr, mm0); // m03:m02|m01:m00 - first line, first 4x4 | |
1278 | |
1279 movq_r2m(mm6, *(dataptr+7)); // write result 8 out | |
1280 | |
1281 // Do first 4x4 quadrant, which is used in the beginning of the DCT: | |
1282 | |
1283 movq_m2r(*(dataptr+4), mm7); // m23:m22|m21:m20 - third line | |
1284 movq_r2r(mm0, mm2); // copy first line | |
1285 | |
1286 punpcklwd_m2r(*(dataptr+2), mm0); // m11:m01|m10:m00 - interleave first and second lines | |
1287 movq_r2r(mm7, mm4); // copy third line | |
1288 | |
1289 punpcklwd_m2r(*(dataptr+6), mm7); // m31:m21|m30:m20 - interleave third and fourth lines | |
1290 movq_r2r(mm0, mm1); // copy first intermediate result | |
1291 | |
1292 movq_m2r(*(dataptr+2), mm6); // m13:m12|m11:m10 - second line | |
1293 punpckldq_r2r(mm7, mm0); // m30:m20|m10:m00 - interleave to produce result 1 | |
1294 | |
1295 movq_m2r(*(dataptr+6), mm5); // m33:m32|m31:m30 - fourth line | |
1296 punpckhdq_r2r(mm7, mm1); // m31:m21|m11:m01 - interleave to produce result 2 | |
1297 | |
1298 movq_r2r(mm0, mm7); // write result 1 | |
1299 punpckhwd_r2r(mm6, mm2); // m13:m03|m12:m02 - interleave first and second lines | |
1300 | |
1301 psubw_m2r(*(dataptr+14), mm7); // tmp07=x0-x7 /* Stage 1 */ | |
1302 movq_r2r(mm1, mm6); // write result 2 | |
1303 | |
1304 paddw_m2r(*(dataptr+14), mm0); // tmp00=x0+x7 /* Stage 1 */ | |
1305 punpckhwd_r2r(mm5, mm4); // m33:m23|m32:m22 - interleave third and fourth lines | |
1306 | |
1307 paddw_m2r(*(dataptr+12), mm1); // tmp01=x1+x6 /* Stage 1 */ | |
1308 movq_r2r(mm2, mm3); // copy first intermediate result | |
1309 | |
1310 psubw_m2r(*(dataptr+12), mm6); // tmp06=x1-x6 /* Stage 1 */ | |
1311 punpckldq_r2r(mm4, mm2); // m32:m22|m12:m02 - interleave to produce result 3 | |
1312 | |
1313 movq_r2m(mm7, tmp7); // save tmp07 | |
1314 movq_r2r(mm2, mm5); // write result 3 | |
1315 | |
1316 movq_r2m(mm6, tmp6); // save tmp06 | |
1317 | |
1318 punpckhdq_r2r(mm4, mm3); // m33:m23|m13:m03 - interleave to produce result 4 | |
1319 | |
1320 paddw_m2r(*(dataptr+10), mm2); // tmp02=x2+x5 /* stage 1 */ | |
1321 movq_r2r(mm3, mm4); // write result 4 | |
1322 | |
1323 /************************************************************************************************ | |
1324 End of Transpose 2 | |
1325 ************************************************************************************************/ | |
1326 | |
1327 paddw_m2r(*(dataptr+8), mm3); // tmp03=x3+x4 /* stage 1*/ | |
1328 movq_r2r(mm0, mm7); | |
1329 | |
1330 psubw_m2r(*(dataptr+8), mm4); // tmp04=x3-x4 /* stage 1*/ | |
1331 movq_r2r(mm1, mm6); | |
1332 | |
1333 paddw_r2r(mm3, mm0); // tmp10 = tmp00 + tmp03 /* even 2 */ | |
1334 psubw_r2r(mm3, mm7); // tmp13 = tmp00 - tmp03 /* even 2 */ | |
1335 | |
1336 psubw_r2r(mm2, mm6); // tmp12 = tmp01 - tmp02 /* even 2 */ | |
1337 paddw_r2r(mm2, mm1); // tmp11 = tmp01 + tmp02 /* even 2 */ | |
1338 | |
1339 psubw_m2r(*(dataptr+10), mm5); // tmp05=x2-x5 /* stage 1*/ | |
1340 paddw_r2r(mm7, mm6); // tmp12 + tmp13 | |
1341 | |
1342 /* stage 3 */ | |
1343 | |
1344 movq_m2r(tmp6, mm2); | |
1345 movq_r2r(mm0, mm3); | |
1346 | |
1347 psllw_i2r(2, mm6); // m8 * 2^2 | |
1348 paddw_r2r(mm1, mm0); | |
1349 | |
1350 pmulhw_m2r(RTjpeg_C4, mm6); // z1 | |
1351 psubw_r2r(mm1, mm3); | |
1352 | |
1353 movq_r2m(mm0, *dataptr); | |
1354 movq_r2r(mm7, mm0); | |
1355 | |
1356 /* Odd part */ | |
1357 movq_r2m(mm3, *(dataptr+8)); | |
1358 paddw_r2r(mm5, mm4); // tmp10 | |
1359 | |
1360 movq_m2r(tmp7, mm3); | |
1361 paddw_r2r(mm6, mm0); // tmp32 | |
1362 | |
1363 paddw_r2r(mm2, mm5); // tmp11 | |
1364 psubw_r2r(mm6, mm7); // tmp33 | |
1365 | |
1366 movq_r2m(mm0, *(dataptr+4)); | |
1367 paddw_r2r(mm3, mm2); // tmp12 | |
1368 | |
1369 /* stage 4 */ | |
1370 movq_r2m(mm7, *(dataptr+12)); | |
1371 movq_r2r(mm4, mm1); // copy of tmp10 | |
1372 | |
1373 psubw_r2r(mm2, mm1); // tmp10 - tmp12 | |
1374 psllw_i2r(2, mm4); // m8 * 2^2 | |
1375 | |
1376 movq_m2r(RTjpeg_C2mC6, mm0); | |
1377 psllw_i2r(2, mm1); | |
1378 | |
1379 pmulhw_m2r(RTjpeg_C6, mm1); // z5 | |
1380 psllw_i2r(2, mm2); | |
1381 | |
1382 pmulhw_r2r(mm0, mm4); // z5 | |
1383 | |
1384 /* stage 5 */ | |
1385 | |
1386 pmulhw_m2r(RTjpeg_C2pC6, mm2); | |
1387 psllw_i2r(2, mm5); | |
1388 | |
1389 pmulhw_m2r(RTjpeg_C4, mm5); // z3 | |
1390 movq_r2r(mm3, mm0); // copy tmp7 | |
1391 | |
1392 movq_m2r(*(dataptr+1), mm7); | |
1393 paddw_r2r(mm1, mm4); // z2 | |
1394 | |
1395 paddw_r2r(mm1, mm2); // z4 | |
1396 | |
1397 paddw_r2r(mm5, mm0); // z11 | |
1398 psubw_r2r(mm5, mm3); // z13 | |
1399 | |
1400 /* stage 6 */ | |
1401 | |
1402 movq_r2r(mm3, mm5); // copy z13 | |
1403 psubw_r2r(mm4, mm3); // y3=z13 - z2 | |
1404 | |
1405 paddw_r2r(mm4, mm5); // y5=z13 + z2 | |
1406 movq_r2r(mm0, mm6); // copy z11 | |
1407 | |
1408 movq_r2m(mm3, *(dataptr+6)); //save y3 | |
1409 psubw_r2r(mm2, mm0); // y7=z11 - z4 | |
1410 | |
1411 movq_r2m(mm5, *(dataptr+10)); //save y5 | |
1412 paddw_r2r(mm2, mm6); // y1=z11 + z4 | |
1413 | |
1414 movq_r2m(mm0, *(dataptr+14)); //save y7 | |
1415 | |
1416 /************************************************ | |
1417 * End of 1st 4 rows | |
1418 ************************************************/ | |
1419 | |
1420 movq_m2r(*(dataptr+3), mm1); // load x1 /* stage 1 */ | |
1421 movq_r2r(mm7, mm0); // copy x0 | |
1422 | |
1423 movq_r2m(mm6, *(dataptr+2)); //save y1 | |
1424 | |
1425 movq_m2r(*(dataptr+5), mm2); // load x2 /* stage 1 */ | |
1426 movq_r2r(mm1, mm6); // copy x1 | |
1427 | |
1428 paddw_m2r(*(dataptr+15), mm0); // tmp00 = x0 + x7 | |
1429 | |
1430 movq_m2r(*(dataptr+7), mm3); // load x3 /* stage 1 */ | |
1431 movq_r2r(mm2, mm5); // copy x2 | |
1432 | |
1433 psubw_m2r(*(dataptr+15), mm7); // tmp07 = x0 - x7 | |
1434 movq_r2r(mm3, mm4); // copy x3 | |
1435 | |
1436 paddw_m2r(*(dataptr+13), mm1); // tmp01 = x1 + x6 | |
1437 | |
1438 movq_r2m(mm7, tmp7); // save tmp07 | |
1439 movq_r2r(mm0, mm7); // copy tmp00 | |
1440 | |
1441 psubw_m2r(*(dataptr+13), mm6); // tmp06 = x1 - x6 | |
1442 | |
1443 /* stage 2, Even Part */ | |
1444 | |
1445 paddw_m2r(*(dataptr+9), mm3); // tmp03 = x3 + x4 | |
1446 | |
1447 movq_r2m(mm6, tmp6); // save tmp07 | |
1448 movq_r2r(mm1, mm6); // copy tmp01 | |
1449 | |
1450 paddw_m2r(*(dataptr+11), mm2); // tmp02 = x2 + x5 | |
1451 paddw_r2r(mm3, mm0); // tmp10 = tmp00 + tmp03 | |
1452 | |
1453 psubw_r2r(mm3, mm7); // tmp13 = tmp00 - tmp03 | |
1454 | |
1455 psubw_m2r(*(dataptr+9), mm4); // tmp04 = x3 - x4 | |
1456 psubw_r2r(mm2, mm6); // tmp12 = tmp01 - tmp02 | |
1457 | |
1458 paddw_r2r(mm2, mm1); // tmp11 = tmp01 + tmp02 | |
1459 | |
1460 psubw_m2r(*(dataptr+11), mm5); // tmp05 = x2 - x5 | |
1461 paddw_r2r(mm7, mm6); // tmp12 + tmp13 | |
1462 | |
1463 /* stage 3, Even and stage 4 & 5 even */ | |
1464 | |
1465 movq_m2r(tmp6, mm2); // load tmp6 | |
1466 movq_r2r(mm0, mm3); // copy tmp10 | |
1467 | |
1468 psllw_i2r(2, mm6); // shift z1 | |
1469 paddw_r2r(mm1, mm0); // y0=tmp10 + tmp11 | |
1470 | |
1471 pmulhw_m2r(RTjpeg_C4, mm6); // z1 | |
1472 psubw_r2r(mm1, mm3); // y4=tmp10 - tmp11 | |
1473 | |
1474 movq_r2m(mm0, *(dataptr+1)); //save y0 | |
1475 movq_r2r(mm7, mm0); // copy tmp13 | |
1476 | |
1477 /* odd part */ | |
1478 | |
1479 movq_r2m(mm3, *(dataptr+9)); //save y4 | |
1480 paddw_r2r(mm5, mm4); // tmp10 = tmp4 + tmp5 | |
1481 | |
1482 movq_m2r(tmp7, mm3); // load tmp7 | |
1483 paddw_r2r(mm6, mm0); // tmp32 = tmp13 + z1 | |
1484 | |
1485 paddw_r2r(mm2, mm5); // tmp11 = tmp5 + tmp6 | |
1486 psubw_r2r(mm6, mm7); // tmp33 = tmp13 - z1 | |
1487 | |
1488 movq_r2m(mm0, *(dataptr+5)); //save y2 | |
1489 paddw_r2r(mm3, mm2); // tmp12 = tmp6 + tmp7 | |
1490 | |
1491 /* stage 4 */ | |
1492 | |
1493 movq_r2m(mm7, *(dataptr+13)); //save y6 | |
1494 movq_r2r(mm4, mm1); // copy tmp10 | |
1495 | |
1496 psubw_r2r(mm2, mm1); // tmp10 - tmp12 | |
1497 psllw_i2r(2, mm4); // shift tmp10 | |
1498 | |
1499 movq_m2r(RTjpeg_C2mC6, mm0); // load C2mC6 | |
1500 psllw_i2r(2, mm1); // shift (tmp10-tmp12) | |
1501 | |
1502 pmulhw_m2r(RTjpeg_C6, mm1); // z5 | |
1503 psllw_i2r(2, mm5); // prepare for multiply | |
1504 | |
1505 pmulhw_r2r(mm0, mm4); // multiply by converted real | |
1506 | |
1507 /* stage 5 */ | |
1508 | |
1509 pmulhw_m2r(RTjpeg_C4, mm5); // z3 | |
1510 psllw_i2r(2, mm2); // prepare for multiply | |
1511 | |
1512 pmulhw_m2r(RTjpeg_C2pC6, mm2); // multiply | |
1513 movq_r2r(mm3, mm0); // copy tmp7 | |
1514 | |
1515 movq_m2r(*(dataptr+9), mm7); // m03:m02|m01:m00 - first line (line 4)and copy into mm7 | |
1516 paddw_r2r(mm1, mm4); // z2 | |
1517 | |
1518 paddw_r2r(mm5, mm0); // z11 | |
1519 psubw_r2r(mm5, mm3); // z13 | |
1520 | |
1521 /* stage 6 */ | |
1522 | |
1523 movq_r2r(mm3, mm5); // copy z13 | |
1524 paddw_r2r(mm1, mm2); // z4 | |
1525 | |
1526 movq_r2r(mm0, mm6); // copy z11 | |
1527 psubw_r2r(mm4, mm5); // y3 | |
1528 | |
1529 paddw_r2r(mm2, mm6); // y1 | |
1530 paddw_r2r(mm4, mm3); // y5 | |
1531 | |
1532 movq_r2m(mm5, *(dataptr+7)); //save y3 | |
1533 psubw_r2r(mm2, mm0); // y7=z11 - z4 | |
1534 | |
1535 movq_r2m(mm3, *(dataptr+11)); //save y5 | |
1536 | |
1537 movq_r2m(mm6, *(dataptr+3)); //save y1 | |
1538 | |
1539 movq_r2m(mm0, *(dataptr+15)); //save y7 | |
1540 | |
1541 | |
1542 #endif | |
1543 } | |
1544 | |
1545 #define FIX_1_082392200 ((__s32) 277) /* FIX(1.082392200) */ | |
1546 #define FIX_1_414213562 ((__s32) 362) /* FIX(1.414213562) */ | |
1547 #define FIX_1_847759065 ((__s32) 473) /* FIX(1.847759065) */ | |
1548 #define FIX_2_613125930 ((__s32) 669) /* FIX(2.613125930) */ | |
1549 | |
1550 #define DESCALE(x) (__s16)( ((x)+4) >> 3) | |
1551 | |
1552 /* clip yuv to 16..235 (should be 16..240 for cr/cb but ... */ | |
1553 | |
1554 #define RL(x) ((x)>235) ? 235 : (((x)<16) ? 16 : (x)) | |
1555 #define MULTIPLY(var,const) (((__s32) ((var) * (const)) + 128)>>8) | |
1556 | |
1557 void RTjpeg_idct_init(void) | |
1558 { | |
1559 int i; | |
1560 | |
1561 for(i=0; i<64; i++) | |
1562 { | |
1563 RTjpeg_liqt[i]=((__u64)RTjpeg_liqt[i]*RTjpeg_aan_tab[i])>>32; | |
1564 RTjpeg_ciqt[i]=((__u64)RTjpeg_ciqt[i]*RTjpeg_aan_tab[i])>>32; | |
1565 } | |
1566 } | |
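/* RTjpeg_idct_init() is the counterpart of RTjpeg_dct_init(): it folds the
   AAN scale factors back into the dequantisation tables
   (liqt[i] = liqt[i]*aan_tab[i] >> 32), so the inverse transform itself only
   needs small fixed-point constants (the FIX_* values above and the mmx_t
   constants inside RTjpeg_idct()). */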
1567 | |
1568 void RTjpeg_idct(__u8 *odata, __s16 *data, int rskip) | |
1569 { | |
28298 | 1570 #if HAVE_MMX |
3802 | 1571 |
12928 | 1572 static mmx_t fix_141 = {0x5a825a825a825a82LL}; |
1573 static mmx_t fix_184n261 = {0xcf04cf04cf04cf04LL}; | |
1574 static mmx_t fix_184 = {0x7641764176417641LL}; | |
1575 static mmx_t fix_n184 = {0x896f896f896f896fLL}; | |
1576 static mmx_t fix_108n184 = {0xcf04cf04cf04cf04LL}; | |
3802 | 1577 |
1578 mmx_t workspace[64]; | |
1579 mmx_t *wsptr = workspace; | |
1580 register mmx_t *dataptr = (mmx_t *)odata; | |
1581 mmx_t *idata = (mmx_t *)data; | |
1582 | |
1583 rskip = rskip>>3; | |
1584 /* | |
1585 * Perform inverse DCT on one block of coefficients. | |
1586 */ | |
1587 | |
1588 /* Odd part */ | |
1589 | |
1590 movq_m2r(*(idata+10), mm1); // load idata[DCTSIZE*5] | |
1591 | |
1592 movq_m2r(*(idata+6), mm0); // load idata[DCTSIZE*3] | |
1593 | |
1594 movq_m2r(*(idata+2), mm3); // load idata[DCTSIZE*1] | |
1595 | |
1596 movq_r2r(mm1, mm2); // copy tmp6 /* phase 6 */ | |
1597 | |
1598 movq_m2r(*(idata+14), mm4); // load idata[DCTSIZE*7] | |
1599 | |
1600 paddw_r2r(mm0, mm1); // z13 = tmp6 + tmp5; | |
1601 | |
1602 psubw_r2r(mm0, mm2); // z10 = tmp6 - tmp5 | |
1603 | |
1604 psllw_i2r(2, mm2); // shift z10 | |
1605 movq_r2r(mm2, mm0); // copy z10 | |
1606 | |
1607 pmulhw_m2r(fix_184n261, mm2); // MULTIPLY( z12, FIX_1_847759065); /* 2*c2 */ | |
1608 movq_r2r(mm3, mm5); // copy tmp4 | |
1609 | |
1610 pmulhw_m2r(fix_n184, mm0); // MULTIPLY(z10, -FIX_1_847759065); /* 2*c2 */ | |
1611 paddw_r2r(mm4, mm3); // z11 = tmp4 + tmp7; | |
1612 | |
1613 movq_r2r(mm3, mm6); // copy z11 /* phase 5 */ | |
1614 psubw_r2r(mm4, mm5); // z12 = tmp4 - tmp7; | |
1615 | |
1616 psubw_r2r(mm1, mm6); // z11-z13 | |
1617 psllw_i2r(2, mm5); // shift z12 | |
1618 | |
1619 movq_m2r(*(idata+12), mm4); // load idata[DCTSIZE*6], even part | |
1620 movq_r2r(mm5, mm7); // copy z12 | |
1621 | |
1622 pmulhw_m2r(fix_108n184, mm5); // MULT(z12, (FIX_1_08-FIX_1_84)) //- z5; /* 2*(c2-c6) */ even part | |
1623 paddw_r2r(mm1, mm3); // tmp7 = z11 + z13; | |
1624 | |
1625 //ok | |
1626 | |
1627 /* Even part */ | |
1628 pmulhw_m2r(fix_184, mm7); // MULTIPLY(z10,(FIX_1_847759065 - FIX_2_613125930)) //+ z5; /* -2*(c2+c6) */ | |
1629 psllw_i2r(2, mm6); | |
1630 | |
1631 movq_m2r(*(idata+4), mm1); // load idata[DCTSIZE*2] | |
1632 | |
1633 paddw_r2r(mm5, mm0); // tmp10 | |
1634 | |
1635 paddw_r2r(mm7, mm2); // tmp12 | |
1636 | |
1637 pmulhw_m2r(fix_141, mm6); // tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); /* 2*c4 */ | |
1638 psubw_r2r(mm3, mm2); // tmp6 = tmp12 - tmp7 | |
1639 | |
1640 movq_r2r(mm1, mm5); // copy tmp1 | |
1641 paddw_r2r(mm4, mm1); // tmp13= tmp1 + tmp3; /* phases 5-3 */ | |
1642 | |
1643 psubw_r2r(mm4, mm5); // tmp1-tmp3 | |
1644 psubw_r2r(mm2, mm6); // tmp5 = tmp11 - tmp6; | |
1645 | |
1646 movq_r2m(mm1, *(wsptr)); // save tmp13 in workspace | |
1647 psllw_i2r(2, mm5); // shift tmp1-tmp3 | |
1648 | |
1649 movq_m2r(*(idata), mm7); // load idata[DCTSIZE*0] | |
1650 | |
1651 pmulhw_m2r(fix_141, mm5); // MULTIPLY(tmp1 - tmp3, FIX_1_414213562) | |
1652 paddw_r2r(mm6, mm0); // tmp4 = tmp10 + tmp5; | |
1653 | |
1654 movq_m2r(*(idata+8), mm4); // load idata[DCTSIZE*4] | |
1655 | |
1656 psubw_r2r(mm1, mm5); // tmp12 = MULTIPLY(tmp1 - tmp3, FIX_1_414213562) - tmp13; /* 2*c4 */ | |
1657 | |
1658 movq_r2m(mm0, *(wsptr+4)); // save tmp4 in workspace | |
1659 movq_r2r(mm7, mm1); // copy tmp0 /* phase 3 */ | |
1660 | |
1661 movq_r2m(mm5, *(wsptr+2)); // save tmp12 in workspace | |
1662 psubw_r2r(mm4, mm1); // tmp11 = tmp0 - tmp2; | |
1663 | |
1664 paddw_r2r(mm4, mm7); // tmp10 = tmp0 + tmp2; | |
1665 movq_r2r(mm1, mm5); // copy tmp11 | |
1666 | |
1667 paddw_m2r(*(wsptr+2), mm1); // tmp1 = tmp11 + tmp12; | |
1668 movq_r2r(mm7, mm4); // copy tmp10 /* phase 2 */ | |
1669 | |
1670 paddw_m2r(*(wsptr), mm7); // tmp0 = tmp10 + tmp13; | |
1671 | |
1672 psubw_m2r(*(wsptr), mm4); // tmp3 = tmp10 - tmp13; | |
1673 movq_r2r(mm7, mm0); // copy tmp0 | |
1674 | |
1675 psubw_m2r(*(wsptr+2), mm5); // tmp2 = tmp11 - tmp12; | |
1676 paddw_r2r(mm3, mm7); // wsptr[DCTSIZE*0] = (int) (tmp0 + tmp7); | |
1677 | |
1678 psubw_r2r(mm3, mm0); // wsptr[DCTSIZE*7] = (int) (tmp0 - tmp7); | |
1679 | |
1680 movq_r2m(mm7, *(wsptr)); // wsptr[DCTSIZE*0] | |
1681 movq_r2r(mm1, mm3); // copy tmp1 | |
1682 | |
1683 movq_r2m(mm0, *(wsptr+14)); // wsptr[DCTSIZE*7] | |
1684 paddw_r2r(mm2, mm1); // wsptr[DCTSIZE*1] = (int) (tmp1 + tmp6); | |
1685 | |
1686 psubw_r2r(mm2, mm3); // wsptr[DCTSIZE*6] = (int) (tmp1 - tmp6); | |
1687 | |
1688 movq_r2m(mm1, *(wsptr+2)); // wsptr[DCTSIZE*1] | |
1689 movq_r2r(mm4, mm1); // copy tmp3 | |
1690 | |
1691 movq_r2m(mm3, *(wsptr+12)); // wsptr[DCTSIZE*6] | |
1692 | |
1693 paddw_m2r(*(wsptr+4), mm4); // wsptr[DCTSIZE*4] = (int) (tmp3 + tmp4); | |
1694 | |
1695 psubw_m2r(*(wsptr+4), mm1); // wsptr[DCTSIZE*3] = (int) (tmp3 - tmp4); | |
1696 | |
1697 movq_r2m(mm4, *(wsptr+8)); | |
1698 movq_r2r(mm5, mm7); // copy tmp2 | |
1699 | |
1700 paddw_r2r(mm6, mm5); // wsptr[DCTSIZE*2] = (int) (tmp2 + tmp5) | |
1701 | |
1702 movq_r2m(mm1, *(wsptr+6)); | |
1703 psubw_r2r(mm6, mm7); // wsptr[DCTSIZE*5] = (int) (tmp2 - tmp5); | |
1704 | |
1705 movq_r2m(mm5, *(wsptr+4)); | |
1706 | |
1707 movq_r2m(mm7, *(wsptr+10)); | |
1708 | |
1709 //ok | |
1710 | |
1711 | |
1712 /*****************************************************************/ | |
1713 | |
1714 idata++; | |
1715 wsptr++; | |
1716 | |
1717 /*****************************************************************/ | |
1718 | |
1719 movq_m2r(*(idata+10), mm1); // load idata[DCTSIZE*5] | |
1720 | |
1721 movq_m2r(*(idata+6), mm0); // load idata[DCTSIZE*3] | |
1722 | |
1723 movq_m2r(*(idata+2), mm3); // load idata[DCTSIZE*1] | |
1724 movq_r2r(mm1, mm2); // copy tmp6 /* phase 6 */ | |
1725 | |
1726 movq_m2r(*(idata+14), mm4); // load idata[DCTSIZE*7] | |
1727 paddw_r2r(mm0, mm1); // z13 = tmp6 + tmp5; | |
1728 | |
1729 psubw_r2r(mm0, mm2); // z10 = tmp6 - tmp5 | |
1730 | |
1731 psllw_i2r(2, mm2); // shift z10 | |
1732 movq_r2r(mm2, mm0); // copy z10 | |
1733 | |
1734 pmulhw_m2r(fix_184n261, mm2); // MULTIPLY( z12, FIX_1_847759065); /* 2*c2 */ | |
1735 movq_r2r(mm3, mm5); // copy tmp4 | |
1736 | |
1737 pmulhw_m2r(fix_n184, mm0); // MULTIPLY(z10, -FIX_1_847759065); /* 2*c2 */ | |
1738 paddw_r2r(mm4, mm3); // z11 = tmp4 + tmp7; | |
1739 | |
1740 movq_r2r(mm3, mm6); // copy z11 /* phase 5 */ | |
1741 psubw_r2r(mm4, mm5); // z12 = tmp4 - tmp7; | |
1742 | |
1743 psubw_r2r(mm1, mm6); // z11-z13 | |
1744 psllw_i2r(2, mm5); // shift z12 | |
1745 | |
1746 movq_m2r(*(idata+12), mm4); // load idata[DCTSIZE*6], even part | |
1747 movq_r2r(mm5, mm7); // copy z12 | |
1748 | |
1749 pmulhw_m2r(fix_108n184, mm5); // MULT(z12, (FIX_1_08-FIX_1_84)) //- z5; /* 2*(c2-c6) */ even part | |
1750 paddw_r2r(mm1, mm3); // tmp7 = z11 + z13; | |
1751 | |
1752 //ok | |
1753 | |
1754 /* Even part */ | |
1755 pmulhw_m2r(fix_184, mm7); // MULTIPLY(z10,(FIX_1_847759065 - FIX_2_613125930)) //+ z5; /* -2*(c2+c6) */ | |
1756 psllw_i2r(2, mm6); | |
1757 | |
1758 movq_m2r(*(idata+4), mm1); // load idata[DCTSIZE*2] | |
1759 | |
1760 paddw_r2r(mm5, mm0); // tmp10 | |
1761 | |
1762 paddw_r2r(mm7, mm2); // tmp12 | |
1763 | |
1764 pmulhw_m2r(fix_141, mm6); // tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); /* 2*c4 */ | |
1765 psubw_r2r(mm3, mm2); // tmp6 = tmp12 - tmp7 | |
1766 | |
1767 movq_r2r(mm1, mm5); // copy tmp1 | |
1768 paddw_r2r(mm4, mm1); // tmp13= tmp1 + tmp3; /* phases 5-3 */ | |
1769 | |
1770 psubw_r2r(mm4, mm5); // tmp1-tmp3 | |
1771 psubw_r2r(mm2, mm6); // tmp5 = tmp11 - tmp6; | |
1772 | |
1773 movq_r2m(mm1, *(wsptr)); // save tmp13 in workspace | |
1774 psllw_i2r(2, mm5); // shift tmp1-tmp3 | |
1775 | |
1776 movq_m2r(*(idata), mm7); // load idata[DCTSIZE*0] | |
1777 paddw_r2r(mm6, mm0); // tmp4 = tmp10 + tmp5; | |
1778 | |
1779 pmulhw_m2r(fix_141, mm5); // MULTIPLY(tmp1 - tmp3, FIX_1_414213562) | |
1780 | |
1781 movq_m2r(*(idata+8), mm4); // load idata[DCTSIZE*4] | |
1782 | |
1783 psubw_r2r(mm1, mm5); // tmp12 = MULTIPLY(tmp1 - tmp3, FIX_1_414213562) - tmp13; /* 2*c4 */ | |
1784 | |
1785 movq_r2m(mm0, *(wsptr+4)); // save tmp4 in workspace | |
1786 movq_r2r(mm7, mm1); // copy tmp0 /* phase 3 */ | |
1787 | |
1788 movq_r2m(mm5, *(wsptr+2)); // save tmp12 in workspace | |
1789 psubw_r2r(mm4, mm1); // tmp11 = tmp0 - tmp2; | |
1790 | |
1791 paddw_r2r(mm4, mm7); // tmp10 = tmp0 + tmp2; | |
1792 movq_r2r(mm1, mm5); // copy tmp11 | |
1793 | |
1794 paddw_m2r(*(wsptr+2), mm1); // tmp1 = tmp11 + tmp12; | |
1795 movq_r2r(mm7, mm4); // copy tmp10 /* phase 2 */ | |
1796 | |
1797 paddw_m2r(*(wsptr), mm7); // tmp0 = tmp10 + tmp13; | |
1798 | |
1799 psubw_m2r(*(wsptr), mm4); // tmp3 = tmp10 - tmp13; | |
1800 movq_r2r(mm7, mm0); // copy tmp0 | |
1801 | |
1802 psubw_m2r(*(wsptr+2), mm5); // tmp2 = tmp11 - tmp12; | |
1803 paddw_r2r(mm3, mm7); // wsptr[DCTSIZE*0] = (int) (tmp0 + tmp7); | |
1804 | |
1805 psubw_r2r(mm3, mm0); // wsptr[DCTSIZE*7] = (int) (tmp0 - tmp7); | |
1806 | |
1807 movq_r2m(mm7, *(wsptr)); // wsptr[DCTSIZE*0] | |
1808 movq_r2r(mm1, mm3); // copy tmp1 | |
1809 | |
1810 movq_r2m(mm0, *(wsptr+14)); // wsptr[DCTSIZE*7] | |
1811 paddw_r2r(mm2, mm1); // wsptr[DCTSIZE*1] = (int) (tmp1 + tmp6); | |
1812 | |
1813 psubw_r2r(mm2, mm3); // wsptr[DCTSIZE*6] = (int) (tmp1 - tmp6); | |
1814 | |
1815 movq_r2m(mm1, *(wsptr+2)); // wsptr[DCTSIZE*1] | |
1816 movq_r2r(mm4, mm1); // copy tmp3 | |
1817 | |
1818 movq_r2m(mm3, *(wsptr+12)); // wsptr[DCTSIZE*6] | |
1819 | |
1820 paddw_m2r(*(wsptr+4), mm4); // wsptr[DCTSIZE*4] = (int) (tmp3 + tmp4); | |
1821 | |
1822 psubw_m2r(*(wsptr+4), mm1); // wsptr[DCTSIZE*3] = (int) (tmp3 - tmp4); | |
1823 | |
1824 movq_r2m(mm4, *(wsptr+8)); | |
1825 movq_r2r(mm5, mm7); // copy tmp2 | |
1826 | |
1827 paddw_r2r(mm6, mm5); // wsptr[DCTSIZE*2] = (int) (tmp2 + tmp5) | |
1828 | |
1829 movq_r2m(mm1, *(wsptr+6)); | |
1830 psubw_r2r(mm6, mm7); // wsptr[DCTSIZE*5] = (int) (tmp2 - tmp5); | |
1831 | |
1832 movq_r2m(mm5, *(wsptr+4)); | |
1833 | |
1834 movq_r2m(mm7, *(wsptr+10)); | |
1835 | |
1836 /*****************************************************************/ | |
1837 | |
1838 /* Pass 2: process rows from work array, store into output array. */ | |
1839 /* Note that we must descale the results by a factor of 8 == 2**3, */ | |
1840 /* and also undo the PASS1_BITS scaling. */ | |
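/*
   The descaling described above is done in this MMX path by the
   psraw_i2r(3, ...) arithmetic right shifts applied to the result words
   further down (a division by 8), and the range limiting by packuswb,
   which saturates signed words to the unsigned byte range 0..255.
   For example, a workspace value of 2000 becomes 2000 >> 3 = 250, while
   -20 becomes -3 and is then clamped to 0.  The scalar fallback at the
   end of this function expresses the same step with the DESCALE() and
   RL() macros, which are defined elsewhere in RTjpeg (not shown here).
*/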
1841 | |
1842 /*****************************************************************/ | |
1843 /* Even part */ | |
1844 | |
1845 wsptr--; | |
1846 | |
1847 // tmp10 = ((DCTELEM) wsptr[0] + (DCTELEM) wsptr[4]); | |
1848 // tmp13 = ((DCTELEM) wsptr[2] + (DCTELEM) wsptr[6]); | |
1849 // tmp11 = ((DCTELEM) wsptr[0] - (DCTELEM) wsptr[4]); | |
1850 // tmp14 = ((DCTELEM) wsptr[2] - (DCTELEM) wsptr[6]); | |
1851 movq_m2r(*(wsptr), mm0); // wsptr[0,0],[0,1],[0,2],[0,3] | |
1852 | |
1853 movq_m2r(*(wsptr+1), mm1); // wsptr[0,4],[0,5],[0,6],[0,7] | |
1854 movq_r2r(mm0, mm2); | |
1855 | |
1856 movq_m2r(*(wsptr+2), mm3); // wsptr[1,0],[1,1],[1,2],[1,3] | |
1857 paddw_r2r(mm1, mm0); // wsptr[0,tmp10],[xxx],[0,tmp13],[xxx] | |
1858 | |
1859 movq_m2r(*(wsptr+3), mm4); // wsptr[1,4],[1,5],[1,6],[1,7] | |
1860 psubw_r2r(mm1, mm2); // wsptr[0,tmp11],[xxx],[0,tmp14],[xxx] | |
1861 | |
1862 movq_r2r(mm0, mm6); | |
1863 movq_r2r(mm3, mm5); | |
1864 | |
1865 paddw_r2r(mm4, mm3); // wsptr[1,tmp10],[xxx],[1,tmp13],[xxx] | |
1866 movq_r2r(mm2, mm1); | |
1867 | |
1868 psubw_r2r(mm4, mm5); // wsptr[1,tmp11],[xxx],[1,tmp14],[xxx] | |
1869 punpcklwd_r2r(mm3, mm0); // wsptr[0,tmp10],[1,tmp10],[xxx],[xxx] | |
1870 | |
1871 movq_m2r(*(wsptr+7), mm7); // wsptr[3,4],[3,5],[3,6],[3,7] | |
1872 punpckhwd_r2r(mm3, mm6); // wsptr[0,tmp13],[1,tmp13],[xxx],[xxx] | |
1873 | |
1874 movq_m2r(*(wsptr+4), mm3); // wsptr[2,0],[2,1],[2,2],[2,3] | |
1875 punpckldq_r2r(mm6, mm0); // wsptr[0,tmp10],[1,tmp10],[0,tmp13],[1,tmp13] | |
1876 | |
1877 punpcklwd_r2r(mm5, mm1); // wsptr[0,tmp11],[1,tmp11],[xxx],[xxx] | |
1878 movq_r2r(mm3, mm4); | |
1879 | |
1880 movq_m2r(*(wsptr+6), mm6); // wsptr[3,0],[3,1],[3,2],[3,3] | |
1881 punpckhwd_r2r(mm5, mm2); // wsptr[0,tmp14],[1,tmp14],[xxx],[xxx] | |
1882 | |
1883 movq_m2r(*(wsptr+5), mm5); // wsptr[2,4],[2,5],[2,6],[2,7] | |
1884 punpckldq_r2r(mm2, mm1); // wsptr[0,tmp11],[1,tmp11],[0,tmp14],[1,tmp14] | |
1885 | |
1886 | |
1887 paddw_r2r(mm5, mm3); // wsptr[2,tmp10],[xxx],[2,tmp13],[xxx] | |
1888 movq_r2r(mm6, mm2); | |
1889 | |
1890 psubw_r2r(mm5, mm4); // wsptr[2,tmp11],[xxx],[2,tmp14],[xxx] | |
1891 paddw_r2r(mm7, mm6); // wsptr[3,tmp10],[xxx],[3,tmp13],[xxx] | |
1892 | |
1893 movq_r2r(mm3, mm5); | |
1894 punpcklwd_r2r(mm6, mm3); // wsptr[2,tmp10],[3,tmp10],[xxx],[xxx] | |
1895 | |
1896 psubw_r2r(mm7, mm2); // wsptr[3,tmp11],[xxx],[3,tmp14],[xxx] | |
1897 punpckhwd_r2r(mm6, mm5); // wsptr[2,tmp13],[3,tmp13],[xxx],[xxx] | |
1898 | |
1899 movq_r2r(mm4, mm7); | |
1900 punpckldq_r2r(mm5, mm3); // wsptr[2,tmp10],[3,tmp10],[2,tmp13],[3,tmp13] | |
1901 | |
1902 punpcklwd_r2r(mm2, mm4); // wsptr[2,tmp11],[3,tmp11],[xxx],[xxx] | |
1903 | |
1904 punpckhwd_r2r(mm2, mm7); // wsptr[2,tmp14],[3,tmp14],[xxx],[xxx] | |
1905 | |
1906 punpckldq_r2r(mm7, mm4); // wsptr[2,tmp11],[3,tmp11],[2,tmp14],[3,tmp14] | |
1907 movq_r2r(mm1, mm6); | |
1908 | |
1909 //ok | |
1910 | |
1911 // mm0 = ;wsptr[0,tmp10],[1,tmp10],[0,tmp13],[1,tmp13] | |
1912 // mm1 = ;wsptr[0,tmp11],[1,tmp11],[0,tmp14],[1,tmp14] | |
1913 | |
1914 | |
1915 movq_r2r(mm0, mm2); | |
1916 punpckhdq_r2r(mm4, mm6); // wsptr[0,tmp14],[1,tmp14],[2,tmp14],[3,tmp14] | |
1917 | |
1918 punpckldq_r2r(mm4, mm1); // wsptr[0,tmp11],[1,tmp11],[2,tmp11],[3,tmp11] | |
1919 psllw_i2r(2, mm6); | |
1920 | |
1921 pmulhw_m2r(fix_141, mm6); | |
1922 punpckldq_r2r(mm3, mm0); // wsptr[0,tmp10],[1,tmp10],[2,tmp10],[3,tmp10] | |
1923 | |
1924 punpckhdq_r2r(mm3, mm2); // wsptr[0,tmp13],[1,tmp13],[2,tmp13],[3,tmp13] | |
1925 movq_r2r(mm0, mm7); | |
1926 | |
1927 // tmp0 = tmp10 + tmp13; | |
1928 // tmp3 = tmp10 - tmp13; | |
1929 paddw_r2r(mm2, mm0); // [0,tmp0],[1,tmp0],[2,tmp0],[3,tmp0] | |
1930 psubw_r2r(mm2, mm7); // [0,tmp3],[1,tmp3],[2,tmp3],[3,tmp3] | |
1931 | |
1932 // tmp12 = MULTIPLY(tmp14, FIX_1_414213562) - tmp13; | |
1933 psubw_r2r(mm2, mm6); // wsptr[0,tmp12],[1,tmp12],[2,tmp12],[3,tmp12] | |
1934 // tmp1 = tmp11 + tmp12; | |
1935 // tmp2 = tmp11 - tmp12; | |
1936 movq_r2r(mm1, mm5); | |
1937 | |
1938 //OK | |
1939 | |
1940 /* Odd part */ | |
1941 | |
1942 // z13 = (DCTELEM) wsptr[5] + (DCTELEM) wsptr[3]; | |
1943 // z10 = (DCTELEM) wsptr[5] - (DCTELEM) wsptr[3]; | |
1944 // z11 = (DCTELEM) wsptr[1] + (DCTELEM) wsptr[7]; | |
1945 // z12 = (DCTELEM) wsptr[1] - (DCTELEM) wsptr[7]; | |
1946 movq_m2r(*(wsptr), mm3); // wsptr[0,0],[0,1],[0,2],[0,3] | |
1947 paddw_r2r(mm6, mm1); // [0,tmp1],[1,tmp1],[2,tmp1],[3,tmp1] | |
1948 | |
1949 movq_m2r(*(wsptr+1), mm4); // wsptr[0,4],[0,5],[0,6],[0,7] | |
1950 psubw_r2r(mm6, mm5); // [0,tmp2],[1,tmp2],[2,tmp2],[3,tmp2] | |
1951 | |
1952 movq_r2r(mm3, mm6); | |
1953 punpckldq_r2r(mm4, mm3); // wsptr[0,0],[0,1],[0,4],[0,5] | |
1954 | |
1955 punpckhdq_r2r(mm6, mm4); // wsptr[0,6],[0,7],[0,2],[0,3] | |
1956 movq_r2r(mm3, mm2); | |
1957 | |
1958 //Save tmp0 and tmp1 in wsptr | |
1959 movq_r2m(mm0, *(wsptr)); // save tmp0 | |
1960 paddw_r2r(mm4, mm2); // wsptr[xxx],[0,z11],[xxx],[0,z13] | |
1961 | |
1962 | |
1963 //Continue with z10 --- z13 | |
1964 movq_m2r(*(wsptr+2), mm6); // wsptr[1,0],[1,1],[1,2],[1,3] | |
1965 psubw_r2r(mm4, mm3); // wsptr[xxx],[0,z12],[xxx],[0,z10] | |
1966 | |
1967 movq_m2r(*(wsptr+3), mm0); // wsptr[1,4],[1,5],[1,6],[1,7] | |
1968 movq_r2r(mm6, mm4); | |
1969 | |
1970 movq_r2m(mm1, *(wsptr+1)); // save tmp1 | |
1971 punpckldq_r2r(mm0, mm6); // wsptr[1,0],[1,1],[1,4],[1,5] | |
1972 | |
1973 punpckhdq_r2r(mm4, mm0); // wsptr[1,6],[1,7],[1,2],[1,3] | |
1974 movq_r2r(mm6, mm1); | |
1975 | |
1976 //Save tmp2 and tmp3 in wsptr | |
1977 paddw_r2r(mm0, mm6); // wsptr[xxx],[1,z11],[xxx],[1,z13] | |
1978 movq_r2r(mm2, mm4); | |
1979 | |
1980 //Continue with z10 --- z13 | |
1981 movq_r2m(mm5, *(wsptr+2)); // save tmp2 | |
1982 punpcklwd_r2r(mm6, mm2); // wsptr[xxx],[xxx],[0,z11],[1,z11] | |
1983 | |
1984 psubw_r2r(mm0, mm1); // wsptr[xxx],[1,z12],[xxx],[1,z10] | |
1985 punpckhwd_r2r(mm6, mm4); // wsptr[xxx],[xxx],[0,z13],[1,z13] | |
1986 | |
1987 movq_r2r(mm3, mm0); | |
1988 punpcklwd_r2r(mm1, mm3); // wsptr[xxx],[xxx],[0,z12],[1,z12] | |
1989 | |
1990 movq_r2m(mm7, *(wsptr+3)); // save tmp3 | |
1991 punpckhwd_r2r(mm1, mm0); // wsptr[xxx],[xxx],[0,z10],[1,z10] | |
1992 | |
1993 movq_m2r(*(wsptr+4), mm6); // wsptr[2,0],[2,1],[2,2],[2,3] | |
1994 punpckhdq_r2r(mm2, mm0); // wsptr[0,z10],[1,z10],[0,z11],[1,z11] | |
1995 | |
1996 movq_m2r(*(wsptr+5), mm7); // wsptr[2,4],[2,5],[2,6],[2,7] | |
1997 punpckhdq_r2r(mm4, mm3); // wsptr[0,z12],[1,z12],[0,z13],[1,z13] | |
1998 | |
1999 movq_m2r(*(wsptr+6), mm1); // wsptr[3,0],[3,1],[3,2],[3,3] | |
2000 movq_r2r(mm6, mm4); | |
2001 | |
2002 punpckldq_r2r(mm7, mm6); // wsptr[2,0],[2,1],[2,4],[2,5] | |
2003 movq_r2r(mm1, mm5); | |
2004 | |
2005 punpckhdq_r2r(mm4, mm7); // wsptr[2,6],[2,7],[2,2],[2,3] | |
2006 movq_r2r(mm6, mm2); | |
2007 | |
2008 movq_m2r(*(wsptr+7), mm4); // wsptr[3,4],[3,5],[3,6],[3,7] | |
2009 paddw_r2r(mm7, mm6); // wsptr[xxx],[2,z11],[xxx],[2,z13] | |
2010 | |
2011 psubw_r2r(mm7, mm2); // wsptr[xxx],[2,z12],[xxx],[2,z10] | |
2012 punpckldq_r2r(mm4, mm1); // wsptr[3,0],[3,1],[3,4],[3,5] | |
2013 | |
2014 punpckhdq_r2r(mm5, mm4); // wsptr[3,6],[3,7],[3,2],[3,3] | |
2015 movq_r2r(mm1, mm7); | |
2016 | |
2017 paddw_r2r(mm4, mm1); // wsptr[xxx],[3,z11],[xxx],[3,z13] | |
2018 psubw_r2r(mm4, mm7); // wsptr[xxx],[3,z12],[xxx],[3,z10] | |
2019 | |
2020 movq_r2r(mm6, mm5); | |
2021 punpcklwd_r2r(mm1, mm6); // wsptr[xxx],[xxx],[2,z11],[3,z11] | |
2022 | |
2023 punpckhwd_r2r(mm1, mm5); // wsptr[xxx],[xxx],[2,z13],[3,z13] | |
2024 movq_r2r(mm2, mm4); | |
2025 | |
2026 punpcklwd_r2r(mm7, mm2); // wsptr[xxx],[xxx],[2,z12],[3,z12] | |
2027 | |
2028 punpckhwd_r2r(mm7, mm4); // wsptr[xxx],[xxx],[2,z10],[3,z10] | |
2029 | |
2030 punpckhdq_r2r(mm6, mm4); // wsptr[2,z10],[3,z10],[2,z11],[3,z11] | |
2031 | |
2032 punpckhdq_r2r(mm5, mm2); // wsptr[2,z12],[3,z12],[2,z13],[3,z13] | |
2033 movq_r2r(mm0, mm5); | |
2034 | |
2035 punpckldq_r2r(mm4, mm0); // wsptr[0,z10],[1,z10],[2,z10],[3,z10] | |
2036 | |
2037 punpckhdq_r2r(mm4, mm5); // wsptr[0,z11],[1,z11],[2,z11],[3,z11] | |
2038 movq_r2r(mm3, mm4); | |
2039 | |
2040 punpckhdq_r2r(mm2, mm4); // wsptr[0,z13],[1,z13],[2,z13],[3,z13] | |
2041 movq_r2r(mm5, mm1); | |
2042 | |
2043 punpckldq_r2r(mm2, mm3); // wsptr[0,z12],[1,z12],[2,z12],[3,z12] | |
2044 // tmp7 = z11 + z13; /* phase 5 */ | |
2045 // tmp8 = z11 - z13; /* phase 5 */ | |
2046 psubw_r2r(mm4, mm1); // tmp8 | |
2047 | |
2048 paddw_r2r(mm4, mm5); // tmp7 | |
2049 // tmp21 = MULTIPLY(tmp8, FIX_1_414213562); /* 2*c4 */ | |
2050 psllw_i2r(2, mm1); | |
2051 | |
2052 psllw_i2r(2, mm0); | |
2053 | |
2054 pmulhw_m2r(fix_141, mm1); // tmp21 | |
2055 // tmp20 = MULTIPLY(z12, (FIX_1_082392200- FIX_1_847759065)) /* 2*(c2-c6) */ | |
2056 // + MULTIPLY(z10, - FIX_1_847759065); /* 2*c2 */ | |
2057 psllw_i2r(2, mm3); | |
2058 movq_r2r(mm0, mm7); | |
2059 | |
2060 pmulhw_m2r(fix_n184, mm7); | |
2061 movq_r2r(mm3, mm6); | |
2062 | |
2063 movq_m2r(*(wsptr), mm2); // tmp0,final1 | |
2064 | |
2065 pmulhw_m2r(fix_108n184, mm6); | |
2066 // tmp22 = MULTIPLY(z10,(FIX_1_847759065 - FIX_2_613125930)) /* -2*(c2+c6) */ | |
2067 // + MULTIPLY(z12, FIX_1_847759065); /* 2*c2 */ | |
2068 movq_r2r(mm2, mm4); // final1 | |
2069 | |
2070 pmulhw_m2r(fix_184n261, mm0); | |
2071 paddw_r2r(mm5, mm2); // tmp0+tmp7,final1 | |
2072 | |
2073 pmulhw_m2r(fix_184, mm3); | |
2074 psubw_r2r(mm5, mm4); // tmp0-tmp7,final1 | |
2075 | |
2076 // tmp6 = tmp22 - tmp7; /* phase 2 */ | |
2077 psraw_i2r(3, mm2); // outptr[0,0],[1,0],[2,0],[3,0],final1 | |
2078 | |
2079 paddw_r2r(mm6, mm7); // tmp20 | |
2080 psraw_i2r(3, mm4); // outptr[0,7],[1,7],[2,7],[3,7],final1 | |
2081 | |
2082 paddw_r2r(mm0, mm3); // tmp22 | |
2083 | |
2084 // tmp5 = tmp21 - tmp6; | |
2085 psubw_r2r(mm5, mm3); // tmp6 | |
2086 | |
2087 // tmp4 = tmp20 + tmp5; | |
2088 movq_m2r(*(wsptr+1), mm0); // tmp1,final2 | |
2089 psubw_r2r(mm3, mm1); // tmp5 | |
2090 | |
2091 movq_r2r(mm0, mm6); // final2 | |
2092 paddw_r2r(mm3, mm0); // tmp1+tmp6,final2 | |
2093 | |
2094 /* Final output stage: scale down by a factor of 8 and range-limit */ | |
2095 | |
2096 | |
2097 // outptr[0] = range_limit[IDESCALE(tmp0 + tmp7, PASS1_BITS+3) | |
2098 // & RANGE_MASK]; | |
2099 // outptr[7] = range_limit[IDESCALE(tmp0 - tmp7, PASS1_BITS+3) | |
2100 // & RANGE_MASK]; final1 | |
2101 | |
2102 | |
2103 // outptr[1] = range_limit[IDESCALE(tmp1 + tmp6, PASS1_BITS+3) | |
2104 // & RANGE_MASK]; | |
2105 // outptr[6] = range_limit[IDESCALE(tmp1 - tmp6, PASS1_BITS+3) | |
2106 // & RANGE_MASK]; final2 | |
2107 psubw_r2r(mm3, mm6); // tmp1-tmp6,final2 | |
2108 psraw_i2r(3, mm0); // outptr[0,1],[1,1],[2,1],[3,1] | |
2109 | |
2110 psraw_i2r(3, mm6); // outptr[0,6],[1,6],[2,6],[3,6] | |
2111 | |
2112 packuswb_r2r(mm4, mm0); // out[0,1],[1,1],[2,1],[3,1],[0,7],[1,7],[2,7],[3,7] | |
2113 | |
2114 movq_m2r(*(wsptr+2), mm5); // tmp2,final3 | |
2115 packuswb_r2r(mm6, mm2); // out[0,0],[1,0],[2,0],[3,0],[0,6],[1,6],[2,6],[3,6] | |
2116 | |
2117 // outptr[2] = range_limit[IDESCALE(tmp2 + tmp5, PASS1_BITS+3) | |
2118 // & RANGE_MASK]; | |
2119 // outptr[5] = range_limit[IDESCALE(tmp2 - tmp5, PASS1_BITS+3) | |
2120 // & RANGE_MASK]; final3 | |
2121 paddw_r2r(mm1, mm7); // tmp4 | |
2122 movq_r2r(mm5, mm3); | |
2123 | |
2124 paddw_r2r(mm1, mm5); // tmp2+tmp5 | |
2125 psubw_r2r(mm1, mm3); // tmp2-tmp5 | |
2126 | |
2127 psraw_i2r(3, mm5); // outptr[0,2],[1,2],[2,2],[3,2] | |
2128 | |
2129 movq_m2r(*(wsptr+3), mm4); // tmp3,final4 | |
2130 psraw_i2r(3, mm3); // outptr[0,5],[1,5],[2,5],[3,5] | |
2131 | |
2132 | |
2133 | |
2134 // outptr[4] = range_limit[IDESCALE(tmp3 + tmp4, PASS1_BITS+3) | |
2135 // & RANGE_MASK]; | |
2136 // outptr[3] = range_limit[IDESCALE(tmp3 - tmp4, PASS1_BITS+3) | |
2137 // & RANGE_MASK]; final4 | |
2138 movq_r2r(mm4, mm6); | |
2139 paddw_r2r(mm7, mm4); // tmp3+tmp4 | |
2140 | |
2141 psubw_r2r(mm7, mm6); // tmp3-tmp4 | |
2142 psraw_i2r(3, mm4); // outptr[0,4],[1,4],[2,4],[3,4] | |
2143 | |
2144 // mov ecx, [dataptr] | |
2145 | |
2146 psraw_i2r(3, mm6); // outptr[0,3],[1,3],[2,3],[3,3] | |
2147 | |
2148 packuswb_r2r(mm4, mm5); // out[0,2],[1,2],[2,2],[3,2],[0,4],[1,4],[2,4],[3,4] | |
2149 | |
2150 packuswb_r2r(mm3, mm6); // out[0,3],[1,3],[2,3],[3,3],[0,5],[1,5],[2,5],[3,5] | |
2151 movq_r2r(mm2, mm4); | |
2152 | |
2153 movq_r2r(mm5, mm7); | |
2154 punpcklbw_r2r(mm0, mm2); // out[0,0],[0,1],[1,0],[1,1],[2,0],[2,1],[3,0],[3,1] | |
2155 | |
2156 punpckhbw_r2r(mm0, mm4); // out[0,6],[0,7],[1,6],[1,7],[2,6],[2,7],[3,6],[3,7] | |
2157 movq_r2r(mm2, mm1); | |
2158 | |
2159 punpcklbw_r2r(mm6, mm5); // out[0,2],[0,3],[1,2],[1,3],[2,2],[2,3],[3,2],[3,3] | |
2160 | |
2161 // add dataptr, 4 | |
2162 | |
2163 punpckhbw_r2r(mm6, mm7); // out[0,4],[0,5],[1,4],[1,5],[2,4],[2,5],[3,4],[3,5] | |
2164 | |
2165 punpcklwd_r2r(mm5, mm2); // out[0,0],[0,1],[0,2],[0,3],[1,0],[1,1],[1,2],[1,3] | |
2166 | |
2167 // add ecx, output_col | |
2168 | |
2169 movq_r2r(mm7, mm6); | |
2170 punpckhwd_r2r(mm5, mm1); // out[2,0],[2,1],[2,2],[2,3],[3,0],[3,1],[3,2],[3,3] | |
2171 | |
2172 movq_r2r(mm2, mm0); | |
2173 punpcklwd_r2r(mm4, mm6); // out[0,4],[0,5],[0,6],[0,7],[1,4],[1,5],[1,6],[1,7] | |
2174 | |
2175 // mov idata, [dataptr] | |
2176 | |
2177 punpckldq_r2r(mm6, mm2); // out[0,0],[0,1],[0,2],[0,3],[0,4],[0,5],[0,6],[0,7] | |
2178 | |
2179 // add dataptr, 4 | |
2180 | |
2181 movq_r2r(mm1, mm3); | |
2182 | |
2183 // add idata, output_col | |
2184 | |
2185 punpckhwd_r2r(mm4, mm7); // out[2,4],[2,5],[2,6],[2,7],[3,4],[3,5],[3,6],[3,7] | |
2186 | |
2187 movq_r2m(mm2, *(dataptr)); | |
2188 | |
2189 punpckhdq_r2r(mm6, mm0); // out[1,0],[1,1],[1,2],[1,3],[1,4],[1,5],[1,6],[1,7] | |
2190 | |
2191 dataptr += rskip; | |
2192 movq_r2m(mm0, *(dataptr)); | |
2193 | |
2194 punpckldq_r2r(mm7, mm1); // out[2,0],[2,1],[2,2],[2,3],[2,4],[2,5],[2,6],[2,7] | |
2195 punpckhdq_r2r(mm7, mm3); // out[3,0],[3,1],[3,2],[3,3],[3,4],[3,5],[3,6],[3,7] | |
2196 | |
2197 dataptr += rskip; | |
2198 movq_r2m(mm1, *(dataptr)); | |
2199 | |
2200 dataptr += rskip; | |
2201 movq_r2m(mm3, *(dataptr)); | |
2202 | |
2203 /*******************************************************************/ | |
2204 | |
2205 wsptr += 8; | |
2206 | |
2207 /*******************************************************************/ | |
2208 | |
2209 // tmp10 = ((DCTELEM) wsptr[0] + (DCTELEM) wsptr[4]); | |
2210 // tmp13 = ((DCTELEM) wsptr[2] + (DCTELEM) wsptr[6]); | |
2211 // tmp11 = ((DCTELEM) wsptr[0] - (DCTELEM) wsptr[4]); | |
2212 // tmp14 = ((DCTELEM) wsptr[2] - (DCTELEM) wsptr[6]); | |
2213 movq_m2r(*(wsptr), mm0); // wsptr[0,0],[0,1],[0,2],[0,3] | |
2214 | |
2215 movq_m2r(*(wsptr+1), mm1); // wsptr[0,4],[0,5],[0,6],[0,7] | |
2216 movq_r2r(mm0, mm2); | |
2217 | |
2218 movq_m2r(*(wsptr+2), mm3); // wsptr[1,0],[1,1],[1,2],[1,3] | |
2219 paddw_r2r(mm1, mm0); // wsptr[0,tmp10],[xxx],[0,tmp13],[xxx] | |
2220 | |
2221 movq_m2r(*(wsptr+3), mm4); // wsptr[1,4],[1,5],[1,6],[1,7] | |
2222 psubw_r2r(mm1, mm2); // wsptr[0,tmp11],[xxx],[0,tmp14],[xxx] | |
2223 | |
2224 movq_r2r(mm0, mm6); | |
2225 movq_r2r(mm3, mm5); | |
2226 | |
2227 paddw_r2r(mm4, mm3); // wsptr[1,tmp10],[xxx],[1,tmp13],[xxx] | |
2228 movq_r2r(mm2, mm1); | |
2229 | |
2230 psubw_r2r(mm4, mm5); // wsptr[1,tmp11],[xxx],[1,tmp14],[xxx] | |
2231 punpcklwd_r2r(mm3, mm0); // wsptr[0,tmp10],[1,tmp10],[xxx],[xxx] | |
2232 | |
2233 movq_m2r(*(wsptr+7), mm7); // wsptr[3,4],[3,5],[3,6],[3,7] | |
2234 punpckhwd_r2r(mm3, mm6); // wsptr[0,tmp13],[1,tmp13],[xxx],[xxx] | |
2235 | |
2236 movq_m2r(*(wsptr+4), mm3); // wsptr[2,0],[2,1],[2,2],[2,3] | |
2237 punpckldq_r2r(mm6, mm0); // wsptr[0,tmp10],[1,tmp10],[0,tmp13],[1,tmp13] | |
2238 | |
2239 punpcklwd_r2r(mm5, mm1); // wsptr[0,tmp11],[1,tmp11],[xxx],[xxx] | |
2240 movq_r2r(mm3, mm4); | |
2241 | |
2242 movq_m2r(*(wsptr+6), mm6); // wsptr[3,0],[3,1],[3,2],[3,3] | |
2243 punpckhwd_r2r(mm5, mm2); // wsptr[0,tmp14],[1,tmp14],[xxx],[xxx] | |
2244 | |
2245 movq_m2r(*(wsptr+5), mm5); // wsptr[2,4],[2,5],[2,6],[2,7] | |
2246 punpckldq_r2r(mm2, mm1); // wsptr[0,tmp11],[1,tmp11],[0,tmp14],[1,tmp14] | |
2247 | |
2248 paddw_r2r(mm5, mm3); // wsptr[2,tmp10],[xxx],[2,tmp13],[xxx] | |
2249 movq_r2r(mm6, mm2); | |
2250 | |
2251 psubw_r2r(mm5, mm4); // wsptr[2,tmp11],[xxx],[2,tmp14],[xxx] | |
2252 paddw_r2r(mm7, mm6); // wsptr[3,tmp10],[xxx],[3,tmp13],[xxx] | |
2253 | |
2254 movq_r2r(mm3, mm5); | |
2255 punpcklwd_r2r(mm6, mm3); // wsptr[2,tmp10],[3,tmp10],[xxx],[xxx] | |
2256 | |
2257 psubw_r2r(mm7, mm2); // wsptr[3,tmp11],[xxx],[3,tmp14],[xxx] | |
2258 punpckhwd_r2r(mm6, mm5); // wsptr[2,tmp13],[3,tmp13],[xxx],[xxx] | |
2259 | |
2260 movq_r2r(mm4, mm7); | |
2261 punpckldq_r2r(mm5, mm3); // wsptr[2,tmp10],[3,tmp10],[2,tmp13],[3,tmp13] | |
2262 | |
2263 punpcklwd_r2r(mm2, mm4); // wsptr[2,tmp11],[3,tmp11],[xxx],[xxx] | |
2264 | |
2265 punpckhwd_r2r(mm2, mm7); // wsptr[2,tmp14],[3,tmp14],[xxx],[xxx] | |
2266 | |
2267 punpckldq_r2r(mm7, mm4); // wsptr[2,tmp11],[3,tmp11],[2,tmp14],[3,tmp14] | |
2268 movq_r2r(mm1, mm6); | |
2269 | |
2270 //OK | |
2271 | |
2272 // mm0 = ;wsptr[0,tmp10],[1,tmp10],[0,tmp13],[1,tmp13] | |
2273 // mm1 = ;wsptr[0,tmp11],[1,tmp11],[0,tmp14],[1,tmp14] | |
2274 | |
2275 movq_r2r(mm0, mm2); | |
2276 punpckhdq_r2r(mm4, mm6); // wsptr[0,tmp14],[1,tmp14],[2,tmp14],[3,tmp14] | |
2277 | |
2278 punpckldq_r2r(mm4, mm1); // wsptr[0,tmp11],[1,tmp11],[2,tmp11],[3,tmp11] | |
2279 psllw_i2r(2, mm6); | |
2280 | |
2281 pmulhw_m2r(fix_141, mm6); | |
2282 punpckldq_r2r(mm3, mm0); // wsptr[0,tmp10],[1,tmp10],[2,tmp10],[3,tmp10] | |
2283 | |
2284 punpckhdq_r2r(mm3, mm2); // wsptr[0,tmp13],[1,tmp13],[2,tmp13],[3,tmp13] | |
2285 movq_r2r(mm0, mm7); | |
2286 | |
2287 // tmp0 = tmp10 + tmp13; | |
2288 // tmp3 = tmp10 - tmp13; | |
2289 paddw_r2r(mm2, mm0); // [0,tmp0],[1,tmp0],[2,tmp0],[3,tmp0] | |
2290 psubw_r2r(mm2, mm7); // [0,tmp3],[1,tmp3],[2,tmp3],[3,tmp3] | |
2291 | |
2292 // tmp12 = MULTIPLY(tmp14, FIX_1_414213562) - tmp13; | |
2293 psubw_r2r(mm2, mm6); // wsptr[0,tmp12],[1,tmp12],[2,tmp12],[3,tmp12] | |
2294 // tmp1 = tmp11 + tmp12; | |
2295 // tmp2 = tmp11 - tmp12; | |
2296 movq_r2r(mm1, mm5); | |
2297 | |
2298 //OK | |
2299 | |
2300 | |
2301 /* Odd part */ | |
2302 | |
2303 // z13 = (DCTELEM) wsptr[5] + (DCTELEM) wsptr[3]; | |
2304 // z10 = (DCTELEM) wsptr[5] - (DCTELEM) wsptr[3]; | |
2305 // z11 = (DCTELEM) wsptr[1] + (DCTELEM) wsptr[7]; | |
2306 // z12 = (DCTELEM) wsptr[1] - (DCTELEM) wsptr[7]; | |
2307 movq_m2r(*(wsptr), mm3); // wsptr[0,0],[0,1],[0,2],[0,3] | |
2308 paddw_r2r(mm6, mm1); // [0,tmp1],[1,tmp1],[2,tmp1],[3,tmp1] | |
2309 | |
2310 movq_m2r(*(wsptr+1), mm4); // wsptr[0,4],[0,5],[0,6],[0,7] | |
2311 psubw_r2r(mm6, mm5); // [0,tmp2],[1,tmp2],[2,tmp2],[3,tmp2] | |
2312 | |
2313 movq_r2r(mm3, mm6); | |
2314 punpckldq_r2r(mm4, mm3); // wsptr[0,0],[0,1],[0,4],[0,5] | |
2315 | |
2316 punpckhdq_r2r(mm6, mm4); // wsptr[0,6],[0,7],[0,2],[0,3] | |
2317 movq_r2r(mm3, mm2); | |
2318 | |
2319 //Save tmp0 and tmp1 in wsptr | |
2320 movq_r2m(mm0, *(wsptr)); // save tmp0 | |
2321 paddw_r2r(mm4, mm2); // wsptr[xxx],[0,z11],[xxx],[0,z13] | |
2322 | |
2323 | |
2324 //Continue with z10 --- z13 | |
2325 movq_m2r(*(wsptr+2), mm6); // wsptr[1,0],[1,1],[1,2],[1,3] | |
2326 psubw_r2r(mm4, mm3); // wsptr[xxx],[0,z12],[xxx],[0,z10] | |
2327 | |
2328 movq_m2r(*(wsptr+3), mm0); // wsptr[1,4],[1,5],[1,6],[1,7] | |
2329 movq_r2r(mm6, mm4); | |
2330 | |
2331 movq_r2m(mm1, *(wsptr+1)); // save tmp1 | |
2332 punpckldq_r2r(mm0, mm6); // wsptr[1,0],[1,1],[1,4],[1,5] | |
2333 | |
2334 punpckhdq_r2r(mm4, mm0); // wsptr[1,6],[1,7],[1,2],[1,3] | |
2335 movq_r2r(mm6, mm1); | |
2336 | |
2337 //Save tmp2 and tmp3 in wsptr | |
2338 paddw_r2r(mm0, mm6); // wsptr[xxx],[1,z11],[xxx],[1,z13] | |
2339 movq_r2r(mm2, mm4); | |
2340 | |
2341 //Continue with z10 --- z13 | |
2342 movq_r2m(mm5, *(wsptr+2)); // save tmp2 | |
2343 punpcklwd_r2r(mm6, mm2); // wsptr[xxx],[xxx],[0,z11],[1,z11] | |
2344 | |
2345 psubw_r2r(mm0, mm1); // wsptr[xxx],[1,z12],[xxx],[1,z10] | |
2346 punpckhwd_r2r(mm6, mm4); // wsptr[xxx],[xxx],[0,z13],[1,z13] | |
2347 | |
2348 movq_r2r(mm3, mm0); | |
2349 punpcklwd_r2r(mm1, mm3); // wsptr[xxx],[xxx],[0,z12],[1,z12] | |
2350 | |
2351 movq_r2m(mm7, *(wsptr+3)); // save tmp3 | |
2352 punpckhwd_r2r(mm1, mm0); // wsptr[xxx],[xxx],[0,z10],[1,z10] | |
2353 | |
2354 movq_m2r(*(wsptr+4), mm6); // wsptr[2,0],[2,1],[2,2],[2,3] | |
2355 punpckhdq_r2r(mm2, mm0); // wsptr[0,z10],[1,z10],[0,z11],[1,z11] | |
2356 | |
2357 movq_m2r(*(wsptr+5), mm7); // wsptr[2,4],[2,5],[2,6],[2,7] | |
2358 punpckhdq_r2r(mm4, mm3); // wsptr[0,z12],[1,z12],[0,z13],[1,z13] | |
2359 | |
2360 movq_m2r(*(wsptr+6), mm1); // wsptr[3,0],[3,1],[3,2],[3,3] | |
2361 movq_r2r(mm6, mm4); | |
2362 | |
2363 punpckldq_r2r(mm7, mm6); // wsptr[2,0],[2,1],[2,4],[2,5] | |
2364 movq_r2r(mm1, mm5); | |
2365 | |
2366 punpckhdq_r2r(mm4, mm7); // wsptr[2,6],[2,7],[2,2],[2,3] | |
2367 movq_r2r(mm6, mm2); | |
2368 | |
2369 movq_m2r(*(wsptr+7), mm4); // wsptr[3,4],[3,5],[3,6],[3,7] | |
2370 paddw_r2r(mm7, mm6); // wsptr[xxx],[2,z11],[xxx],[2,z13] | |
2371 | |
2372 psubw_r2r(mm7, mm2); // wsptr[xxx],[2,z12],[xxx],[2,z10] | |
2373 punpckldq_r2r(mm4, mm1); // wsptr[3,0],[3,1],[3,4],[3,5] | |
2374 | |
2375 punpckhdq_r2r(mm5, mm4); // wsptr[3,6],[3,7],[3,2],[3,3] | |
2376 movq_r2r(mm1, mm7); | |
2377 | |
2378 paddw_r2r(mm4, mm1); // wsptr[xxx],[3,z11],[xxx],[3,z13] | |
2379 psubw_r2r(mm4, mm7); // wsptr[xxx],[3,z12],[xxx],[3,z10] | |
2380 | |
2381 movq_r2r(mm6, mm5); | |
2382 punpcklwd_r2r(mm1, mm6); // wsptr[xxx],[xxx],[2,z11],[3,z11] | |
2383 | |
2384 punpckhwd_r2r(mm1, mm5); // wsptr[xxx],[xxx],[2,z13],[3,z13] | |
2385 movq_r2r(mm2, mm4); | |
2386 | |
2387 punpcklwd_r2r(mm7, mm2); // wsptr[xxx],[xxx],[2,z12],[3,z12] | |
2388 | |
2389 punpckhwd_r2r(mm7, mm4); // wsptr[xxx],[xxx],[2,z10],[3,z10] | |
2390 | |
2391 punpckhdq_r2r(mm6, mm4); // wsptr[2,z10],[3,z10],[2,z11],[3,z11] | |
2392 | |
2393 punpckhdq_r2r(mm5, mm2); // wsptr[2,z12],[3,z12],[2,z13],[3,z13] | |
2394 movq_r2r(mm0, mm5); | |
2395 | |
2396 punpckldq_r2r(mm4, mm0); // wsptr[0,z10],[1,z10],[2,z10],[3,z10] | |
2397 | |
2398 punpckhdq_r2r(mm4, mm5); // wsptr[0,z11],[1,z11],[2,z11],[3,z11] | |
2399 movq_r2r(mm3, mm4); | |
2400 | |
2401 punpckhdq_r2r(mm2, mm4); // wsptr[0,z13],[1,z13],[2,z13],[3,z13] | |
2402 movq_r2r(mm5, mm1); | |
2403 | |
2404 punpckldq_r2r(mm2, mm3); // wsptr[0,z12],[1,z12],[2,z12],[3,z12] | |
2405 // tmp7 = z11 + z13; /* phase 5 */ | |
2406 // tmp8 = z11 - z13; /* phase 5 */ | |
2407 psubw_r2r(mm4, mm1); // tmp8 | |
2408 | |
2409 paddw_r2r(mm4, mm5); // tmp7 | |
2410 // tmp21 = MULTIPLY(tmp8, FIX_1_414213562); /* 2*c4 */ | |
2411 psllw_i2r(2, mm1); | |
2412 | |
2413 psllw_i2r(2, mm0); | |
2414 | |
2415 pmulhw_m2r(fix_141, mm1); // tmp21 | |
2416 // tmp20 = MULTIPLY(z12, (FIX_1_082392200- FIX_1_847759065)) /* 2*(c2-c6) */ | |
2417 // + MULTIPLY(z10, - FIX_1_847759065); /* 2*c2 */ | |
2418 psllw_i2r(2, mm3); | |
2419 movq_r2r(mm0, mm7); | |
2420 | |
2421 pmulhw_m2r(fix_n184, mm7); | |
2422 movq_r2r(mm3, mm6); | |
2423 | |
2424 movq_m2r(*(wsptr), mm2); // tmp0,final1 | |
2425 | |
2426 pmulhw_m2r(fix_108n184, mm6); | |
2427 // tmp22 = MULTIPLY(z10,(FIX_1_847759065 - FIX_2_613125930)) /* -2*(c2+c6) */ | |
2428 // + MULTIPLY(z12, FIX_1_847759065); /* 2*c2 */ | |
2429 movq_r2r(mm2, mm4); // final1 | |
2430 | |
2431 pmulhw_m2r(fix_184n261, mm0); | |
2432 paddw_r2r(mm5, mm2); // tmp0+tmp7,final1 | |
2433 | |
2434 pmulhw_m2r(fix_184, mm3); | |
2435 psubw_r2r(mm5, mm4); // tmp0-tmp7,final1 | |
2436 | |
2437 // tmp6 = tmp22 - tmp7; /* phase 2 */ | |
2438 psraw_i2r(3, mm2); // outptr[0,0],[1,0],[2,0],[3,0],final1 | |
2439 | |
2440 paddw_r2r(mm6, mm7); // tmp20 | |
2441 psraw_i2r(3, mm4); // outptr[0,7],[1,7],[2,7],[3,7],final1 | |
2442 | |
2443 paddw_r2r(mm0, mm3); // tmp22 | |
2444 | |
2445 // tmp5 = tmp21 - tmp6; | |
2446 psubw_r2r(mm5, mm3); // tmp6 | |
2447 | |
2448 // tmp4 = tmp20 + tmp5; | |
2449 movq_m2r(*(wsptr+1), mm0); // tmp1,final2 | |
2450 psubw_r2r(mm3, mm1); // tmp5 | |
2451 | |
2452 movq_r2r(mm0, mm6); // final2 | |
2453 paddw_r2r(mm3, mm0); // tmp1+tmp6,final2 | |
2454 | |
2455 /* Final output stage: scale down by a factor of 8 and range-limit */ | |
2456 | |
2457 // outptr[0] = range_limit[IDESCALE(tmp0 + tmp7, PASS1_BITS+3) | |
2458 // & RANGE_MASK]; | |
2459 // outptr[7] = range_limit[IDESCALE(tmp0 - tmp7, PASS1_BITS+3) | |
2460 // & RANGE_MASK]; final1 | |
2461 | |
2462 | |
2463 // outptr[1] = range_limit[IDESCALE(tmp1 + tmp6, PASS1_BITS+3) | |
2464 // & RANGE_MASK]; | |
2465 // outptr[6] = range_limit[IDESCALE(tmp1 - tmp6, PASS1_BITS+3) | |
2466 // & RANGE_MASK]; final2 | |
2467 psubw_r2r(mm3, mm6); // tmp1-tmp6,final2 | |
2468 psraw_i2r(3, mm0); // outptr[0,1],[1,1],[2,1],[3,1] | |
2469 | |
2470 psraw_i2r(3, mm6); // outptr[0,6],[1,6],[2,6],[3,6] | |
2471 | |
2472 packuswb_r2r(mm4, mm0); // out[0,1],[1,1],[2,1],[3,1],[0,7],[1,7],[2,7],[3,7] | |
2473 | |
2474 movq_m2r(*(wsptr+2), mm5); // tmp2,final3 | |
2475 packuswb_r2r(mm6, mm2); // out[0,0],[1,0],[2,0],[3,0],[0,6],[1,6],[2,6],[3,6] | |
2476 | |
2477 // outptr[2] = range_limit[IDESCALE(tmp2 + tmp5, PASS1_BITS+3) | |
2478 // & RANGE_MASK]; | |
2479 // outptr[5] = range_limit[IDESCALE(tmp2 - tmp5, PASS1_BITS+3) | |
2480 // & RANGE_MASK]; final3 | |
2481 paddw_r2r(mm1, mm7); // tmp4 | |
2482 movq_r2r(mm5, mm3); | |
2483 | |
2484 paddw_r2r(mm1, mm5); // tmp2+tmp5 | |
2485 psubw_r2r(mm1, mm3); // tmp2-tmp5 | |
2486 | |
2487 psraw_i2r(3, mm5); // outptr[0,2],[1,2],[2,2],[3,2] | |
2488 | |
2489 movq_m2r(*(wsptr+3), mm4); // tmp3,final4 | |
2490 psraw_i2r(3, mm3); // outptr[0,5],[1,5],[2,5],[3,5] | |
2491 | |
2492 | |
2493 | |
2494 // outptr[4] = range_limit[IDESCALE(tmp3 + tmp4, PASS1_BITS+3) | |
2495 // & RANGE_MASK]; | |
2496 // outptr[3] = range_limit[IDESCALE(tmp3 - tmp4, PASS1_BITS+3) | |
2497 // & RANGE_MASK]; final4 | |
2498 movq_r2r(mm4, mm6); | |
2499 paddw_r2r(mm7, mm4); // tmp3+tmp4 | |
2500 | |
2501 psubw_r2r(mm7, mm6); // tmp3-tmp4 | |
2502 psraw_i2r(3, mm4); // outptr[0,4],[1,4],[2,4],[3,4] | |
2503 | |
2504 psraw_i2r(3, mm6); // outptr[0,3],[1,3],[2,3],[3,3] | |
2505 | |
2506 /* | |
2507 movq_r2m(mm4, *dummy); | |
2508 fprintf(stderr, "3-4 %016llx\n", dummy); | |
2509 movq_r2m(mm4, *dummy); | |
2510 fprintf(stderr, "3+4 %016llx\n", dummy); | |
2511 */ | |
2512 | |
2513 | |
2514 packuswb_r2r(mm4, mm5); // out[0,2],[1,2],[2,2],[3,2],[0,4],[1,4],[2,4],[3,4] | |
2515 | |
2516 packuswb_r2r(mm3, mm6); // out[0,3],[1,3],[2,3],[3,3],[0,5],[1,5],[2,5],[3,5] | |
2517 movq_r2r(mm2, mm4); | |
2518 | |
2519 movq_r2r(mm5, mm7); | |
2520 punpcklbw_r2r(mm0, mm2); // out[0,0],[0,1],[1,0],[1,1],[2,0],[2,1],[3,0],[3,1] | |
2521 | |
2522 punpckhbw_r2r(mm0, mm4); // out[0,6],[0,7],[1,6],[1,7],[2,6],[2,7],[3,6],[3,7] | |
2523 movq_r2r(mm2, mm1); | |
2524 | |
2525 punpcklbw_r2r(mm6, mm5); // out[0,2],[0,3],[1,2],[1,3],[2,2],[2,3],[3,2],[3,3] | |
2526 | |
2527 punpckhbw_r2r(mm6, mm7); // out[0,4],[0,5],[1,4],[1,5],[2,4],[2,5],[3,4],[3,5] | |
2528 | |
2529 punpcklwd_r2r(mm5, mm2); // out[0,0],[0,1],[0,2],[0,3],[1,0],[1,1],[1,2],[1,3] | |
2530 | |
2531 movq_r2r(mm7, mm6); | |
2532 punpckhwd_r2r(mm5, mm1); // out[2,0],[2,1],[2,2],[2,3],[3,0],[3,1],[3,2],[3,3] | |
2533 | |
2534 movq_r2r(mm2, mm0); | |
2535 punpcklwd_r2r(mm4, mm6); // out[0,4],[0,5],[0,6],[0,7],[1,4],[1,5],[1,6],[1,7] | |
2536 | |
2537 punpckldq_r2r(mm6, mm2); // out[0,0],[0,1],[0,2],[0,3],[0,4],[0,5],[0,6],[0,7] | |
2538 | |
2539 movq_r2r(mm1, mm3); | |
2540 | |
2541 punpckhwd_r2r(mm4, mm7); // out[2,4],[2,5],[2,6],[2,7],[3,4],[3,5],[3,6],[3,7] | |
2542 | |
2543 dataptr += rskip; | |
2544 movq_r2m(mm2, *(dataptr)); | |
2545 | |
2546 punpckhdq_r2r(mm6, mm0); // out[1,0],[1,1],[1,2],[1,3],[1,4],[1,5],[1,6],[1,7] | |
2547 | |
2548 dataptr += rskip; | |
2549 movq_r2m(mm0, *(dataptr)); | |
2550 | |
2551 punpckldq_r2r(mm7, mm1); // out[2,0],[2,1],[2,2],[2,3],[2,4],[2,5],[2,6],[2,7] | |
2552 | |
2553 punpckhdq_r2r(mm7, mm3); // out[3,0],[3,1],[3,2],[3,3],[3,4],[3,5],[3,6],[3,7] | |
2554 | |
2555 dataptr += rskip; | |
2556 movq_r2m(mm1, *(dataptr)); | |
2557 | |
2558 dataptr += rskip; | |
2559 movq_r2m(mm3, *(dataptr)); | |
2560 | |
2561 #else | |
2562 __s32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; | |
2563 __s32 tmp10, tmp11, tmp12, tmp13; | |
2564 __s32 z5, z10, z11, z12, z13; | |
2565 __s16 *inptr; | |
2566 __s32 *wsptr; | |
2567 __u8 *outptr; | |
2568 int ctr; | |
2569 __s32 dcval; | |
2570 __s32 workspace[64]; | |
2571 | |
2572 inptr = data; | |
2573 wsptr = workspace; | |
2574 for (ctr = 8; ctr > 0; ctr--) { | |
2575 | |
2576 if ((inptr[8] | inptr[16] | inptr[24] | | |
2577 inptr[32] | inptr[40] | inptr[48] | inptr[56]) == 0) { | |
2578 dcval = inptr[0]; | |
2579 wsptr[0] = dcval; | |
2580 wsptr[8] = dcval; | |
2581 wsptr[16] = dcval; | |
2582 wsptr[24] = dcval; | |
2583 wsptr[32] = dcval; | |
2584 wsptr[40] = dcval; | |
2585 wsptr[48] = dcval; | |
2586 wsptr[56] = dcval; | |
2587 | |
2588 inptr++; | |
2589 wsptr++; | |
2590 continue; | |
2591 } | |
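/* If every AC coefficient of the column is zero (the test above), the 1-D
   IDCT of that column is just the DC value replicated into all eight
   workspace slots, so the full butterfly below can be skipped. */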
2592 | |
2593 tmp0 = inptr[0]; | |
2594 tmp1 = inptr[16]; | |
2595 tmp2 = inptr[32]; | |
2596 tmp3 = inptr[48]; | |
2597 | |
2598 tmp10 = tmp0 + tmp2; | |
2599 tmp11 = tmp0 - tmp2; | |
2600 | |
2601 tmp13 = tmp1 + tmp3; | |
2602 tmp12 = MULTIPLY(tmp1 - tmp3, FIX_1_414213562) - tmp13; | |
2603 | |
2604 tmp0 = tmp10 + tmp13; | |
2605 tmp3 = tmp10 - tmp13; | |
2606 tmp1 = tmp11 + tmp12; | |
2607 tmp2 = tmp11 - tmp12; | |
2608 | |
2609 tmp4 = inptr[8]; | |
2610 tmp5 = inptr[24]; | |
2611 tmp6 = inptr[40]; | |
2612 tmp7 = inptr[56]; | |
2613 | |
2614 z13 = tmp6 + tmp5; | |
2615 z10 = tmp6 - tmp5; | |
2616 z11 = tmp4 + tmp7; | |
2617 z12 = tmp4 - tmp7; | |
2618 | |
2619 tmp7 = z11 + z13; | |
2620 tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); | |
2621 | |
2622 z5 = MULTIPLY(z10 + z12, FIX_1_847759065); | |
2623 tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; | |
2624 tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5; | |
2625 | |
2626 tmp6 = tmp12 - tmp7; | |
2627 tmp5 = tmp11 - tmp6; | |
2628 tmp4 = tmp10 + tmp5; | |
2629 | |
2630 wsptr[0] = (__s32) (tmp0 + tmp7); | |
2631 wsptr[56] = (__s32) (tmp0 - tmp7); | |
2632 wsptr[8] = (__s32) (tmp1 + tmp6); | |
2633 wsptr[48] = (__s32) (tmp1 - tmp6); | |
2634 wsptr[16] = (__s32) (tmp2 + tmp5); | |
2635 wsptr[40] = (__s32) (tmp2 - tmp5); | |
2636 wsptr[32] = (__s32) (tmp3 + tmp4); | |
2637 wsptr[24] = (__s32) (tmp3 - tmp4); | |
2638 | |
2639 inptr++; | |
2640 wsptr++; | |
2641 } | |
2642 | |
2643 wsptr = workspace; | |
2644 for (ctr = 0; ctr < 8; ctr++) { | |
2645 outptr = &(odata[ctr*rskip]); | |
2646 | |
2647 tmp10 = wsptr[0] + wsptr[4]; | |
2648 tmp11 = wsptr[0] - wsptr[4]; | |
2649 | |
2650 tmp13 = wsptr[2] + wsptr[6]; | |
2651 tmp12 = MULTIPLY(wsptr[2] - wsptr[6], FIX_1_414213562) - tmp13; | |
2652 | |
2653 tmp0 = tmp10 + tmp13; | |
2654 tmp3 = tmp10 - tmp13; | |
2655 tmp1 = tmp11 + tmp12; | |
2656 tmp2 = tmp11 - tmp12; | |
2657 | |
2658 z13 = wsptr[5] + wsptr[3]; | |
2659 z10 = wsptr[5] - wsptr[3]; | |
2660 z11 = wsptr[1] + wsptr[7]; | |
2661 z12 = wsptr[1] - wsptr[7]; | |
2662 | |
2663 tmp7 = z11 + z13; | |
2664 tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562); | |
2665 | |
2666 z5 = MULTIPLY(z10 + z12, FIX_1_847759065); | |
2667 tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; | |
2668 tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5; | |
2669 | |
2670 tmp6 = tmp12 - tmp7; | |
2671 tmp5 = tmp11 - tmp6; | |
2672 tmp4 = tmp10 + tmp5; | |
2673 | |
2674 outptr[0] = RL(DESCALE(tmp0 + tmp7)); | |
2675 outptr[7] = RL(DESCALE(tmp0 - tmp7)); | |
2676 outptr[1] = RL(DESCALE(tmp1 + tmp6)); | |
2677 outptr[6] = RL(DESCALE(tmp1 - tmp6)); | |
2678 outptr[2] = RL(DESCALE(tmp2 + tmp5)); | |
2679 outptr[5] = RL(DESCALE(tmp2 - tmp5)); | |
2680 outptr[4] = RL(DESCALE(tmp3 + tmp4)); | |
2681 outptr[3] = RL(DESCALE(tmp3 - tmp4)); | |
2682 | |
2683 wsptr += 8; | |
2684 } | |
2685 #endif | |
2686 } | |
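/*
   Fixed-point arithmetic used by the scalar fallback above: the FIX_*
   constants and the MULTIPLY()/DESCALE()/RL() macros are defined elsewhere
   in RTjpeg (rtjpegn.h or earlier in this file).  The sketch below only
   illustrates the assumed Q16 convention (constant = value * 2^16, product
   shifted back down); it is a hypothetical helper kept out of the build.
*/
#if 0
static __s32 RTjpeg_example_fixmul(__s32 var, __s32 fix_const)
{
  /* e.g. 1.414213562 would be stored as (__s32)(1.414213562 * 65536) == 92682,
     so RTjpeg_example_fixmul(x, 92682) approximates x * sqrt(2) */
  return (__s32)(((long long)var * fix_const) >> 16);
}
#endif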
2687 /* | |
2688 | |
2689 Main Routines | |
2690 | |
2691 This file contains most of the initialisation and control functions | |
2692 | |
2693 (C) Justin Schoeman 1998 | |
2694 | |
2695 */ | |
2696 | |
2697 /* | |
2698 | |
2699 Private function | |
2700 | |
2701 Initialise all the cache-aligned data blocks | |
2702 | |
2703 */ | |
2704 | |
2705 void RTjpeg_init_data(void) | |
2706 { | |
2707 unsigned long dptr; | |
2708 | |
2709 dptr=(unsigned long)&(RTjpeg_alldata[0]); | |
2710 dptr+=32; | |
2711 dptr=dptr>>5; | |
2712 dptr=dptr<<5; /* cache align data */ | |
2713 | |
2714 RTjpeg_block=(__s16 *)dptr; | |
2715 dptr+=sizeof(__s16)*64; | |
2716 RTjpeg_lqt=(__s32 *)dptr; | |
2717 dptr+=sizeof(__s32)*64; | |
2718 RTjpeg_cqt=(__s32 *)dptr; | |
2719 dptr+=sizeof(__s32)*64; | |
2720 RTjpeg_liqt=(__u32 *)dptr; | |
2721 dptr+=sizeof(__u32)*64; | |
2722 RTjpeg_ciqt=(__u32 *)dptr; | |
2723 } | |
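/*
   The pointer arithmetic above rounds dptr up to a 32-byte boundary: adding
   32 and clearing the low five bits (>>5 then <<5) yields an address in
   (dptr, dptr+32] that is a multiple of 32, which is why RTjpeg_alldata is
   assumed to be declared with at least 32 bytes of headroom (its definition
   is not shown here).  Hypothetical helper showing the same rounding, kept
   out of the build:
*/
#if 0
static unsigned long RTjpeg_example_align32(unsigned long addr)
{
  return ((addr + 32) >> 5) << 5;   /* smallest multiple of 32 strictly above addr */
}
#endif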
2724 | |
2725 /* | |
2726 | |
2727 External Function | |
2728 | |
2729 Re-set quality factor | |
2730 | |
2731 Input: Q -> quality factor (192=best, 32=worst) | |
2732 | |
2733 The luma and chroma quantisation tables are recomputed in place. | |
2734 */ | |
2735 | |
2736 void RTjpeg_init_Q(__u8 Q) | |
2737 { | |
2738 int i; | |
2739 __u64 qual; | |
2740 | |
2741 qual=(__u64)Q<<(32-7); /* 32 bit FP, 255=2, 0=0 */ | |
2742 | |
2743 for(i=0; i<64; i++) | |
2744 { | |
2745 RTjpeg_lqt[i]=(__s32)((qual/((__u64)RTjpeg_lum_quant_tbl[i]<<16))>>3); | |
2746 if(RTjpeg_lqt[i]==0)RTjpeg_lqt[i]=1; | |
2747 RTjpeg_cqt[i]=(__s32)((qual/((__u64)RTjpeg_chrom_quant_tbl[i]<<16))>>3); | |
2748 if(RTjpeg_cqt[i]==0)RTjpeg_cqt[i]=1; | |
2749 RTjpeg_liqt[i]=(1<<16)/(RTjpeg_lqt[i]<<3); | |
2750 RTjpeg_ciqt[i]=(1<<16)/(RTjpeg_cqt[i]<<3); | |
2751 RTjpeg_lqt[i]=((1<<16)/RTjpeg_liqt[i])>>3; | |
2752 RTjpeg_cqt[i]=((1<<16)/RTjpeg_ciqt[i])>>3; | |
2753 } | |
2754 | |
2755 RTjpeg_lb8=0; | |
2756 while(RTjpeg_liqt[RTjpeg_ZZ[++RTjpeg_lb8]]<=8); | |
2757 RTjpeg_lb8--; | |
2758 RTjpeg_cb8=0; | |
2759 while(RTjpeg_ciqt[RTjpeg_ZZ[++RTjpeg_cb8]]<=8); | |
2760 RTjpeg_cb8--; | |
2761 | |
2762 RTjpeg_dct_init(); | |
2763 RTjpeg_idct_init(); | |
2764 RTjpeg_quant_init(); | |
2765 } | |
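/*
   Per-coefficient setup performed by the loop above, shown for a single
   hypothetical quantisation-table entry.  lqt is the forward quantiser and
   liqt the inverse quantiser; they are re-derived from each other so that
   liqt * (lqt << 3) is approximately 1<<16, i.e. dequantisation becomes a
   16-bit fixed-point multiply.  Illustrative only, kept out of the build:
*/
#if 0
static void RTjpeg_example_quant_pair(__u8 Q, __u32 tbl_entry,
                                      __s32 *lqt, __u32 *liqt)
{
  __u64 qual = (__u64)Q << (32-7);                     /* same scaling as above    */
  *lqt  = (__s32)((qual / ((__u64)tbl_entry << 16)) >> 3);
  if (*lqt == 0) *lqt = 1;                             /* avoid a zero divisor     */
  *liqt = (1 << 16) / (*lqt << 3);
  *lqt  = ((1 << 16) / *liqt) >> 3;                    /* keep the pair consistent */
}
#endif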
2766 | |
2767 /* | |
2768 | |
2769 External Function | |
2770 | |
2771 Initialise compression. | |
2772 | |
2773 Input: buf -> pointer to 128 ints in which the quant values are stored, | |
2774 to be passed back to init_decompress. | |
2775 width -> width of image | |
2776 height -> height of image | |
2777 Q -> quality factor (192=best, 32=worst) | |
2778 | |
2779 */ | |
2780 | |
2781 void RTjpeg_init_compress(__u32 *buf, int width, int height, __u8 Q) | |
2782 { | |
2783 int i; | |
2784 __u64 qual; | |
2785 | |
2786 RTjpeg_init_data(); | |
2787 | |
2788 RTjpeg_width=width; | |
2789 RTjpeg_height=height; | |
2790 RTjpeg_Ywidth = RTjpeg_width>>3; | |
2791 RTjpeg_Ysize=width * height; | |
2792 RTjpeg_Cwidth = RTjpeg_width>>4; | |
2793 RTjpeg_Csize= (width>>1) * height; | |
2794 | |
2795 qual=(__u64)Q<<(32-7); /* 32 bit FP, 255=2, 0=0 */ | |
2796 | |
2797 for(i=0; i<64; i++) | |
2798 { | |
2799 RTjpeg_lqt[i]=(__s32)((qual/((__u64)RTjpeg_lum_quant_tbl[i]<<16))>>3); | |
2800 if(RTjpeg_lqt[i]==0)RTjpeg_lqt[i]=1; | |
2801 RTjpeg_cqt[i]=(__s32)((qual/((__u64)RTjpeg_chrom_quant_tbl[i]<<16))>>3); | |
2802 if(RTjpeg_cqt[i]==0)RTjpeg_cqt[i]=1; | |
2803 RTjpeg_liqt[i]=(1<<16)/(RTjpeg_lqt[i]<<3); | |
2804 RTjpeg_ciqt[i]=(1<<16)/(RTjpeg_cqt[i]<<3); | |
2805 RTjpeg_lqt[i]=((1<<16)/RTjpeg_liqt[i])>>3; | |
2806 RTjpeg_cqt[i]=((1<<16)/RTjpeg_ciqt[i])>>3; | |
2807 } | |
2808 | |
2809 RTjpeg_lb8=0; | |
2810 while(RTjpeg_liqt[RTjpeg_ZZ[++RTjpeg_lb8]]<=8); | |
2811 RTjpeg_lb8--; | |
2812 RTjpeg_cb8=0; | |
2813 while(RTjpeg_ciqt[RTjpeg_ZZ[++RTjpeg_cb8]]<=8); | |
2814 RTjpeg_cb8--; | |
2815 | |
2816 RTjpeg_dct_init(); | |
2817 RTjpeg_quant_init(); | |
2818 | |
2819 for(i=0; i<64; i++) | |
14896 | 2820 buf[i]=le2me_32(RTjpeg_liqt[i]); |
3802 | 2821 for(i=0; i<64; i++) |
14896 | 2822 buf[64+i]=le2me_32(RTjpeg_ciqt[i]); |
3802 | 2823 } |
2824 | |
2825 void RTjpeg_init_decompress(__u32 *buf, int width, int height) | |
2826 { | |
2827 int i; | |
2828 | |
2829 RTjpeg_init_data(); | |
2830 | |
2831 RTjpeg_width=width; | |
2832 RTjpeg_height=height; | |
2833 RTjpeg_Ywidth = RTjpeg_width>>3; | |
2834 RTjpeg_Ysize=width * height; | |
2835 RTjpeg_Cwidth = RTjpeg_width>>4; | |
2836 RTjpeg_Csize= (width>>1) * height; | |
2837 | |
2838 for(i=0; i<64; i++) | |
2839 { | |
14896 | 2840 RTjpeg_liqt[i]=le2me_32(buf[i]); |
14896 | 2841 RTjpeg_ciqt[i]=le2me_32(buf[i+64]); |
3802 | 2842 } |
2843 | |
2844 RTjpeg_lb8=0; | |
2845 while(RTjpeg_liqt[RTjpeg_ZZ[++RTjpeg_lb8]]<=8); | |
2846 RTjpeg_lb8--; | |
2847 RTjpeg_cb8=0; | |
2848 while(RTjpeg_ciqt[RTjpeg_ZZ[++RTjpeg_cb8]]<=8); | |
2849 RTjpeg_cb8--; | |
2850 | |
2851 RTjpeg_idct_init(); | |
2852 | |
2853 // RTjpeg_color_init(); | |
2854 } | |
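/*
   The le2me_32() conversions above (from mpbswap.h) store and read the
   128-word quantisation table in little-endian byte order, so a stream
   written on a little-endian host decodes correctly on a big-endian one
   and vice versa.
*/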
2855 | |
2856 int RTjpeg_compressYUV420(__s8 *sp, unsigned char *bp) | |
2857 { | |
2858 __s8 * sb; | |
2859 register __s8 * bp1 = bp + (RTjpeg_width<<3); | |
2860 register __s8 * bp2 = bp + RTjpeg_Ysize; | |
2861 register __s8 * bp3 = bp2 + (RTjpeg_Csize>>1); | |
2862 register int i, j, k; | |
2863 | |
28298 | 2864 #if HAVE_MMX |
3802 | 2865 emms(); |
2866 #endif | |
2867 sb=sp; | |
2868 /* Y */ | |
2869 for(i=RTjpeg_height>>1; i; i-=8) | |
2870 { | |
2871 for(j=0, k=0; j<RTjpeg_width; j+=16, k+=8) | |
2872 { | |
2873 RTjpeg_dctY(bp+j, RTjpeg_block, RTjpeg_Ywidth); | |
2874 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
2875 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
2876 | |
2877 RTjpeg_dctY(bp+j+8, RTjpeg_block, RTjpeg_Ywidth); | |
2878 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
2879 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
2880 | |
2881 RTjpeg_dctY(bp1+j, RTjpeg_block, RTjpeg_Ywidth); | |
2882 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
2883 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
2884 | |
2885 RTjpeg_dctY(bp1+j+8, RTjpeg_block, RTjpeg_Ywidth); | |
2886 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
2887 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
2888 | |
2889 RTjpeg_dctY(bp2+k, RTjpeg_block, RTjpeg_Cwidth); | |
2890 RTjpeg_quant(RTjpeg_block, RTjpeg_cqt); | |
2891 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_cb8); | |
2892 | |
2893 RTjpeg_dctY(bp3+k, RTjpeg_block, RTjpeg_Cwidth); | |
2894 RTjpeg_quant(RTjpeg_block, RTjpeg_cqt); | |
2895 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_cb8); | |
2896 | |
2897 } | |
2898 bp+=RTjpeg_width<<4; | |
2899 bp1+=RTjpeg_width<<4; | |
2900 bp2+=RTjpeg_width<<2; | |
2901 bp3+=RTjpeg_width<<2; | |
2902 | |
2903 } | |
28298 | 2904 #if HAVE_MMX |
3802 | 2905 emms(); |
2906 #endif | |
2907 return (sp-sb); | |
2908 } | |
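/*
   Stream layout produced above: the frame is walked in 16x16 luma steps
   and, for each step, six 8x8 blocks are emitted in a fixed order -- the
   four luma blocks (top-left, top-right, bottom-left, bottom-right)
   followed by one block from each of the two chroma planes.  The
   decompression routines below consume the blocks in exactly the same
   order.
*/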
2909 | |
2910 int RTjpeg_compressYUV422(__s8 *sp, unsigned char *bp) | |
2911 { | |
2912 __s8 * sb; | |
2913 register __s8 * bp2 = bp + RTjpeg_Ysize; | |
2914 register __s8 * bp3 = bp2 + RTjpeg_Csize; | |
2915 register int i, j, k; | |
2916 | |
28298 | 2917 #if HAVE_MMX |
3802 | 2918 emms(); |
2919 #endif | |
2920 sb=sp; | |
2921 /* Y */ | |
2922 for(i=RTjpeg_height; i; i-=8) | |
2923 { | |
2924 for(j=0, k=0; j<RTjpeg_width; j+=16, k+=8) | |
2925 { | |
2926 RTjpeg_dctY(bp+j, RTjpeg_block, RTjpeg_Ywidth); | |
2927 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
2928 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
2929 | |
2930 RTjpeg_dctY(bp+j+8, RTjpeg_block, RTjpeg_Ywidth); | |
2931 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
2932 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
2933 | |
2934 RTjpeg_dctY(bp2+k, RTjpeg_block, RTjpeg_Cwidth); | |
2935 RTjpeg_quant(RTjpeg_block, RTjpeg_cqt); | |
2936 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_cb8); | |
2937 | |
2938 RTjpeg_dctY(bp3+k, RTjpeg_block, RTjpeg_Cwidth); | |
2939 RTjpeg_quant(RTjpeg_block, RTjpeg_cqt); | |
2940 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_cb8); | |
2941 | |
2942 } | |
2943 bp+=RTjpeg_width<<3; | |
2944 bp2+=RTjpeg_width<<2; | |
2945 bp3+=RTjpeg_width<<2; | |
2946 | |
2947 } | |
28298 | 2948 #if HAVE_MMX |
3802 | 2949 emms(); |
2950 #endif | |
2951 return (sp-sb); | |
2952 } | |
2953 | |
2954 int RTjpeg_compress8(__s8 *sp, unsigned char *bp) | |
2955 { | |
2956 __s8 * sb; | |
2957 int i, j; | |
2958 | |
28298 | 2959 #if HAVE_MMX |
3802 | 2960 emms(); |
2961 #endif | |
2962 | |
2963 sb=sp; | |
2964 /* Y */ | |
2965 for(i=0; i<RTjpeg_height; i+=8) | |
2966 { | |
2967 for(j=0; j<RTjpeg_width; j+=8) | |
2968 { | |
2969 RTjpeg_dctY(bp+j, RTjpeg_block, RTjpeg_width); | |
2970 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
2971 sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
2972 } | |
2973 bp+=RTjpeg_width; | |
2974 } | |
2975 | |
28298 | 2976 #if HAVE_MMX |
3802 | 2977 emms(); |
2978 #endif | |
2979 return (sp-sb); | |
2980 } | |
2981 | |
2982 void RTjpeg_decompressYUV422(__s8 *sp, __u8 *bp) | |
2983 { | |
2984 register __s8 * bp2 = bp + RTjpeg_Ysize; | |
2985 register __s8 * bp3 = bp2 + (RTjpeg_Csize); | |
2986 int i, j,k; | |
2987 | |
28298 | 2988 #if HAVE_MMX |
3802 | 2989 emms(); |
2990 #endif | |
2991 | |
2992 /* Y */ | |
2993 for(i=RTjpeg_height; i; i-=8) | |
2994 { | |
2995 for(k=0, j=0; j<RTjpeg_width; j+=16, k+=8) { | |
2996 if(*sp==-1)sp++; | |
2997 else | |
2998 { | |
2999 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_lb8, RTjpeg_liqt); | |
3000 RTjpeg_idct(bp+j, RTjpeg_block, RTjpeg_width); | |
3001 } | |
3002 if(*sp==-1)sp++; | |
3003 else | |
3004 { | |
3005 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_lb8, RTjpeg_liqt); | |
3006 RTjpeg_idct(bp+j+8, RTjpeg_block, RTjpeg_width); | |
3007 } | |
3008 if(*sp==-1)sp++; | |
3009 else | |
3010 { | |
3011 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_cb8, RTjpeg_ciqt); | |
3012 RTjpeg_idct(bp2+k, RTjpeg_block, RTjpeg_width>>1); | |
3013 } | |
3014 if(*sp==-1)sp++; | |
3015 else | |
3016 { | |
3017 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_cb8, RTjpeg_ciqt); | |
3018 RTjpeg_idct(bp3+k, RTjpeg_block, RTjpeg_width>>1); | |
3019 } | |
3020 } | |
3021 bp+=RTjpeg_width<<3; | |
3022 bp2+=RTjpeg_width<<2; | |
3023 bp3+=RTjpeg_width<<2; | |
3024 } | |
28298 | 3025 #if HAVE_MMX |
3802 | 3026 emms(); |
3027 #endif | |
3028 } | |
3029 | |
3030 void RTjpeg_decompressYUV420(__s8 *sp, __u8 *bp) | |
3031 { | |
3032 register __s8 * bp1 = bp + (RTjpeg_width<<3); | |
3033 register __s8 * bp2 = bp + RTjpeg_Ysize; | |
3034 register __s8 * bp3 = bp2 + (RTjpeg_Csize>>1); | |
3035 int i, j,k; | |
3036 | |
28298 | 3037 #if HAVE_MMX |
3802 | 3038 emms(); |
3039 #endif | |
3040 | |
3041 /* Y */ | |
3042 for(i=RTjpeg_height>>1; i; i-=8) | |
3043 { | |
3044 for(k=0, j=0; j<RTjpeg_width; j+=16, k+=8) { | |
3045 if(*sp==-1)sp++; | |
3046 else | |
3047 { | |
3048 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_lb8, RTjpeg_liqt); | |
3049 RTjpeg_idct(bp+j, RTjpeg_block, RTjpeg_width); | |
3050 } | |
3051 if(*sp==-1)sp++; | |
3052 else | |
3053 { | |
3054 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_lb8, RTjpeg_liqt); | |
3055 RTjpeg_idct(bp+j+8, RTjpeg_block, RTjpeg_width); | |
3056 } | |
3057 if(*sp==-1)sp++; | |
3058 else | |
3059 { | |
3060 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_lb8, RTjpeg_liqt); | |
3061 RTjpeg_idct(bp1+j, RTjpeg_block, RTjpeg_width); | |
3062 } | |
3063 if(*sp==-1)sp++; | |
3064 else | |
3065 { | |
3066 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_lb8, RTjpeg_liqt); | |
3067 RTjpeg_idct(bp1+j+8, RTjpeg_block, RTjpeg_width); | |
3068 } | |
3069 if(*sp==-1)sp++; | |
3070 else | |
3071 { | |
3072 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_cb8, RTjpeg_ciqt); | |
3073 RTjpeg_idct(bp2+k, RTjpeg_block, RTjpeg_width>>1); | |
3074 } | |
3075 if(*sp==-1)sp++; | |
3076 else | |
3077 { | |
3078 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_cb8, RTjpeg_ciqt); | |
3079 RTjpeg_idct(bp3+k, RTjpeg_block, RTjpeg_width>>1); | |
3080 } | |
3081 } | |
3082 bp+=RTjpeg_width<<4; | |
3083 bp1+=RTjpeg_width<<4; | |
3084 bp2+=RTjpeg_width<<2; | |
3085 bp3+=RTjpeg_width<<2; | |
3086 } | |
28298 | 3087 #if HAVE_MMX |
3802 | 3088 emms(); |
3089 #endif | |
3090 } | |
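/*
   Minimal usage sketch for the YUV420 path (illustrative only, kept out of
   the build).  The frame dimensions being multiples of 16, the quality
   value 192 and the buffer sizes are assumptions made for the example:
   frame and out hold width*height*3/2 bytes of planar YUV420, and stream
   must be large enough for one compressed frame.
*/
#if 0
static void RTjpeg_example_roundtrip(unsigned char *frame, unsigned char *out,
                                     __s8 *stream, int width, int height)
{
  __u32 tables[128];     /* quantisation tables handed from encoder to decoder */
  int bytes;

  RTjpeg_init_compress(tables, width, height, 192);
  bytes = RTjpeg_compressYUV420(stream, frame);       /* returns compressed size */

  RTjpeg_init_decompress(tables, width, height);
  RTjpeg_decompressYUV420(stream, out);
  (void)bytes;
}
#endif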
3091 | |
3092 void RTjpeg_decompress8(__s8 *sp, __u8 *bp) | |
3093 { | |
3094 int i, j; | |
3095 | |
28298 | 3096 #if HAVE_MMX |
3802 | 3097 emms(); |
3098 #endif | |
3099 | |
3100 /* Y */ | |
3101 for(i=0; i<RTjpeg_height; i+=8) | |
3102 { | |
3103 for(j=0; j<RTjpeg_width; j+=8) | |
3104 if(*sp==-1)sp++; | |
3105 else | |
3106 { | |
3107 sp+=RTjpeg_s2b(RTjpeg_block, sp, RTjpeg_lb8, RTjpeg_liqt); | |
3108 RTjpeg_idct(bp+j, RTjpeg_block, RTjpeg_width); | |
3109 } | |
3110 bp+=RTjpeg_width<<3; | |
3111 } | |
3112 } | |
3113 | |
3114 /* | |
3115 External Function | |
3116 | |
3117 Initialise additional data structures for motion compensation | |
3118 | |
3119 */ | |
3120 | |
3121 void RTjpeg_init_mcompress(void) | |
3122 { | |
3123 unsigned long tmp; | |
3124 | |
3125 if(!RTjpeg_old) | |
3126 { | |
3127 RTjpeg_old=malloc((4*RTjpeg_width*RTjpeg_height)+32); | |
3128 if (!RTjpeg_old) | |
3129 { | |
3130 fprintf(stderr, "RTjpeg: Could not allocate memory\n"); | |
3131 exit(-1); | |
3132 } | |
3133 tmp=(unsigned long)RTjpeg_old; | |
3134 tmp+=32; | |
3135 tmp=tmp>>5; | |
3136 RTjpeg_old=(__s16 *)(tmp<<5); | |
3137 } | |
14642 | 3138 memset(RTjpeg_old, 0, ((4*RTjpeg_width*RTjpeg_height))); |
3802 | 3139 } |
3140 | |
28298 | 3141 #if HAVE_MMX |
3802 | 3142 |
3143 int RTjpeg_bcomp(__s16 *old, mmx_t *mask) | |
3144 { | |
3145 int i; | |
3146 mmx_t *mold=(mmx_t *)old; | |
3147 mmx_t *mblock=(mmx_t *)RTjpeg_block; | |
3148 volatile mmx_t result; | |
12928 | 3149 static mmx_t neg={0xffffffffffffffffULL}; |
3802 | 3150 |
3151 movq_m2r(*mask, mm7); | |
3152 movq_m2r(neg, mm6); | |
3153 pxor_r2r(mm5, mm5); | |
3154 | |
3155 for(i=0; i<8; i++) | |
3156 { | |
3157 movq_m2r(*(mblock++), mm0); | |
3158 movq_m2r(*(mblock++), mm2); | |
3159 movq_m2r(*(mold++), mm1); | |
3160 movq_m2r(*(mold++), mm3); | |
3161 psubsw_r2r(mm1, mm0); | |
3162 psubsw_r2r(mm3, mm2); | |
3163 movq_r2r(mm0, mm1); | |
3164 movq_r2r(mm2, mm3); | |
3165 pcmpgtw_r2r(mm7, mm0); | |
3166 pcmpgtw_r2r(mm7, mm2); | |
3167 pxor_r2r(mm6, mm1); | |
3168 pxor_r2r(mm6, mm3); | |
3169 pcmpgtw_r2r(mm7, mm1); | |
3170 pcmpgtw_r2r(mm7, mm3); | |
3171 por_r2r(mm0, mm5); | |
3172 por_r2r(mm2, mm5); | |
3173 por_r2r(mm1, mm5); | |
3174 por_r2r(mm3, mm5); | |
3175 } | |
3176 movq_r2m(mm5, result); | |
3177 | |
3178 if(result.q) | |
3179 { | |
3180 // if(!RTjpeg_mtest) | |
3181 // for(i=0; i<16; i++)((__u64 *)old)[i]=((__u64 *)RTjpeg_block)[i]; | |
3182 return 0; | |
3183 } | |
3184 // printf("."); | |
3185 return 1; | |
3186 } | |
3187 | |
3188 #else | |
3189 int RTjpeg_bcomp(__s16 *old, __u16 *mask) | |
3190 { | |
3191 int i; | |
3192 | |
3193 for(i=0; i<64; i++) | |
3194 if(abs(old[i]-RTjpeg_block[i])>*mask) | |
3195 { | |
3196 if(!RTjpeg_mtest) | |
3197 for(i=0; i<16; i++)((__u64 *)old)[i]=((__u64 *)RTjpeg_block)[i]; | |
3198 return 0; | |
3199 } | |
3200 return 1; | |
3201 } | |
3202 #endif | |
3203 | |
3204 void RTjpeg_set_test(int i) | |
3205 { | |
3206 RTjpeg_mtest=i; | |
3207 } | |
3208 | |
3209 int RTjpeg_mcompressYUV420(__s8 *sp, unsigned char *bp, __u16 lmask, __u16 cmask) | |
3210 { | |
3211 __s8 * sb; | |
3212 //rh __s16 *block; | |
3213 register __s8 * bp1 = bp + (RTjpeg_width<<3); | |
3214 register __s8 * bp2 = bp + RTjpeg_Ysize; | |
3215 register __s8 * bp3 = bp2 + (RTjpeg_Csize>>1); | |
3216 register int i, j, k; | |
3217 | |
28298 | 3218 #if HAVE_MMX |
3802 | 3219 emms(); |
16653 | 3220 RTjpeg_lmask.uq=((__u64)lmask<<48)|((__u64)lmask<<32)|((__u64)lmask<<16)|lmask; |
16653 | 3221 RTjpeg_cmask.uq=((__u64)cmask<<48)|((__u64)cmask<<32)|((__u64)cmask<<16)|cmask; |
3802 | 3222 #else |
16661 | 3223 RTjpeg_lmask=lmask; |
16661 | 3224 RTjpeg_cmask=cmask; |
3802 | 3225 #endif |
3226 | |
3227 sb=sp; | |
3228 block=RTjpeg_old; | |
3229 /* Y */ | |
3230 for(i=RTjpeg_height>>1; i; i-=8) | |
3231 { | |
3232 for(j=0, k=0; j<RTjpeg_width; j+=16, k+=8) | |
3233 { | |
3234 RTjpeg_dctY(bp+j, RTjpeg_block, RTjpeg_Ywidth); | |
3235 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
3236 if(RTjpeg_bcomp(block, &RTjpeg_lmask)) | |
3237 { | |
3238 *((__u8 *)sp++)=255; | |
3239 } | |
3240 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
3241 block+=64; | |
3242 | |
3243 RTjpeg_dctY(bp+j+8, RTjpeg_block, RTjpeg_Ywidth); | |
3244 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
3245 if(RTjpeg_bcomp(block, &RTjpeg_lmask)) | |
3246 { | |
3247 *((__u8 *)sp++)=255; | |
3248 } | |
3249 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
3250 block+=64; | |
3251 | |
3252 RTjpeg_dctY(bp1+j, RTjpeg_block, RTjpeg_Ywidth); | |
3253 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
3254 if(RTjpeg_bcomp(block, &RTjpeg_lmask)) | |
3255 { | |
3256 *((__u8 *)sp++)=255; | |
3257 } | |
3258 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
3259 block+=64; | |
3260 | |
3261 RTjpeg_dctY(bp1+j+8, RTjpeg_block, RTjpeg_Ywidth); | |
3262 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
3263 if(RTjpeg_bcomp(block, &RTjpeg_lmask)) | |
3264 { | |
3265 *((__u8 *)sp++)=255; | |
3266 } | |
3267 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
3268 block+=64; | |
3269 | |
3270 RTjpeg_dctY(bp2+k, RTjpeg_block, RTjpeg_Cwidth); | |
3271 RTjpeg_quant(RTjpeg_block, RTjpeg_cqt); | |
3272 if(RTjpeg_bcomp(block, &RTjpeg_cmask)) | |
3273 { | |
3274 *((__u8 *)sp++)=255; | |
3275 } | |
3276 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_cb8); | |
3277 block+=64; | |
3278 | |
3279 RTjpeg_dctY(bp3+k, RTjpeg_block, RTjpeg_Cwidth); | |
3280 RTjpeg_quant(RTjpeg_block, RTjpeg_cqt); | |
3281 if(RTjpeg_bcomp(block, &RTjpeg_cmask)) | |
3282 { | |
3283 *((__u8 *)sp++)=255; | |
3284 } | |
3285 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_cb8); | |
3286 block+=64; | |
3287 } | |
3288 bp+=RTjpeg_width<<4; | |
3289 bp1+=RTjpeg_width<<4; | |
3290 bp2+=RTjpeg_width<<2; | |
3291 bp3+=RTjpeg_width<<2; | |
3292 | |
3293 } | |
3294 #if HAVE_MMX |
3802 | 3295 emms(); |
3296 #endif | |
3297 return (sp-sb); | |
3298 } | |
3299 | |
3300 | |
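/*
 * Inter-frame compressor for planar YUV 4:2:2 input: two 8x8 Y blocks plus
 * one block from each chroma plane per 16x8 luma area, using the same
 * compare-or-encode scheme as the 4:2:0 variant above.
 */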
3301 int RTjpeg_mcompressYUV422(__s8 *sp, unsigned char *bp, __u16 lmask, __u16 cmask) | |
3302 { | |
3303 __s8 * sb; | |
3304 __s16 *block; | |
3305 register __s8 * bp2; | |
3306 register __s8 * bp3; | |
3307 register int i, j, k; | |
3308 | |
3309 #if HAVE_MMX |
3802 | 3310 emms(); |
3311 RTjpeg_lmask.uq=((__u64)lmask<<48)|((__u64)lmask<<32)|((__u64)lmask<<16)|lmask; |
3312 RTjpeg_cmask.uq=((__u64)cmask<<48)|((__u64)cmask<<32)|((__u64)cmask<<16)|cmask; |
3802 | 3313 #else |
3314 RTjpeg_lmask=lmask; |
3315 RTjpeg_cmask=cmask; |
3802 | 3316 #endif |
3317 | |
3318 /* bp already points at the start of the Y plane */ |
3319 bp2 = bp + RTjpeg_Ysize; |
3320 bp3 = bp2 + RTjpeg_Csize; | |
3321 | |
3322 sb=sp; | |
3323 block=RTjpeg_old; | |
3324 /* Y */ | |
3325 for(i=RTjpeg_height; i; i-=8) | |
3326 { | |
3327 for(j=0, k=0; j<RTjpeg_width; j+=16, k+=8) | |
3328 { | |
3329 RTjpeg_dctY(bp+j, RTjpeg_block, RTjpeg_Ywidth); | |
3330 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
3331 if(RTjpeg_bcomp(block, &RTjpeg_lmask)) | |
3332 { | |
3333 *((__u8 *)sp++)=255; | |
3334 } | |
3335 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
3336 block+=64; | |
3337 | |
3338 RTjpeg_dctY(bp+j+8, RTjpeg_block, RTjpeg_Ywidth); | |
3339 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
3340 if(RTjpeg_bcomp(block, &RTjpeg_lmask)) | |
3341 { | |
3342 *((__u8 *)sp++)=255; | |
3343 } | |
3344 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
3345 block+=64; | |
3346 | |
3347 RTjpeg_dctY(bp2+k, RTjpeg_block, RTjpeg_Cwidth); | |
3348 RTjpeg_quant(RTjpeg_block, RTjpeg_cqt); | |
3349 if(RTjpeg_bcomp(block, &RTjpeg_cmask)) | |
3350 { | |
3351 *((__u8 *)sp++)=255; | |
3352 } | |
3353 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_cb8); | |
3354 block+=64; | |
3355 | |
3356 RTjpeg_dctY(bp3+k, RTjpeg_block, RTjpeg_Cwidth); | |
3357 RTjpeg_quant(RTjpeg_block, RTjpeg_cqt); | |
3358 if(RTjpeg_bcomp(block, &RTjpeg_cmask)) | |
3359 { | |
3360 *((__u8 *)sp++)=255; | |
3361 } | |
3362 else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_cb8); | |
3363 block+=64; | |
3364 | |
3365 } | |
3366 bp+=RTjpeg_width<<3; | |
3367 bp2+=RTjpeg_width<<2; | |
3368 bp3+=RTjpeg_width<<2; | |
3369 } | |
3370 printf("%d\n", (int)(block - RTjpeg_old)); /* debug: reference coefficients consumed */ |
3371 #if HAVE_MMX |
3802 | 3372 emms(); |
3373 #endif | |
3374 return (sp-sb); | |
3375 } | |
3376 | |
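/*
 * Inter-frame compressor for a single 8-bit plane (luma only / greyscale),
 * applying the luma quantiser and luma mask to every 8x8 block.
 */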
3377 int RTjpeg_mcompress8(__s8 *sp, unsigned char *bp, __u16 lmask) | |
3378 { | |
3379 __s8 * sb; | |
3380 __s16 *block; | |
3381 int i, j; | |
3382 | |
3383 #if HAVE_MMX |
3802 | 3384 emms(); |
3385 RTjpeg_lmask.uq=((__u64)lmask<<48)|((__u64)lmask<<32)|((__u64)lmask<<16)|lmask; |
3802 | 3386 #else |
3387 RTjpeg_lmask=lmask; |
3802 | 3388 #endif |
3389 | |
3390 | |
3391 sb=sp; | |
3392 block=RTjpeg_old; | |
3393 /* Y */ | |
3394 for(i=0; i<RTjpeg_height; i+=8) | |
3395 { | |
3396 for(j=0; j<RTjpeg_width; j+=8) | |
3397 { | |
3398 RTjpeg_dctY(bp+j, RTjpeg_block, RTjpeg_width); | |
3399 RTjpeg_quant(RTjpeg_block, RTjpeg_lqt); | |
3400 if(RTjpeg_bcomp(block, &RTjpeg_lmask)) | |
3401 { | |
3402 *((__u8 *)sp++)=255; | |
3403 // printf("* %d ", sp[-1]); | |
3404 } else sp+=RTjpeg_b2s(RTjpeg_block, sp, RTjpeg_lb8); | |
3405 block+=64; | |
3406 } | |
3407 bp+=RTjpeg_width<<3; | |
3408 } | |
3409 #if HAVE_MMX |
3802 | 3410 emms(); |
3411 #endif | |
3412 return (sp-sb); | |
3413 } | |
3414 | |
3415 void RTjpeg_color_init(void) | |
3416 { | |
3417 } | |
3418 | |
3419 #define KcrR 76284 | |
3420 #define KcrG 53281 | |
3421 #define KcbG 25625 | |
3422 #define KcbB 132252 | |
3423 #define Ky 76284 | |
3424 | |
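/*
 * Fixed-point (x65536) YCbCr->RGB coefficients; the values appear to follow
 * the ITU-R BT.601 conversion with 16..235 luma range (e.g. Ky = 1.164<<16).
 *
 * RTjpeg_yuv422rgb: planar 4:2:2 to packed RGB24, one output row per pass,
 * with per-component clamping to 0..255.  Note that the stride argument is
 * not used in this variant.
 */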
3425 void RTjpeg_yuv422rgb(__u8 *buf, __u8 *rgb, int stride) | |
3426 { | |
3427 int tmp; | |
3428 int i, j; | |
3429 __s32 y, crR, crG, cbG, cbB; | |
3430 __u8 *bufcr, *bufcb, *bufy, *bufoute; | |
3431 int yskip; | |
3432 | |
3433 yskip=RTjpeg_width; | |
3434 | |
3435 bufcb=&buf[RTjpeg_width*RTjpeg_height]; | |
3436 bufcr=&buf[RTjpeg_width*RTjpeg_height+(RTjpeg_width*RTjpeg_height)/2]; | |
3437 bufy=&buf[0]; | |
3438 bufoute=rgb; | |
3439 | |
3440 for(i=0; i<(RTjpeg_height); i++) | |
3441 { | |
3442 for(j=0; j<RTjpeg_width; j+=2) | |
3443 { | |
3444 crR=(*bufcr-128)*KcrR; | |
3445 crG=(*(bufcr++)-128)*KcrG; | |
3446 cbG=(*bufcb-128)*KcbG; | |
3447 cbB=(*(bufcb++)-128)*KcbB; | |
3448 | |
3449 y=(bufy[j]-16)*Ky; | |
3450 | |
3451 tmp=(y+crR)>>16; | |
3452 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3453 tmp=(y-crG-cbG)>>16; | |
3454 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3455 tmp=(y+cbB)>>16; | |
3456 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3457 | |
3458 y=(bufy[j+1]-16)*Ky; | |
3459 | |
3460 tmp=(y+crR)>>16; | |
3461 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3462 tmp=(y-crG-cbG)>>16; | |
3463 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3464 tmp=(y+cbB)>>16; | |
3465 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3466 | |
3467 } | |
3468 bufy+=yskip; | |
3469 } | |
3470 } | |
3471 | |
3472 | |
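/*
 * RTjpeg_yuv420rgb: planar 4:2:0 to packed RGB24.  Two output rows are
 * produced per pass (bufoute = even row, bufouto = odd row) and every result
 * is clamped to 0..255 before being stored.
 */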
3473 void RTjpeg_yuv420rgb(__u8 *buf, __u8 *rgb, int stride) | |
3474 { | |
3475 int tmp; | |
3476 int i, j; | |
3477 __s32 y, crR, crG, cbG, cbB; | |
3478 __u8 *bufcr, *bufcb, *bufy, *bufoute, *bufouto; | |
3479 int oskip, yskip; | |
3480 | |
3481 if(stride==0) | |
3482 oskip=RTjpeg_width*3; | |
3483 else | |
3484 oskip=2*stride-RTjpeg_width*3; | |
3485 | |
3486 yskip=RTjpeg_width; | |
3487 | |
3488 bufcb=&buf[RTjpeg_width*RTjpeg_height]; | |
3489 bufcr=&buf[RTjpeg_width*RTjpeg_height+(RTjpeg_width*RTjpeg_height)/4]; | |
3490 bufy=&buf[0]; | |
3491 bufoute=rgb; | |
3492 bufouto=rgb+RTjpeg_width*3; | |
3493 | |
3494 for(i=0; i<(RTjpeg_height>>1); i++) | |
3495 { | |
3496 for(j=0; j<RTjpeg_width; j+=2) | |
3497 { | |
3498 crR=(*bufcr-128)*KcrR; | |
3499 crG=(*(bufcr++)-128)*KcrG; | |
3500 cbG=(*bufcb-128)*KcbG; | |
3501 cbB=(*(bufcb++)-128)*KcbB; | |
3502 | |
3503 y=(bufy[j]-16)*Ky; | |
3504 | |
3505 tmp=(y+crR)>>16; | |
3506 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3507 tmp=(y-crG-cbG)>>16; | |
3508 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3509 tmp=(y+cbB)>>16; | |
3510 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3511 | |
3512 y=(bufy[j+1]-16)*Ky; | |
3513 | |
3514 tmp=(y+crR)>>16; | |
3515 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3516 tmp=(y-crG-cbG)>>16; | |
3517 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3518 tmp=(y+cbB)>>16; | |
3519 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3520 | |
3521 y=(bufy[j+yskip]-16)*Ky; | |
3522 | |
3523 tmp=(y+crR)>>16; | |
3524 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3525 tmp=(y-crG-cbG)>>16; | |
3526 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3527 tmp=(y+cbB)>>16; | |
3528 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3529 | |
3530 y=(bufy[j+1+yskip]-16)*Ky; | |
3531 | |
3532 tmp=(y+crR)>>16; | |
3533 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3534 tmp=(y-crG-cbG)>>16; | |
3535 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3536 tmp=(y+cbB)>>16; | |
3537 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3538 | |
3539 } | |
3540 bufoute+=oskip; | |
3541 bufouto+=oskip; | |
3542 bufy+=yskip<<1; | |
3543 } | |
3544 } | |
3545 | |
3546 | |
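/*
 * RTjpeg_yuvrgb32: planar 4:2:0 to 32-bit packed output, written as
 * B, G, R followed by one skipped (unwritten) padding byte per pixel.
 */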
3547 void RTjpeg_yuvrgb32(__u8 *buf, __u8 *rgb, int stride) | |
3548 { | |
3549 int tmp; | |
3550 int i, j; | |
3551 __s32 y, crR, crG, cbG, cbB; | |
3552 __u8 *bufcr, *bufcb, *bufy, *bufoute, *bufouto; | |
3553 int oskip, yskip; | |
3554 | |
3555 if(stride==0) | |
3556 oskip=RTjpeg_width*4; | |
3557 else | |
3558 oskip = 2*stride-RTjpeg_width*4; | |
3559 yskip=RTjpeg_width; | |
3560 | |
3561 bufcb=&buf[RTjpeg_width*RTjpeg_height]; | |
3562 bufcr=&buf[RTjpeg_width*RTjpeg_height+(RTjpeg_width*RTjpeg_height)/4]; /* 4:2:0: Cr follows the quarter-size Cb plane */ |
3563 bufy=&buf[0]; | |
3564 bufoute=rgb; | |
3565 bufouto=rgb+RTjpeg_width*4; | |
3566 | |
3567 for(i=0; i<(RTjpeg_height>>1); i++) | |
3568 { | |
3569 for(j=0; j<RTjpeg_width; j+=2) | |
3570 { | |
3571 crR=(*bufcr-128)*KcrR; | |
3572 crG=(*(bufcr++)-128)*KcrG; | |
3573 cbG=(*bufcb-128)*KcbG; | |
3574 cbB=(*(bufcb++)-128)*KcbB; | |
3575 | |
3576 y=(bufy[j]-16)*Ky; | |
3577 | |
3578 tmp=(y+cbB)>>16; | |
3579 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3580 tmp=(y-crG-cbG)>>16; | |
3581 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3582 tmp=(y+crR)>>16; | |
3583 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3584 bufoute++; | |
3585 | |
3586 y=(bufy[j+1]-16)*Ky; | |
3587 | |
3588 tmp=(y+cbB)>>16; | |
3589 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3590 tmp=(y-crG-cbG)>>16; | |
3591 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3592 tmp=(y+crR)>>16; | |
3593 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3594 bufoute++; | |
3595 | |
3596 y=(bufy[j+yskip]-16)*Ky; | |
3597 | |
3598 tmp=(y+cbB)>>16; | |
3599 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3600 tmp=(y-crG-cbG)>>16; | |
3601 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3602 tmp=(y+crR)>>16; | |
3603 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3604 bufouto++; | |
3605 | |
3606 y=(bufy[j+1+yskip]-16)*Ky; | |
3607 | |
3608 tmp=(y+cbB)>>16; | |
3609 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3610 tmp=(y-crG-cbG)>>16; | |
3611 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3612 tmp=(y+crR)>>16; | |
3613 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3614 bufouto++; | |
3615 | |
3616 } | |
3617 bufoute+=oskip; | |
3618 bufouto+=oskip; | |
3619 bufy+=yskip<<1; | |
3620 } | |
3621 } | |
3622 | |
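/*
 * RTjpeg_yuvrgb24: planar 4:2:0 to packed 24-bit output in B, G, R byte
 * order (RTjpeg_yuv420rgb above writes R, G, B).
 */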
3623 void RTjpeg_yuvrgb24(__u8 *buf, __u8 *rgb, int stride) | |
3624 { | |
3625 int tmp; | |
3626 int i, j; | |
3627 __s32 y, crR, crG, cbG, cbB; | |
3628 __u8 *bufcr, *bufcb, *bufy, *bufoute, *bufouto; | |
3629 int oskip, yskip; | |
3630 | |
3631 if(stride==0) | |
3632 oskip=RTjpeg_width*3; | |
3633 else | |
3634 oskip=2*stride - RTjpeg_width*3; | |
3635 | |
3636 yskip=RTjpeg_width; | |
3637 | |
3638 bufcb=&buf[RTjpeg_width*RTjpeg_height]; | |
3639 bufcr=&buf[RTjpeg_width*RTjpeg_height+(RTjpeg_width*RTjpeg_height)/4]; | |
3640 bufy=&buf[0]; | |
3641 bufoute=rgb; | |
3642 bufouto=rgb+RTjpeg_width*3; | |
3643 | |
3644 for(i=0; i<(RTjpeg_height>>1); i++) | |
3645 { | |
3646 for(j=0; j<RTjpeg_width; j+=2) | |
3647 { | |
3648 crR=(*bufcr-128)*KcrR; | |
3649 crG=(*(bufcr++)-128)*KcrG; | |
3650 cbG=(*bufcb-128)*KcbG; | |
3651 cbB=(*(bufcb++)-128)*KcbB; | |
3652 | |
3653 y=(bufy[j]-16)*Ky; | |
3654 | |
3655 tmp=(y+cbB)>>16; | |
3656 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3657 tmp=(y-crG-cbG)>>16; | |
3658 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3659 tmp=(y+crR)>>16; | |
3660 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3661 | |
3662 y=(bufy[j+1]-16)*Ky; | |
3663 | |
3664 tmp=(y+cbB)>>16; | |
3665 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3666 tmp=(y-crG-cbG)>>16; | |
3667 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3668 tmp=(y+crR)>>16; | |
3669 *(bufoute++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3670 | |
3671 y=(bufy[j+yskip]-16)*Ky; | |
3672 | |
3673 tmp=(y+cbB)>>16; | |
3674 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3675 tmp=(y-crG-cbG)>>16; | |
3676 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3677 tmp=(y+crR)>>16; | |
3678 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3679 | |
3680 y=(bufy[j+1+yskip]-16)*Ky; | |
3681 | |
3682 tmp=(y+cbB)>>16; | |
3683 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3684 tmp=(y-crG-cbG)>>16; | |
3685 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3686 tmp=(y+crR)>>16; | |
3687 *(bufouto++)=(tmp>255)?255:((tmp<0)?0:tmp); | |
3688 | |
3689 } | |
3690 bufoute+=oskip; | |
3691 bufouto+=oskip; | |
3692 bufy+=yskip<<1; | |
3693 } | |
3694 } | |
3695 | |
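/*
 * RTjpeg_yuvrgb16: planar 4:2:0 to 16-bit RGB565.  Each clamped 8-bit
 * component is truncated (R and B to 5 bits, G to 6 bits), packed into a
 * 16-bit word and stored low byte first (little endian).
 */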
3696 void RTjpeg_yuvrgb16(__u8 *buf, __u8 *rgb, int stride) | |
3697 { | |
3698 int tmp; | |
3699 int i, j; | |
3700 __s32 y, crR, crG, cbG, cbB; | |
3701 __u8 *bufcr, *bufcb, *bufy, *bufoute, *bufouto; | |
3702 int oskip, yskip; | |
3703 unsigned char r, g, b; | |
3704 | |
3705 if(stride==0) | |
3706 oskip=RTjpeg_width*2; | |
3707 else | |
3708 oskip=2*stride-RTjpeg_width*2; | |
3709 | |
3710 yskip=RTjpeg_width; | |
3711 | |
3712 bufcb=&buf[RTjpeg_width*RTjpeg_height]; | |
3713 bufcr=&buf[RTjpeg_width*RTjpeg_height+(RTjpeg_width*RTjpeg_height)/4]; | |
3714 bufy=&buf[0]; | |
3715 bufoute=rgb; | |
3716 bufouto=rgb+RTjpeg_width*2; | |
3717 | |
3718 for(i=0; i<(RTjpeg_height>>1); i++) | |
3719 { | |
3720 for(j=0; j<RTjpeg_width; j+=2) | |
3721 { | |
3722 crR=(*bufcr-128)*KcrR; | |
3723 crG=(*(bufcr++)-128)*KcrG; | |
3724 cbG=(*bufcb-128)*KcbG; | |
3725 cbB=(*(bufcb++)-128)*KcbB; | |
3726 | |
3727 y=(bufy[j]-16)*Ky; | |
3728 | |
3729 tmp=(y+cbB)>>16; | |
3730 b=(tmp>255)?255:((tmp<0)?0:tmp); | |
3731 tmp=(y-crG-cbG)>>16; | |
3732 g=(tmp>255)?255:((tmp<0)?0:tmp); | |
3733 tmp=(y+crR)>>16; | |
3734 r=(tmp>255)?255:((tmp<0)?0:tmp); | |
3735 tmp=(int)((int)b >> 3); | |
3736 tmp|=(int)(((int)g >> 2) << 5); | |
3737 tmp|=(int)(((int)r >> 3) << 11); | |
3738 *(bufoute++)=tmp&0xff; | |
3739 *(bufoute++)=tmp>>8; | |
3740 | |
3741 | |
3742 y=(bufy[j+1]-16)*Ky; | |
3743 | |
3744 tmp=(y+cbB)>>16; | |
3745 b=(tmp>255)?255:((tmp<0)?0:tmp); | |
3746 tmp=(y-crG-cbG)>>16; | |
3747 g=(tmp>255)?255:((tmp<0)?0:tmp); | |
3748 tmp=(y+crR)>>16; | |
3749 r=(tmp>255)?255:((tmp<0)?0:tmp); | |
3750 tmp=(int)((int)b >> 3); | |
3751 tmp|=(int)(((int)g >> 2) << 5); | |
3752 tmp|=(int)(((int)r >> 3) << 11); | |
3753 *(bufoute++)=tmp&0xff; | |
3754 *(bufoute++)=tmp>>8; | |
3755 | |
3756 y=(bufy[j+yskip]-16)*Ky; | |
3757 | |
3758 tmp=(y+cbB)>>16; | |
3759 b=(tmp>255)?255:((tmp<0)?0:tmp); | |
3760 tmp=(y-crG-cbG)>>16; | |
3761 g=(tmp>255)?255:((tmp<0)?0:tmp); | |
3762 tmp=(y+crR)>>16; | |
3763 r=(tmp>255)?255:((tmp<0)?0:tmp); | |
3764 tmp=(int)((int)b >> 3); | |
3765 tmp|=(int)(((int)g >> 2) << 5); | |
3766 tmp|=(int)(((int)r >> 3) << 11); | |
3767 *(bufouto++)=tmp&0xff; | |
3768 *(bufouto++)=tmp>>8; | |
3769 | |
3770 y=(bufy[j+1+yskip]-16)*Ky; | |
3771 | |
3772 tmp=(y+cbB)>>16; | |
3773 b=(tmp>255)?255:((tmp<0)?0:tmp); | |
3774 tmp=(y-crG-cbG)>>16; | |
3775 g=(tmp>255)?255:((tmp<0)?0:tmp); | |
3776 tmp=(y+crR)>>16; | |
3777 r=(tmp>255)?255:((tmp<0)?0:tmp); | |
3778 tmp=(int)((int)b >> 3); | |
3779 tmp|=(int)(((int)g >> 2) << 5); | |
3780 tmp|=(int)(((int)r >> 3) << 11); | |
3781 *(bufouto++)=tmp&0xff; | |
3782 *(bufouto++)=tmp>>8; | |
3783 | |
3784 } | |
3785 bufoute+=oskip; | |
3786 bufouto+=oskip; | |
3787 bufy+=yskip<<1; | |
3788 } | |
3789 } | |
3790 | |
3791 /* fix stride */ | |
3792 | |
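/*
 * RTjpeg_yuvrgb8: greyscale output -- simply copies the luma plane; the
 * stride argument is ignored (hence the "fix stride" note above).
 */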
3793 void RTjpeg_yuvrgb8(__u8 *buf, __u8 *rgb, int stride) | |
3794 { | |
9763 | 3795 memcpy(rgb, buf, RTjpeg_width*RTjpeg_height); |
3802 | 3796 } |
3797 |