Mercurial > libavcodec.hg
comparison x86/dsputilenc_yasm.asm @ 12497:c5ffa8b81f9c libavcodec
Move sse16_sse2() from inline asm to yasm. It is one of the functions causing
Win64/FATE issues.
author | rbultje |
---|---|
date | Fri, 17 Sep 2010 01:44:17 +0000 |
parents | |
children | c997f09d1e10 |
comparison
equal
deleted
inserted
replaced
12496:d9b601af5e5e | 12497:c5ffa8b81f9c |
---|---|
;*****************************************************************************
;* MMX optimized DSP utils
;*****************************************************************************
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************
23 | |
%include "x86inc.asm"
%include "x86util.asm"

SECTION .text

INIT_XMM
;-----------------------------------------------------------------------------
; int sse16_sse2(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
;
; Sum of squared errors over an h-row, 16-pixel-wide block.
; In:  r0 = v (context pointer, unused here)
;      r1 = pix1, r2 = pix2 (source blocks; pix2 stride == pix1 stride)
;      r3 = line_size (byte stride), r4 = h (row count; assumed even,
;           since two rows are consumed per iteration)
; Out: eax = sum over all pixels of (pix1[i] - pix2[i])^2
; Uses xmm0-xmm7 (declared via cglobal's third argument for Win64 saving).
;-----------------------------------------------------------------------------
cglobal sse16_sse2, 5, 5, 8
    shr      r4, 1              ; loop processes two rows per iteration
    pxor     m0, m0             ; m0 = 0 (zero source for byte->word unpack)
    pxor     m7, m7             ; m7 = running dword sums

.next2lines: ; FIXME why are these unaligned movs? pix1[] is aligned
    movu     m1, [r1   ]        ; m1 = pix1[0][0-15]
    movu     m2, [r2   ]        ; m2 = pix2[0][0-15]
    movu     m3, [r1+r3]        ; m3 = pix1[1][0-15]
    movu     m4, [r2+r3]        ; m4 = pix2[1][0-15]

    ; absolute difference per byte: psubusb saturates negatives to 0,
    ; so (a -us b) | (b -us a) == |a - b|
    mova     m5, m1
    mova     m6, m3
    psubusb  m1, m2
    psubusb  m3, m4
    psubusb  m2, m5
    psubusb  m4, m6

    por      m2, m1             ; m2 = |pix1[0] - pix2[0]| per byte
    por      m4, m3             ; m4 = |pix1[1] - pix2[1]| per byte

    ; widen bytes to words so they can be squared without overflow
    mova     m1, m2
    mova     m3, m4

    punpckhbw m2, m0            ; m2 = high 8 diffs of row 0, as words
    punpckhbw m4, m0            ; m4 = high 8 diffs of row 1, as words
    punpcklbw m1, m0            ; m1 = low 8 diffs of row 0, as words
    punpcklbw m3, m0            ; m3 = low 8 diffs of row 1, as words

    ; square and pairwise-add words into dwords: pmaddwd(x, x) = x0^2+x1^2 ...
    pmaddwd  m2, m2
    pmaddwd  m4, m4
    pmaddwd  m1, m1
    pmaddwd  m3, m3

    lea      r1, [r1+r3*2]      ; pix1 += 2*line_size
    lea      r2, [r2+r3*2]      ; pix2 += 2*line_size

    paddd    m1, m2
    paddd    m3, m4
    paddd    m7, m1             ; accumulate into running sums
    paddd    m7, m3

    dec      r4
    jnz      .next2lines

    ; horizontal reduction of the four dword partial sums in m7
    mova     m1, m7
    psrldq   m7, 8              ; shift hi qword to lo
    paddd    m7, m1
    mova     m1, m7
    psrldq   m7, 4              ; shift hi dword to lo
    paddd    m7, m1
    movd     eax, m7            ; return value
    RET