/* i386/dsputil_mmx.c @ 2237:d43321e67acd (libavcodec) */

/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"
#include "mmx.h"

//#undef NDEBUG
//#include <assert.h>

extern const uint8_t ff_h263_loop_filter_strength[32];

int mm_flags; /* multimedia extension flags */

/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3  attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_5  attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;

#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it is better to access constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
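
/*
 * How the PIC variants above build their constants without touching
 * memory (a "movq constant" would need a relocation under -fPIC):
 *   pcmpeqd  r, r   ->  r = 0xFFFFFFFFFFFFFFFF   (all ones)
 *   psrlw $15, r    ->  r = 0x0001000100010001   (MOVQ_WONE)
 *   packuswb r, r   ->  r = 0x0101010101010101   (MOVQ_BONE)
 *   psllw $1, r     ->  r = 0x0002000200020002   (MOVQ_WTWO)
 */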

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd.h"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
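
/*
 * The two blocks above are a macro template: dsputil_mmx_rnd.h expands
 * DEF()/SET_RND/PAVGBP()/PAVGB() into concrete functions, so including
 * it once with the _no_rnd_ bindings and once with the rounding ones
 * stamps out both families of put/avg routines from a single source.
 * Roughly what an expansion looks like (sketch, names illustrative):
 *
 *   static void DEF(put, pixels8)(uint8_t *block, const uint8_t *pixels,
 *                                 int line_size, int h)
 *   {
 *       ... SET_RND(mm6); ... PAVGBP(...); ...
 *   }
 */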

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
/* for Athlons PAVGUSB is preferred */
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg.h"

#undef DEF
#undef PAVGB

/***********************************/
/* standard MMX */

#ifdef CONFIG_ENCODERS
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    asm volatile(
        "movl $-128, %%eax \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%0, %2), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "movq %%mm0, (%1, %%eax)\n\t"
        "movq %%mm1, 8(%1, %%eax)\n\t"
        "movq %%mm2, 16(%1, %%eax)\n\t"
        "movq %%mm3, 24(%1, %%eax)\n\t"
        "addl %3, %0 \n\t"
        "addl $32, %%eax \n\t"
        "js 1b \n\t"
        : "+r" (pixels)
        : "r" (block+64), "r" (line_size), "r" (line_size*2)
        : "%eax"
    );
}
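
/* Scalar equivalent of get_pixels_mmx() above, for reference: widen an
 * 8x8 block of bytes into 64 16-bit coefficients (sketch, not compiled). */
#if 0
static void get_pixels_ref(DCTELEM *block, const uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8*i + j] = pixels[j];   /* the punpck{l,h}bw against zeroed mm7 */
        pixels += line_size;
    }
}
#endif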

static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movl $-128, %%eax \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        "movq (%0), %%mm0 \n\t"
        "movq (%1), %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "psubw %%mm2, %%mm0 \n\t"
        "psubw %%mm3, %%mm1 \n\t"
        "movq %%mm0, (%2, %%eax)\n\t"
        "movq %%mm1, 8(%2, %%eax)\n\t"
        "addl %3, %0 \n\t"
        "addl %3, %1 \n\t"
        "addl $16, %%eax \n\t"
        "jnz 1b \n\t"
        : "+r" (s1), "+r" (s2)
        : "r" (block+64), "r" (stride)
        : "%eax"
    );
}
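
/* Scalar equivalent of diff_pixels_mmx() above (sketch, not compiled):
 * the 16-bit difference of two 8x8 byte blocks. */
#if 0
static void diff_pixels_ref(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
            block[8*i + j] = s1[j] - s2[j];
        s1 += stride;
        s2 += stride;
    }
}
#endif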

#endif //CONFIG_ENCODERS

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm __volatile(
        "movq %3, %%mm0\n\t"
        "movq 8%3, %%mm1\n\t"
        "movq 16%3, %%mm2\n\t"
        "movq 24%3, %%mm3\n\t"
        "movq 32%3, %%mm4\n\t"
        "movq 40%3, %%mm5\n\t"
        "movq 48%3, %%mm6\n\t"
        "movq 56%3, %%mm7\n\t"
        "packuswb %%mm1, %%mm0\n\t"
        "packuswb %%mm3, %%mm2\n\t"
        "packuswb %%mm5, %%mm4\n\t"
        "packuswb %%mm7, %%mm6\n\t"
        "movq %%mm0, (%0)\n\t"
        "movq %%mm2, (%0, %1)\n\t"
        "movq %%mm4, (%0, %1, 2)\n\t"
        "movq %%mm6, (%0, %2)\n\t"
        ::"r" (pix), "r" (line_size), "r" (line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;

    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus we use "r"(p) here instead
    __asm __volatile(
        "movq (%3), %%mm0\n\t"
        "movq 8(%3), %%mm1\n\t"
        "movq 16(%3), %%mm2\n\t"
        "movq 24(%3), %%mm3\n\t"
        "movq 32(%3), %%mm4\n\t"
        "movq 40(%3), %%mm5\n\t"
        "movq 48(%3), %%mm6\n\t"
        "movq 56(%3), %%mm7\n\t"
        "packuswb %%mm1, %%mm0\n\t"
        "packuswb %%mm3, %%mm2\n\t"
        "packuswb %%mm5, %%mm4\n\t"
        "packuswb %%mm7, %%mm6\n\t"
        "movq %%mm0, (%0)\n\t"
        "movq %%mm2, (%0, %1)\n\t"
        "movq %%mm4, (%0, %1, 2)\n\t"
        "movq %%mm6, (%0, %2)\n\t"
        ::"r" (pix), "r" (line_size), "r" (line_size*3), "r"(p)
        :"memory");
}
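
/* The packuswb above saturates each signed 16-bit coefficient to the
 * unsigned byte range; scalar equivalent (sketch, not compiled): */
#if 0
static void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i, j;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            int v = block[8*i + j];
            pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
#endif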

static unsigned char __align8 vector128[8] =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}
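
/* Why the paddb works: packsswb saturates each coefficient to
 * [-128,127], and adding the 0x80 byte vector (mod 256) maps that
 * range onto [0,255], i.e. pixel = clip(coef, -128, 127) + 128.
 * Scalar sketch (not compiled): */
#if 0
static inline uint8_t signed_clamp_ref(int coef)
{
    int v = coef < -128 ? -128 : coef > 127 ? 127 : coef;
    return (uint8_t)(v + 128);
}
#endif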

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm __volatile(
            "movq (%2), %%mm0\n\t"
            "movq 8(%2), %%mm1\n\t"
            "movq 16(%2), %%mm2\n\t"
            "movq 24(%2), %%mm3\n\t"
            "movq %0, %%mm4\n\t"
            "movq %1, %%mm6\n\t"
            "movq %%mm4, %%mm5\n\t"
            "punpcklbw %%mm7, %%mm4\n\t"
            "punpckhbw %%mm7, %%mm5\n\t"
            "paddsw %%mm4, %%mm0\n\t"
            "paddsw %%mm5, %%mm1\n\t"
            "movq %%mm6, %%mm5\n\t"
            "punpcklbw %%mm7, %%mm6\n\t"
            "punpckhbw %%mm7, %%mm5\n\t"
            "paddsw %%mm6, %%mm2\n\t"
            "paddsw %%mm5, %%mm3\n\t"
            "packuswb %%mm1, %%mm0\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            "movq %%mm0, %0\n\t"
            "movq %%mm2, %1\n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%eax \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"(line_size)
        : "%eax", "memory"
    );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%eax \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"(line_size)
        : "%eax", "memory"
    );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm __volatile(
        "lea (%3, %3), %%eax \n\t"
        ".balign 8 \n\t"
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "addl %%eax, %1 \n\t"
        "addl %%eax, %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"(line_size)
        : "%eax", "memory"
    );
}

static void clear_blocks_mmx(DCTELEM *blocks)
{
    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movl $-128*6, %%eax \n\t"
        "1: \n\t"
        "movq %%mm7, (%0, %%eax) \n\t"
        "movq %%mm7, 8(%0, %%eax) \n\t"
        "movq %%mm7, 16(%0, %%eax) \n\t"
        "movq %%mm7, 24(%0, %%eax) \n\t"
        "addl $32, %%eax \n\t"
        " js 1b \n\t"
        : : "r" (((int)blocks)+128*6)
        : "%eax"
    );
}
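
/* Loop idiom used here and in get/diff_pixels_mmx: the base pointer is
 * biased past the end of the buffer and %eax runs from -128*6 up to 0,
 * so a single "addl" both advances the index and sets the sign flag
 * that "js" tests -- no separate counter is needed.  Net effect
 * (sketch, not compiled; 6 blocks of 64 16-bit coefficients): */
#if 0
static void clear_blocks_ref(DCTELEM *blocks)
{
    memset(blocks, 0, 6 * 64 * sizeof(DCTELEM));
}
#endif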

#ifdef CONFIG_ENCODERS
static int pix_sum16_mmx(uint8_t * pix, int line_size){
    const int h=16;
    int sum;
    int index= -line_size*h;

    __asm __volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "pxor %%mm6, %%mm6 \n\t"
        "1: \n\t"
        "movq (%2, %1), %%mm0 \n\t"
        "movq (%2, %1), %%mm1 \n\t"
        "movq 8(%2, %1), %%mm2 \n\t"
        "movq 8(%2, %1), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpckhbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpckhbw %%mm7, %%mm3 \n\t"
        "paddw %%mm0, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "paddw %%mm1, %%mm3 \n\t"
        "paddw %%mm3, %%mm6 \n\t"
        "addl %3, %1 \n\t"
        " js 1b \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $32, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movq %%mm6, %%mm5 \n\t"
        "psrlq $16, %%mm6 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "movd %%mm6, %0 \n\t"
        "andl $0xFFFF, %0 \n\t"
        : "=&r" (sum), "+r" (index)
        : "r" (pix - index), "r" (line_size)
    );

    return sum;
}
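
/* The psrlq/paddw tail above is a horizontal reduction of the four
 * 16-bit partial sums in mm6, and the final andl keeps only the low
 * word (16x16x255 still fits in 16 bits).  Scalar equivalent (sketch,
 * not compiled): */
#if 0
static int pix_sum16_ref(uint8_t *pix, int line_size)
{
    int sum = 0, x, y;
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++)
            sum += pix[x];
        pix += line_size;
    }
    return sum;
}
#endif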

#endif //CONFIG_ENCODERS

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    int i=0;
    asm volatile(
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "addl $16, %0 \n\t"
        "cmpl %3, %0 \n\t"
        " jb 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"(w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}
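
// note on add_bytes_mmx above: the asm loop only runs while i < w-15,
// i.e. over complete 16-byte chunks; the trailing scalar loop handles
// the remaining 0..15 bytes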

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];

    asm volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd %4, %%mm0 \n\t"
        "movd %5, %%mm1 \n\t"
        "movd %6, %%mm2 \n\t"
        "movd %7, %%mm3 \n\t"
        "punpcklbw %%mm1, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm2 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "punpcklwd %%mm2, %%mm0 \n\t"
        "punpckhwd %%mm2, %%mm1 \n\t"
        "movd %%mm0, %0 \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, %1 \n\t"
        "movd %%mm1, %2 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %3 \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        : "m" (*(uint32_t*)(src + 0*src_stride)),
          "m" (*(uint32_t*)(src + 1*src_stride)),
          "m" (*(uint32_t*)(src + 2*src_stride)),
          "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
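
/* Scalar equivalent of the punpck-based 4x4 byte transpose above
 * (sketch, not compiled): */
#if 0
static void transpose4x4_ref(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride)
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            dst[i*dst_stride + j] = src[j*src_stride + i];
}
#endif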

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    const int strength= ff_h263_loop_filter_strength[qscale];
    uint64_t temp[4] __attribute__ ((aligned(8)));
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp , src , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    asm volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    asm volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, %0 \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, %1 \n\t"
        "movd %%mm3, %2 \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, %3 \n\t"
        "movd %%mm1, %4 \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, %5 \n\t"
        "movd %%mm6, %6 \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, %7 \n\t"
        : "=m" (*(uint32_t*)(src + 0*stride)),
          "=m" (*(uint32_t*)(src + 1*stride)),
          "=m" (*(uint32_t*)(src + 2*stride)),
          "=m" (*(uint32_t*)(src + 3*stride)),
          "=m" (*(uint32_t*)(src + 4*stride)),
          "=m" (*(uint32_t*)(src + 5*stride)),
          "=m" (*(uint32_t*)(src + 6*stride)),
          "=m" (*(uint32_t*)(src + 7*stride))
    );
}

#ifdef CONFIG_ENCODERS
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
    int tmp;
    asm volatile (
        "movl $16,%%ecx\n"
        "pxor %%mm0,%%mm0\n"
        "pxor %%mm7,%%mm7\n"
        "1:\n"
        "movq (%0),%%mm2\n"     /* mm2 = pix[0-7] */
        "movq 8(%0),%%mm3\n"    /* mm3 = pix[8-15] */

        "movq %%mm2,%%mm1\n"    /* mm1 = mm2 = pix[0-7] */

        "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
        "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */

        "movq %%mm3,%%mm4\n"    /* mm4 = mm3 = pix[8-15] */
        "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
        "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */

        "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix4^2+pix5^2,pix6^2+pix7^2) */
        "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix0^2+pix1^2,pix2^2+pix3^2) */

        "pmaddwd %%mm3,%%mm3\n"
        "pmaddwd %%mm4,%%mm4\n"

        "paddd %%mm1,%%mm2\n"   /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
                                          pix2^2+pix3^2+pix6^2+pix7^2) */
        "paddd %%mm3,%%mm4\n"
        "paddd %%mm2,%%mm7\n"

        "addl %2, %0\n"
        "paddd %%mm4,%%mm7\n"
        "dec %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"    /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%1\n"
        : "+r" (pix), "=r"(tmp) : "r" (line_size) : "%ecx" );
    return tmp;
}
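
/* Scalar equivalent of pix_norm1_mmx() above (sketch, not compiled):
 * the sum of squares of a 16x16 block, accumulated via pmaddwd pairs. */
#if 0
static int pix_norm1_ref(uint8_t *pix, int line_size)
{
    int sum = 0, x, y;
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++)
            sum += pix[x] * pix[x];
        pix += line_size;
    }
    return sum;
}
#endif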

static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"    /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"    /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"     /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"     /* mm2 = pix2[0-7] */

        "movq %%mm1,%%mm5\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm5,%%mm2\n"

        "por %%mm1,%%mm2\n"

        "movq %%mm2,%%mm1\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm1,%%mm1\n"

        "addl %3,%0\n"
        "addl %3,%1\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm1,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"

        "movq %%mm7,%%mm1\n"
        "psrlq $32, %%mm7\n"    /* shift hi dword to lo */
        "paddd %%mm7,%%mm1\n"
        "movd %%mm1,%2\n"
        : "+r" (pix1), "+r" (pix2), "=r"(tmp)
        : "r" (line_size) , "m" (h)
        : "%ecx");
    return tmp;
}
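
/* The psubusb/psubusb/por sequence above yields |pix1 - pix2| per byte
 * (each saturating subtraction keeps only one direction of the
 * difference).  Scalar equivalent of sse8_mmx (sketch, not compiled): */
#if 0
static int sse8_ref(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int sum = 0, x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++) {
            int d = pix1[x] - pix2[x];
            sum += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}
#endif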

static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
    int tmp;
    asm volatile (
        "movl %4,%%ecx\n"
        "pxor %%mm0,%%mm0\n"    /* mm0 = 0 */
        "pxor %%mm7,%%mm7\n"    /* mm7 holds the sum */
        "1:\n"
        "movq (%0),%%mm1\n"     /* mm1 = pix1[0-7] */
        "movq (%1),%%mm2\n"     /* mm2 = pix2[0-7] */
        "movq 8(%0),%%mm3\n"    /* mm3 = pix1[8-15] */
        "movq 8(%1),%%mm4\n"    /* mm4 = pix2[8-15] */

        /* todo: mm1-mm2, mm3-mm4 */
        /* algo: subtract mm1 from mm2 with saturation and vice versa */
        /*       OR the results to get absolute difference */
        "movq %%mm1,%%mm5\n"
        "movq %%mm3,%%mm6\n"
        "psubusb %%mm2,%%mm1\n"
        "psubusb %%mm4,%%mm3\n"
        "psubusb %%mm5,%%mm2\n"
        "psubusb %%mm6,%%mm4\n"

        "por %%mm1,%%mm2\n"
        "por %%mm3,%%mm4\n"

        /* now convert to 16-bit vectors so we can square them */
        "movq %%mm2,%%mm1\n"
        "movq %%mm4,%%mm3\n"

        "punpckhbw %%mm0,%%mm2\n"
        "punpckhbw %%mm0,%%mm4\n"
        "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
        "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */

        "pmaddwd %%mm2,%%mm2\n"
        "pmaddwd %%mm4,%%mm4\n"
        "pmaddwd %%mm1,%%mm1\n"
        "pmaddwd %%mm3,%%mm3\n"

        "addl %3,%0\n"
        "addl %3,%1\n"

        "paddd %%mm2,%%mm1\n"
        "paddd %%mm4,%%mm3\n"
        "paddd %%mm1,%%mm7\n"
        "paddd %%mm3,%%mm7\n"

        "decl %%ecx\n"
        "jnz 1b\n"
michaelni
parents:
984
diff
changeset
|
834 |
4dfe15ae0078
sse16 & pix_norm1 optimization patch by (Felix von Leitner <felix-ffmpeg at fefe dot de>) (with some modifications)
michaelni
parents:
984
diff
changeset
|
835 "movq %%mm7,%%mm1\n" |
4dfe15ae0078
sse16 & pix_norm1 optimization patch by (Felix von Leitner <felix-ffmpeg at fefe dot de>) (with some modifications)
michaelni
parents:
984
diff
changeset
|
836 "psrlq $32, %%mm7\n" /* shift hi dword to lo */ |
4dfe15ae0078
sse16 & pix_norm1 optimization patch by (Felix von Leitner <felix-ffmpeg at fefe dot de>) (with some modifications)
michaelni
parents:
984
diff
changeset
|
837 "paddd %%mm7,%%mm1\n" |
4dfe15ae0078
sse16 & pix_norm1 optimization patch by (Felix von Leitner <felix-ffmpeg at fefe dot de>) (with some modifications)
michaelni
parents:
984
diff
changeset
|
838 "movd %%mm1,%2\n" |
1708 | 839 : "+r" (pix1), "+r" (pix2), "=r"(tmp) |
840 : "r" (line_size) , "m" (h) | |
841 : "%ecx"); | |
997
4dfe15ae0078
sse16 & pix_norm1 optimization patch by (Felix von Leitner <felix-ffmpeg at fefe dot de>) (with some modifications)
michaelni
parents:
984
diff
changeset
|
842 return tmp; |
4dfe15ae0078
sse16 & pix_norm1 optimization patch by (Felix von Leitner <felix-ffmpeg at fefe dot de>) (with some modifications)
michaelni
parents:
984
diff
changeset
|
843 } |
4dfe15ae0078
sse16 & pix_norm1 optimization patch by (Felix von Leitner <felix-ffmpeg at fefe dot de>) (with some modifications)
michaelni
parents:
984
diff
changeset
|
844 |
2067 | 845 static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) { |
846 int tmp; | |
847 asm volatile ( | |
848 "movl %3,%%ecx\n" | |
849 "pxor %%mm7,%%mm7\n" | |
850 "pxor %%mm6,%%mm6\n" | |
851 | |
852 "movq (%0),%%mm0\n" | |
853 "movq %%mm0, %%mm1\n" | |
854 "psllq $8, %%mm0\n" | |
855 "psrlq $8, %%mm1\n" | |
856 "psrlq $8, %%mm0\n" | |
857 "movq %%mm0, %%mm2\n" | |
858 "movq %%mm1, %%mm3\n" | |
859 "punpcklbw %%mm7,%%mm0\n" | |
860 "punpcklbw %%mm7,%%mm1\n" | |
861 "punpckhbw %%mm7,%%mm2\n" | |
862 "punpckhbw %%mm7,%%mm3\n" | |
863 "psubw %%mm1, %%mm0\n" | |
864 "psubw %%mm3, %%mm2\n" | |
865 | |
866 "addl %2,%0\n" | |
867 | |
868 "movq (%0),%%mm4\n" | |
869 "movq %%mm4, %%mm1\n" | |
870 "psllq $8, %%mm4\n" | |
871 "psrlq $8, %%mm1\n" | |
872 "psrlq $8, %%mm4\n" | |
873 "movq %%mm4, %%mm5\n" | |
874 "movq %%mm1, %%mm3\n" | |
875 "punpcklbw %%mm7,%%mm4\n" | |
876 "punpcklbw %%mm7,%%mm1\n" | |
877 "punpckhbw %%mm7,%%mm5\n" | |
878 "punpckhbw %%mm7,%%mm3\n" | |
879 "psubw %%mm1, %%mm4\n" | |
880 "psubw %%mm3, %%mm5\n" | |
881 "psubw %%mm4, %%mm0\n" | |
882 "psubw %%mm5, %%mm2\n" | |
883 "pxor %%mm3, %%mm3\n" | |
884 "pxor %%mm1, %%mm1\n" | |
885 "pcmpgtw %%mm0, %%mm3\n\t" | |
886 "pcmpgtw %%mm2, %%mm1\n\t" | |
887 "pxor %%mm3, %%mm0\n" | |
888 "pxor %%mm1, %%mm2\n" | |
889 "psubw %%mm3, %%mm0\n" | |
890 "psubw %%mm1, %%mm2\n" | |
891 "paddw %%mm0, %%mm2\n" | |
892 "paddw %%mm2, %%mm6\n" | |
893 | |
894 "addl %2,%0\n" | |
895 "1:\n" | |
896 | |
897 "movq (%0),%%mm0\n" | |
898 "movq %%mm0, %%mm1\n" | |
899 "psllq $8, %%mm0\n" | |
900 "psrlq $8, %%mm1\n" | |
901 "psrlq $8, %%mm0\n" | |
902 "movq %%mm0, %%mm2\n" | |
903 "movq %%mm1, %%mm3\n" | |
904 "punpcklbw %%mm7,%%mm0\n" | |
905 "punpcklbw %%mm7,%%mm1\n" | |
906 "punpckhbw %%mm7,%%mm2\n" | |
907 "punpckhbw %%mm7,%%mm3\n" | |
908 "psubw %%mm1, %%mm0\n" | |
909 "psubw %%mm3, %%mm2\n" | |
910 "psubw %%mm0, %%mm4\n" | |
911 "psubw %%mm2, %%mm5\n" | |
912 "pxor %%mm3, %%mm3\n" | |
913 "pxor %%mm1, %%mm1\n" | |
914 "pcmpgtw %%mm4, %%mm3\n\t" | |
915 "pcmpgtw %%mm5, %%mm1\n\t" | |
916 "pxor %%mm3, %%mm4\n" | |
917 "pxor %%mm1, %%mm5\n" | |
918 "psubw %%mm3, %%mm4\n" | |
919 "psubw %%mm1, %%mm5\n" | |
920 "paddw %%mm4, %%mm5\n" | |
921 "paddw %%mm5, %%mm6\n" | |
922 | |
923 "addl %2,%0\n" | |
924 | |
925 "movq (%0),%%mm4\n" | |
926 "movq %%mm4, %%mm1\n" | |
927 "psllq $8, %%mm4\n" | |
928 "psrlq $8, %%mm1\n" | |
929 "psrlq $8, %%mm4\n" | |
930 "movq %%mm4, %%mm5\n" | |
931 "movq %%mm1, %%mm3\n" | |
932 "punpcklbw %%mm7,%%mm4\n" | |
933 "punpcklbw %%mm7,%%mm1\n" | |
934 "punpckhbw %%mm7,%%mm5\n" | |
935 "punpckhbw %%mm7,%%mm3\n" | |
936 "psubw %%mm1, %%mm4\n" | |
937 "psubw %%mm3, %%mm5\n" | |
938 "psubw %%mm4, %%mm0\n" | |
939 "psubw %%mm5, %%mm2\n" | |
940 "pxor %%mm3, %%mm3\n" | |
941 "pxor %%mm1, %%mm1\n" | |
942 "pcmpgtw %%mm0, %%mm3\n\t" | |
943 "pcmpgtw %%mm2, %%mm1\n\t" | |
944 "pxor %%mm3, %%mm0\n" | |
945 "pxor %%mm1, %%mm2\n" | |
946 "psubw %%mm3, %%mm0\n" | |
947 "psubw %%mm1, %%mm2\n" | |
948 "paddw %%mm0, %%mm2\n" | |
949 "paddw %%mm2, %%mm6\n" | |
950 | |
951 "addl %2,%0\n" | |
952 "subl $2, %%ecx\n" | |
953 " jnz 1b\n" | |
954 | |
955 "movq %%mm6, %%mm0\n" | |
956 "punpcklwd %%mm7,%%mm0\n" | |
957 "punpckhwd %%mm7,%%mm6\n" | |
958 "paddd %%mm0, %%mm6\n" | |
959 | |
960 "movq %%mm6,%%mm0\n" | |
961 "psrlq $32, %%mm6\n" | |
962 "paddd %%mm6,%%mm0\n" | |
963 "movd %%mm0,%1\n" | |
964 : "+r" (pix1), "=r"(tmp) | |
965 : "r" (line_size) , "g" (h-2) | |
966 : "%ecx"); | |
967 return tmp; | |
968 } | |
969 | |
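/* A plain-C sketch of what hf_noise8_mmx computes (hypothetical
 * reference, not part of the original file; ABS comes from this file's
 * includes): the psllq/psrlq byte-shift trick forms the horizontal
 * gradient p[x]-p[x+1] in each row (7 usable lanes per 8 bytes), and
 * the loop accumulates the absolute vertical change of that gradient,
 * a high-frequency "noisiness" measure used by nsse below. */
static int hf_noise8_c_sketch(uint8_t *pix, int line_size, int h)
{
    int sum= 0, x, y;
    for(y=0; y<h-1; y++){
        for(x=0; x<7; x++){
            const int d0= pix[x]           - pix[x+1];
            const int d1= pix[x+line_size] - pix[x+line_size+1];
            sum += ABS(d0 - d1);    /* |row gradient change| */
        }
        pix += line_size;
    }
    return sum;
}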
970 static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) { | |
971 int tmp; | |
972 uint8_t * pix= pix1; | |
973 asm volatile ( | |
974 "movl %3,%%ecx\n" | |
975 "pxor %%mm7,%%mm7\n" | |
976 "pxor %%mm6,%%mm6\n" | |
977 | |
978 "movq (%0),%%mm0\n" | |
979 "movq 1(%0),%%mm1\n" | |
980 "movq %%mm0, %%mm2\n" | |
981 "movq %%mm1, %%mm3\n" | |
982 "punpcklbw %%mm7,%%mm0\n" | |
983 "punpcklbw %%mm7,%%mm1\n" | |
984 "punpckhbw %%mm7,%%mm2\n" | |
985 "punpckhbw %%mm7,%%mm3\n" | |
986 "psubw %%mm1, %%mm0\n" | |
987 "psubw %%mm3, %%mm2\n" | |
988 | |
989 "addl %2,%0\n" | |
990 | |
991 "movq (%0),%%mm4\n" | |
992 "movq 1(%0),%%mm1\n" | |
993 "movq %%mm4, %%mm5\n" | |
994 "movq %%mm1, %%mm3\n" | |
995 "punpcklbw %%mm7,%%mm4\n" | |
996 "punpcklbw %%mm7,%%mm1\n" | |
997 "punpckhbw %%mm7,%%mm5\n" | |
998 "punpckhbw %%mm7,%%mm3\n" | |
999 "psubw %%mm1, %%mm4\n" | |
1000 "psubw %%mm3, %%mm5\n" | |
1001 "psubw %%mm4, %%mm0\n" | |
1002 "psubw %%mm5, %%mm2\n" | |
1003 "pxor %%mm3, %%mm3\n" | |
1004 "pxor %%mm1, %%mm1\n" | |
1005 "pcmpgtw %%mm0, %%mm3\n\t" | |
1006 "pcmpgtw %%mm2, %%mm1\n\t" | |
1007 "pxor %%mm3, %%mm0\n" | |
1008 "pxor %%mm1, %%mm2\n" | |
1009 "psubw %%mm3, %%mm0\n" | |
1010 "psubw %%mm1, %%mm2\n" | |
1011 "paddw %%mm0, %%mm2\n" | |
1012 "paddw %%mm2, %%mm6\n" | |
1013 | |
1014 "addl %2,%0\n" | |
1015 "1:\n" | |
1016 | |
1017 "movq (%0),%%mm0\n" | |
1018 "movq 1(%0),%%mm1\n" | |
1019 "movq %%mm0, %%mm2\n" | |
1020 "movq %%mm1, %%mm3\n" | |
1021 "punpcklbw %%mm7,%%mm0\n" | |
1022 "punpcklbw %%mm7,%%mm1\n" | |
1023 "punpckhbw %%mm7,%%mm2\n" | |
1024 "punpckhbw %%mm7,%%mm3\n" | |
1025 "psubw %%mm1, %%mm0\n" | |
1026 "psubw %%mm3, %%mm2\n" | |
1027 "psubw %%mm0, %%mm4\n" | |
1028 "psubw %%mm2, %%mm5\n" | |
1029 "pxor %%mm3, %%mm3\n" | |
1030 "pxor %%mm1, %%mm1\n" | |
1031 "pcmpgtw %%mm4, %%mm3\n\t" | |
1032 "pcmpgtw %%mm5, %%mm1\n\t" | |
1033 "pxor %%mm3, %%mm4\n" | |
1034 "pxor %%mm1, %%mm5\n" | |
1035 "psubw %%mm3, %%mm4\n" | |
1036 "psubw %%mm1, %%mm5\n" | |
1037 "paddw %%mm4, %%mm5\n" | |
1038 "paddw %%mm5, %%mm6\n" | |
1039 | |
1040 "addl %2,%0\n" | |
1041 | |
1042 "movq (%0),%%mm4\n" | |
1043 "movq 1(%0),%%mm1\n" | |
1044 "movq %%mm4, %%mm5\n" | |
1045 "movq %%mm1, %%mm3\n" | |
1046 "punpcklbw %%mm7,%%mm4\n" | |
1047 "punpcklbw %%mm7,%%mm1\n" | |
1048 "punpckhbw %%mm7,%%mm5\n" | |
1049 "punpckhbw %%mm7,%%mm3\n" | |
1050 "psubw %%mm1, %%mm4\n" | |
1051 "psubw %%mm3, %%mm5\n" | |
1052 "psubw %%mm4, %%mm0\n" | |
1053 "psubw %%mm5, %%mm2\n" | |
1054 "pxor %%mm3, %%mm3\n" | |
1055 "pxor %%mm1, %%mm1\n" | |
1056 "pcmpgtw %%mm0, %%mm3\n\t" | |
1057 "pcmpgtw %%mm2, %%mm1\n\t" | |
1058 "pxor %%mm3, %%mm0\n" | |
1059 "pxor %%mm1, %%mm2\n" | |
1060 "psubw %%mm3, %%mm0\n" | |
1061 "psubw %%mm1, %%mm2\n" | |
1062 "paddw %%mm0, %%mm2\n" | |
1063 "paddw %%mm2, %%mm6\n" | |
1064 | |
1065 "addl %2,%0\n" | |
1066 "subl $2, %%ecx\n" | |
1067 " jnz 1b\n" | |
1068 | |
1069 "movq %%mm6, %%mm0\n" | |
1070 "punpcklwd %%mm7,%%mm0\n" | |
1071 "punpckhwd %%mm7,%%mm6\n" | |
1072 "paddd %%mm0, %%mm6\n" | |
1073 | |
1074 "movq %%mm6,%%mm0\n" | |
1075 "psrlq $32, %%mm6\n" | |
1076 "paddd %%mm6,%%mm0\n" | |
1077 "movd %%mm0,%1\n" | |
1078 : "+r" (pix1), "=r"(tmp) | |
1079 : "r" (line_size) , "g" (h-2) | |
1080 : "%ecx"); | |
1081 return tmp + hf_noise8_mmx(pix+8, line_size, h); | |
1082 } | |
1083 | |
1084 static int nsse16_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { | |
1085 int score1= sse16_mmx(c, pix1, pix2, line_size, h); | |
1086 int score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h); | |
1087 | |
1088 if(c) return score1 + ABS(score2)*c->avctx->nsse_weight; | |
1089 else return score1 + ABS(score2)*8; | |
1090 } | |
1091 | |
1092 static int nsse8_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { | |
1093 int score1= sse8_mmx(c, pix1, pix2, line_size, h); | |
1094 int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h); | |
1095 | |
1096 if(c) return score1 + ABS(score2)*c->avctx->nsse_weight; | |
1097 else return score1 + ABS(score2)*8; | |
1098 } | |
1099 | |
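/* The nsse ("noise preserving SSE") comparisons above combine both
 * metrics: score = SSE(pix1,pix2) + nsse_weight * |HF(pix1) - HF(pix2)|,
 * so a candidate block that keeps roughly the same amount of
 * high-frequency detail as the source is penalized less than plain SSE
 * would penalize it. */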
1729 | 1100 static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) { |
1101 int tmp; | |
1102 | |
1103 assert( (((int)pix) & 7) == 0); | |
1104 assert((line_size &7) ==0); | |
1105 | |
1106 #define SUM(in0, in1, out0, out1) \ | |
1107 "movq (%0), %%mm2\n"\ | |
1108 "movq 8(%0), %%mm3\n"\ | |
1109 "addl %2,%0\n"\ | |
1110 "movq %%mm2, " #out0 "\n"\ | |
1111 "movq %%mm3, " #out1 "\n"\ | |
1112 "psubusb " #in0 ", %%mm2\n"\ | |
1113 "psubusb " #in1 ", %%mm3\n"\ | |
1114 "psubusb " #out0 ", " #in0 "\n"\ | |
1115 "psubusb " #out1 ", " #in1 "\n"\ | |
1116 "por %%mm2, " #in0 "\n"\ | |
1117 "por %%mm3, " #in1 "\n"\ | |
1118 "movq " #in0 ", %%mm2\n"\ | |
1119 "movq " #in1 ", %%mm3\n"\ | |
1120 "punpcklbw %%mm7, " #in0 "\n"\ | |
1121 "punpcklbw %%mm7, " #in1 "\n"\ | |
1122 "punpckhbw %%mm7, %%mm2\n"\ | |
1123 "punpckhbw %%mm7, %%mm3\n"\ | |
1124 "paddw " #in1 ", " #in0 "\n"\ | |
1125 "paddw %%mm3, %%mm2\n"\ | |
1126 "paddw %%mm2, " #in0 "\n"\ | |
1127 "paddw " #in0 ", %%mm6\n" | |
1128 | |
1129 | |
1130 asm volatile ( | |
1131 "movl %3,%%ecx\n" | |
1132 "pxor %%mm6,%%mm6\n" | |
1133 "pxor %%mm7,%%mm7\n" | |
1134 "movq (%0),%%mm0\n" | |
1135 "movq 8(%0),%%mm1\n" | |
1136 "addl %2,%0\n" | |
1137 "subl $2, %%ecx\n" | |
1138 SUM(%%mm0, %%mm1, %%mm4, %%mm5) | |
1139 "1:\n" | |
1140 | |
1141 SUM(%%mm4, %%mm5, %%mm0, %%mm1) | |
1142 | |
1143 SUM(%%mm0, %%mm1, %%mm4, %%mm5) | |
1144 | |
1145 "subl $2, %%ecx\n" | |
1146 "jnz 1b\n" | |
1147 | |
1148 "movq %%mm6,%%mm0\n" | |
1149 "psrlq $32, %%mm6\n" | |
1150 "paddw %%mm6,%%mm0\n" | |
1151 "movq %%mm0,%%mm6\n" | |
1152 "psrlq $16, %%mm0\n" | |
1153 "paddw %%mm6,%%mm0\n" | |
1154 "movd %%mm0,%1\n" | |
1155 : "+r" (pix), "=r"(tmp) | |
1156 : "r" (line_size) , "m" (h) | |
1157 : "%ecx"); | |
1158 return tmp & 0xFFFF; | |
1159 } | |
1160 #undef SUM | |
1161 | |
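/* Plain-C sketch of vsad_intra16 (hypothetical reference, not part of
 * the original file): a "vertical SAD", summing absolute differences
 * between each row and the row above it over a 16-pixel-wide block. */
static int vsad_intra16_c_sketch(uint8_t *pix, int line_size, int h)
{
    int sum= 0, x, y;
    for(y=1; y<h; y++){
        pix += line_size;
        for(x=0; x<16; x++)
            sum += ABS(pix[x] - pix[x-line_size]);
    }
    return sum;
}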
1162 static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) { | |
1163 int tmp; | |
1164 | |
1165 assert( (((int)pix) & 7) == 0); | |
1166 assert((line_size &7) ==0); | |
1167 | |
1168 #define SUM(in0, in1, out0, out1) \ | |
1169 "movq (%0), " #out0 "\n"\ | |
1170 "movq 8(%0), " #out1 "\n"\ | |
1171 "addl %2,%0\n"\ | |
1172 "psadbw " #out0 ", " #in0 "\n"\ | |
1173 "psadbw " #out1 ", " #in1 "\n"\ | |
1174 "paddw " #in1 ", " #in0 "\n"\ | |
1175 "paddw " #in0 ", %%mm6\n" | |
1176 | |
1177 asm volatile ( | |
1178 "movl %3,%%ecx\n" | |
1179 "pxor %%mm6,%%mm6\n" | |
1180 "pxor %%mm7,%%mm7\n" | |
1181 "movq (%0),%%mm0\n" | |
1182 "movq 8(%0),%%mm1\n" | |
1183 "addl %2,%0\n" | |
1184 "subl $2, %%ecx\n" | |
1185 SUM(%%mm0, %%mm1, %%mm4, %%mm5) | |
1186 "1:\n" | |
1187 | |
1188 SUM(%%mm4, %%mm5, %%mm0, %%mm1) | |
1189 | |
1190 SUM(%%mm0, %%mm1, %%mm4, %%mm5) | |
1191 | |
1192 "subl $2, %%ecx\n" | |
1193 "jnz 1b\n" | |
1194 | |
1195 "movd %%mm6,%1\n" | |
1196 : "+r" (pix), "=r"(tmp) | |
1197 : "r" (line_size) , "m" (h) | |
1198 : "%ecx"); | |
1199 return tmp; | |
1200 } | |
1201 #undef SUM | |
1202 | |
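/* Note: the mmx2 variant above collapses the whole
 * psubusb/por/punpck/paddw absolute-difference sequence into a single
 * psadbw, which sums the absolute byte differences of two qwords
 * directly into one word result. */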
1203 static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { | |
1204 int tmp; | |
1205 | |
1206 assert( (((int)pix1) & 7) == 0); | |
1207 assert( (((int)pix2) & 7) == 0); | |
1208 assert((line_size &7) ==0); | |
1209 | |
1210 #define SUM(in0, in1, out0, out1) \ | |
1211 "movq (%0),%%mm2\n"\ | |
1212 "movq (%1)," #out0 "\n"\ | |
1213 "movq 8(%0),%%mm3\n"\ | |
1214 "movq 8(%1)," #out1 "\n"\ | |
1215 "addl %3,%0\n"\ | |
1216 "addl %3,%1\n"\ | |
1217 "psubb " #out0 ", %%mm2\n"\ | |
1218 "psubb " #out1 ", %%mm3\n"\ | |
1219 "pxor %%mm7, %%mm2\n"\ | |
1220 "pxor %%mm7, %%mm3\n"\ | |
1221 "movq %%mm2, " #out0 "\n"\ | |
1222 "movq %%mm3, " #out1 "\n"\ | |
1223 "psubusb " #in0 ", %%mm2\n"\ | |
1224 "psubusb " #in1 ", %%mm3\n"\ | |
1225 "psubusb " #out0 ", " #in0 "\n"\ | |
1226 "psubusb " #out1 ", " #in1 "\n"\ | |
1227 "por %%mm2, " #in0 "\n"\ | |
1228 "por %%mm3, " #in1 "\n"\ | |
1229 "movq " #in0 ", %%mm2\n"\ | |
1230 "movq " #in1 ", %%mm3\n"\ | |
1231 "punpcklbw %%mm7, " #in0 "\n"\ | |
1232 "punpcklbw %%mm7, " #in1 "\n"\ | |
1233 "punpckhbw %%mm7, %%mm2\n"\ | |
1234 "punpckhbw %%mm7, %%mm3\n"\ | |
1235 "paddw " #in1 ", " #in0 "\n"\ | |
1236 "paddw %%mm3, %%mm2\n"\ | |
1237 "paddw %%mm2, " #in0 "\n"\ | |
1238 "paddw " #in0 ", %%mm6\n" | |
1239 | |
1240 | |
1241 asm volatile ( | |
1242 "movl %4,%%ecx\n" | |
1243 "pxor %%mm6,%%mm6\n" | |
1244 "pcmpeqw %%mm7,%%mm7\n" | |
1245 "psllw $15, %%mm7\n" | |
1246 "packsswb %%mm7, %%mm7\n" | |
1247 "movq (%0),%%mm0\n" | |
1248 "movq (%1),%%mm2\n" | |
1249 "movq 8(%0),%%mm1\n" | |
1250 "movq 8(%1),%%mm3\n" | |
1251 "addl %3,%0\n" | |
1252 "addl %3,%1\n" | |
1253 "subl $2, %%ecx\n" | |
1254 "psubb %%mm2, %%mm0\n" | |
1255 "psubb %%mm3, %%mm1\n" | |
1256 "pxor %%mm7, %%mm0\n" | |
1257 "pxor %%mm7, %%mm1\n" | |
1258 SUM(%%mm0, %%mm1, %%mm4, %%mm5) | |
1259 "1:\n" | |
1260 | |
1261 SUM(%%mm4, %%mm5, %%mm0, %%mm1) | |
1262 | |
1263 SUM(%%mm0, %%mm1, %%mm4, %%mm5) | |
1264 | |
1265 "subl $2, %%ecx\n" | |
1266 "jnz 1b\n" | |
1267 | |
1268 "movq %%mm6,%%mm0\n" | |
1269 "psrlq $32, %%mm6\n" | |
1270 "paddw %%mm6,%%mm0\n" | |
1271 "movq %%mm0,%%mm6\n" | |
1272 "psrlq $16, %%mm0\n" | |
1273 "paddw %%mm6,%%mm0\n" | |
1274 "movd %%mm0,%2\n" | |
1275 : "+r" (pix1), "+r" (pix2), "=r"(tmp) | |
1276 : "r" (line_size) , "m" (h) | |
1277 : "%ecx"); | |
1278 return tmp & 0x7FFF; | |
1279 } | |
1280 #undef SUM | |
1281 | |
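/* Plain-C sketch of vsad16 (hypothetical reference, not part of the
 * original file, ignoring the mod-256 wrap of psubb for differences
 * outside [-128,127]): the constant built in %%mm7 is 0x80 in every
 * byte, and the pxor biases the signed row difference into unsigned
 * range so the unsigned-saturation abs trick still applies. */
static int vsad16_c_sketch(uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
    int sum= 0, x, y;
    for(y=1; y<h; y++){
        pix1 += line_size;
        pix2 += line_size;
        for(x=0; x<16; x++)
            sum += ABS((pix1[x] - pix2[x])
                     - (pix1[x-line_size] - pix2[x-line_size]));
    }
    return sum;
}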
1282 static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { | |
1283 int tmp; | |
1284 | |
1285 assert( (((int)pix1) & 7) == 0); | |
1286 assert( (((int)pix2) & 7) == 0); | |
1287 assert((line_size &7) ==0); | |
1288 | |
1289 #define SUM(in0, in1, out0, out1) \ | |
1290 "movq (%0)," #out0 "\n"\ | |
1291 "movq (%1),%%mm2\n"\ | |
1292 "movq 8(%0)," #out1 "\n"\ | |
1293 "movq 8(%1),%%mm3\n"\ | |
1294 "addl %3,%0\n"\ | |
1295 "addl %3,%1\n"\ | |
1296 "psubb %%mm2, " #out0 "\n"\ | |
1297 "psubb %%mm3, " #out1 "\n"\ | |
1298 "pxor %%mm7, " #out0 "\n"\ | |
1299 "pxor %%mm7, " #out1 "\n"\ | |
1300 "psadbw " #out0 ", " #in0 "\n"\ | |
1301 "psadbw " #out1 ", " #in1 "\n"\ | |
1302 "paddw " #in1 ", " #in0 "\n"\ | |
1303 "paddw " #in0 ", %%mm6\n" | |
1304 | |
1305 asm volatile ( | |
1306 "movl %4,%%ecx\n" | |
1307 "pxor %%mm6,%%mm6\n" | |
1308 "pcmpeqw %%mm7,%%mm7\n" | |
1309 "psllw $15, %%mm7\n" | |
1310 "packsswb %%mm7, %%mm7\n" | |
1311 "movq (%0),%%mm0\n" | |
1312 "movq (%1),%%mm2\n" | |
1313 "movq 8(%0),%%mm1\n" | |
1314 "movq 8(%1),%%mm3\n" | |
1315 "addl %3,%0\n" | |
1316 "addl %3,%1\n" | |
1317 "subl $2, %%ecx\n" | |
1318 "psubb %%mm2, %%mm0\n" | |
1319 "psubb %%mm3, %%mm1\n" | |
1320 "pxor %%mm7, %%mm0\n" | |
1321 "pxor %%mm7, %%mm1\n" | |
1322 SUM(%%mm0, %%mm1, %%mm4, %%mm5) | |
1323 "1:\n" | |
1324 | |
1325 SUM(%%mm4, %%mm5, %%mm0, %%mm1) | |
1326 | |
1327 SUM(%%mm0, %%mm1, %%mm4, %%mm5) | |
1328 | |
1329 "subl $2, %%ecx\n" | |
1330 "jnz 1b\n" | |
1331 | |
1332 "movd %%mm6,%2\n" | |
1333 : "+r" (pix1), "+r" (pix2), "=r"(tmp) | |
1334 : "r" (line_size) , "m" (h) | |
1335 : "%ecx"); | |
1336 return tmp; | |
1337 } | |
1338 #undef SUM | |
1339 | |
866 | 1340 static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ |
1341 int i=0; | |
1342 asm volatile( | |
1343 "1: \n\t" | |
1344 "movq (%2, %0), %%mm0 \n\t" | |
1345 "movq (%1, %0), %%mm1 \n\t" | |
1346 "psubb %%mm0, %%mm1 \n\t" | |
1347 "movq %%mm1, (%3, %0) \n\t" | |
1348 "movq 8(%2, %0), %%mm0 \n\t" | |
1349 "movq 8(%1, %0), %%mm1 \n\t" | |
1350 "psubb %%mm0, %%mm1 \n\t" | |
1351 "movq %%mm1, 8(%3, %0) \n\t" | |
1352 "addl $16, %0 \n\t" | |
1353 "cmpl %4, %0 \n\t" | |
1354 " jb 1b \n\t" | |
1355 : "+r" (i) | |
1356 : "r"(src1), "r"(src2), "r"(dst), "r"(w-15) | |
1357 ); | |
1358 for(; i<w; i++) | |
1359 dst[i+0] = src1[i+0]-src2[i+0]; | |
1360 } | |
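/* Note: the loop above differences 16 bytes per iteration, so it only
 * runs while i < w-15; the scalar for() afterwards finishes the
 * remaining 0..15 tail bytes. */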
1527 | 1361 |
1362 static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){ | |
1363 int i=0; | |
1364 uint8_t l, lt; | |
2207 | 1365 |
1527 | 1366 asm volatile( |
1367 "1: \n\t" | |
1368 "movq -1(%1, %0), %%mm0 \n\t" // LT | |
1369 "movq (%1, %0), %%mm1 \n\t" // T | |
1370 "movq -1(%2, %0), %%mm2 \n\t" // L | |
1371 "movq (%2, %0), %%mm3 \n\t" // X | |
1372 "movq %%mm2, %%mm4 \n\t" // L | |
1373 "psubb %%mm0, %%mm2 \n\t" | |
1374 "paddb %%mm1, %%mm2 \n\t" // L + T - LT | |
1375 "movq %%mm4, %%mm5 \n\t" // L | |
1376 "pmaxub %%mm1, %%mm4 \n\t" // max(T, L) | |
1377 "pminub %%mm5, %%mm1 \n\t" // min(T, L) | |
1378 "pminub %%mm2, %%mm4 \n\t" | |
1379 "pmaxub %%mm1, %%mm4 \n\t" | |
1380 "psubb %%mm4, %%mm3 \n\t" // dst - pred | |
1381 "movq %%mm3, (%3, %0) \n\t" | |
1382 "addl $8, %0 \n\t" | |
1383 "cmpl %4, %0 \n\t" | |
1384 " jb 1b \n\t" | |
1385 : "+r" (i) | |
1386 : "r"(src1), "r"(src2), "r"(dst), "r"(w) | |
1387 ); | |
1388 | |
1389 l= *left; | |
1390 lt= *left_top; | |
1391 | |
1392 dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF); | |
1393 | |
1394 *left_top= src1[w-1]; | |
1395 *left = src2[w-1]; | |
1396 } | |
1397 | |
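/* Plain-C sketch of the branchless median used above (hypothetical
 * reference, not part of the original file): the pmaxub/pminub
 * sequence evaluates median(L, T, L+T-LT) per byte, the HuffYUV
 * median predictor. */
static inline int median3_sketch(int l, int t, int lt)
{
    const int grad= (l + t - lt) & 0xFF;    /* paddb/psubb, mod 256 */
    const int mx  = l > t ? l : t;          /* pmaxub               */
    const int mn  = l < t ? l : t;          /* pminub               */
    const int m2  = grad < mx ? grad : mx;  /* pminub with gradient */
    return m2 > mn ? m2 : mn;               /* final pmaxub: median */
}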
1153 | 1398 #define LBUTTERFLY2(a1,b1,a2,b2)\ |
1399 "paddw " #b1 ", " #a1 " \n\t"\ | |
1400 "paddw " #b2 ", " #a2 " \n\t"\ | |
1401 "paddw " #b1 ", " #b1 " \n\t"\ | |
1402 "paddw " #b2 ", " #b2 " \n\t"\ | |
1403 "psubw " #a1 ", " #b1 " \n\t"\ | |
1186 | 1404 "psubw " #a2 ", " #b2 " \n\t" |
866 | 1405 |
936 | 1406 #define HADAMARD48\ |
1153 | 1407 LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\ |
1408 LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\ | |
1409 LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\ | |
1410 LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\ | |
1411 LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\ | |
1412 LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\ | |
936 | 1413 |
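/* Note: each LBUTTERFLY2 step computes an in-place butterfly without a
 * spare register: a+=b; b+=b; b-=a gives (a,b) -> (a+b, b-a). The
 * three stages of HADAMARD48 apply this across mm0..mm7, an 8-point
 * Hadamard transform on each word lane. */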
1414 #define MMABS(a,z)\ | |
1415 "pxor " #z ", " #z " \n\t"\ | |
1416 "pcmpgtw " #a ", " #z " \n\t"\ | |
1417 "pxor " #z ", " #a " \n\t"\ | |
1418 "psubw " #z ", " #a " \n\t" | |
1419 | |
1420 #define MMABS_SUM(a,z, sum)\ | |
1421 "pxor " #z ", " #z " \n\t"\ | |
1422 "pcmpgtw " #a ", " #z " \n\t"\ | |
1423 "pxor " #z ", " #a " \n\t"\ | |
1424 "psubw " #z ", " #a " \n\t"\ | |
1425 "paddusw " #a ", " #sum " \n\t" | |
1426 | |
1153 | 1427 #define MMABS_MMX2(a,z)\ |
1428 "pxor " #z ", " #z " \n\t"\ | |
1429 "psubw " #a ", " #z " \n\t"\ | |
1430 "pmaxsw " #z ", " #a " \n\t" | |
1431 | |
1432 #define MMABS_SUM_MMX2(a,z, sum)\ | |
1433 "pxor " #z ", " #z " \n\t"\ | |
1434 "psubw " #a ", " #z " \n\t"\ | |
1435 "pmaxsw " #z ", " #a " \n\t"\ | |
1436 "paddusw " #a ", " #sum " \n\t" | |
1437 | |
936 | 1438 #define SBUTTERFLY(a,b,t,n)\ |
1439 "movq " #a ", " #t " \n\t" /* abcd */\ | |
1440 "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\ | |
1441 "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\ | |
1153 | 1442 |
936 | 1443 #define TRANSPOSE4(a,b,c,d,t)\ |
1444 SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\ | |
1445 SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\ | |
1446 SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\ | |
1447 SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */ | |
1448 | |
1449 #define LOAD4(o, a, b, c, d)\ | |
1450 "movq "#o"(%1), " #a " \n\t"\ | |
1451 "movq "#o"+16(%1), " #b " \n\t"\ | |
1452 "movq "#o"+32(%1), " #c " \n\t"\ | |
1453 "movq "#o"+48(%1), " #d " \n\t" | |
1454 | |
1455 #define STORE4(o, a, b, c, d)\ | |
1456 "movq "#a", "#o"(%1) \n\t"\ | |
1457 "movq "#b", "#o"+16(%1) \n\t"\ | |
1458 "movq "#c", "#o"+32(%1) \n\t"\ | |
1459 "movq "#d", "#o"+48(%1) \n\t"\ | |
1460 | |
1708 | 1461 static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){ |
936 | 1462 uint64_t temp[16] __align8; |
1463 int sum=0; | |
1708 | 1464 |
1465 assert(h==8); | |
936 | 1466 |
1467 diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride); | |
1468 | |
1469 asm volatile( | |
1470 LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3) | |
1471 LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7) | |
1472 | |
1473 HADAMARD48 | |
1474 | |
1475 "movq %%mm7, 112(%1) \n\t" | |
1476 | |
1477 TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7) | |
1478 STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2) | |
1479 | |
1480 "movq 112(%1), %%mm7 \n\t" | |
1481 TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0) | |
1482 STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6) | |
1483 | |
1484 LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3) | |
1485 LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7) | |
1486 | |
1487 HADAMARD48 | |
1488 | |
1489 "movq %%mm7, 120(%1) \n\t" | |
1490 | |
1491 TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7) | |
1492 STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2) | |
1493 | |
1494 "movq 120(%1), %%mm7 \n\t" | |
1495 TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0) | |
1496 "movq %%mm7, %%mm5 \n\t"//FIXME remove | |
1497 "movq %%mm6, %%mm7 \n\t" | |
1498 "movq %%mm0, %%mm6 \n\t" | |
1499 // STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove | |
1500 | |
1501 LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3) | |
1502 // LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7) | |
1503 | |
1504 HADAMARD48 | |
1505 "movq %%mm7, 64(%1) \n\t" | |
1506 MMABS(%%mm0, %%mm7) | |
1507 MMABS_SUM(%%mm1, %%mm7, %%mm0) | |
1508 MMABS_SUM(%%mm2, %%mm7, %%mm0) | |
1509 MMABS_SUM(%%mm3, %%mm7, %%mm0) | |
1510 MMABS_SUM(%%mm4, %%mm7, %%mm0) | |
1511 MMABS_SUM(%%mm5, %%mm7, %%mm0) | |
1512 MMABS_SUM(%%mm6, %%mm7, %%mm0) | |
1513 "movq 64(%1), %%mm1 \n\t" | |
1514 MMABS_SUM(%%mm1, %%mm7, %%mm0) | |
1515 "movq %%mm0, 64(%1) \n\t" | |
1516 | |
1517 LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3) | |
1518 LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7) | |
1519 | |
1520 HADAMARD48 | |
1521 "movq %%mm7, (%1) \n\t" | |
1522 MMABS(%%mm0, %%mm7) | |
1523 MMABS_SUM(%%mm1, %%mm7, %%mm0) | |
1524 MMABS_SUM(%%mm2, %%mm7, %%mm0) | |
1525 MMABS_SUM(%%mm3, %%mm7, %%mm0) | |
1526 MMABS_SUM(%%mm4, %%mm7, %%mm0) | |
1527 MMABS_SUM(%%mm5, %%mm7, %%mm0) | |
1528 MMABS_SUM(%%mm6, %%mm7, %%mm0) | |
1529 "movq (%1), %%mm1 \n\t" | |
1530 MMABS_SUM(%%mm1, %%mm7, %%mm0) | |
1531 "movq 64(%1), %%mm1 \n\t" | |
1532 MMABS_SUM(%%mm1, %%mm7, %%mm0) | |
1533 | |
1534 "movq %%mm0, %%mm1 \n\t" | |
1535 "psrlq $32, %%mm0 \n\t" | |
1536 "paddusw %%mm1, %%mm0 \n\t" | |
1537 "movq %%mm0, %%mm1 \n\t" | |
1538 "psrlq $16, %%mm0 \n\t" | |
1539 "paddusw %%mm1, %%mm0 \n\t" | |
1540 "movd %%mm0, %0 \n\t" | |
1541 | |
1542 : "=r" (sum) | |
1543 : "r"(temp) | |
1544 ); | |
1545 return sum&0xFFFF; | |
1546 } | |
1547 | |
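/* The function above is a SATD: it Hadamard-transforms the 8x8
 * difference block in both directions (1-D pass, TRANSPOSE4, second
 * pass) and sums the absolute values of the coefficients. The
 * horizontal reduction uses paddusw, so the sum saturates at 0xFFFF,
 * hence the final mask. */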
1708 | 1548 static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){ |
1153 | 1549 uint64_t temp[16] __align8; |
1550 int sum=0; | |
1708 | 1551 |
1552 assert(h==8); | |
1153 | 1553 |
1554 diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride); | |
1555 | |
1556 asm volatile( | |
1557 LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3) | |
1558 LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7) | |
1559 | |
1560 HADAMARD48 | |
1561 | |
1562 "movq %%mm7, 112(%1) \n\t" | |
1563 | |
1564 TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7) | |
1565 STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2) | |
1566 | |
1567 "movq 112(%1), %%mm7 \n\t" | |
1568 TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0) | |
1569 STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6) | |
1570 | |
1571 LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3) | |
1572 LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7) | |
1573 | |
1574 HADAMARD48 | |
1575 | |
1576 "movq %%mm7, 120(%1) \n\t" | |
1577 | |
1578 TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7) | |
1579 STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2) | |
1580 | |
1581 "movq 120(%1), %%mm7 \n\t" | |
1582 TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0) | |
1583 "movq %%mm7, %%mm5 \n\t"//FIXME remove | |
1584 "movq %%mm6, %%mm7 \n\t" | |
1585 "movq %%mm0, %%mm6 \n\t" | |
1586 // STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove | |
1587 | |
1588 LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3) | |
1589 // LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7) | |
1590 | |
1591 HADAMARD48 | |
1592 "movq %%mm7, 64(%1) \n\t" | |
1593 MMABS_MMX2(%%mm0, %%mm7) | |
1594 MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) | |
1595 MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0) | |
1596 MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0) | |
1597 MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0) | |
1598 MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0) | |
1599 MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0) | |
1600 "movq 64(%1), %%mm1 \n\t" | |
1601 MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) | |
1602 "movq %%mm0, 64(%1) \n\t" | |
1603 | |
1604 LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3) | |
1605 LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7) | |
1606 | |
1607 HADAMARD48 | |
1608 "movq %%mm7, (%1) \n\t" | |
1609 MMABS_MMX2(%%mm0, %%mm7) | |
1610 MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) | |
1611 MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0) | |
1612 MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0) | |
1613 MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0) | |
1614 MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0) | |
1615 MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0) | |
1616 "movq (%1), %%mm1 \n\t" | |
1617 MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) | |
1618 "movq 64(%1), %%mm1 \n\t" | |
1619 MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) | |
1620 | |
1621 "movq %%mm0, %%mm1 \n\t" | |
1622 "psrlq $32, %%mm0 \n\t" | |
1623 "paddusw %%mm1, %%mm0 \n\t" | |
1624 "movq %%mm0, %%mm1 \n\t" | |
1625 "psrlq $16, %%mm0 \n\t" | |
1626 "paddusw %%mm1, %%mm0 \n\t" | |
1627 "movd %%mm0, %0 \n\t" | |
1628 | |
1629 : "=r" (sum) | |
1630 : "r"(temp) | |
1631 ); | |
1632 return sum&0xFFFF; | |
1633 } | |
1634 | |
1635 | |
1708 | 1636 WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx) |
1637 WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2) | |
1530 | 1638 #endif //CONFIG_ENCODERS |
866 | 1639 |
959 | 1640 #define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d) |
1641 #define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d) | |
1642 | |
954 | 1643 #define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\ |
1644 "paddw " #m4 ", " #m3 " \n\t" /* x1 */\ | |
961 | 1645 "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\ |
954 | 1646 "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\ |
1647 "movq "#in7", " #m3 " \n\t" /* d */\ | |
1648 "movq "#in0", %%mm5 \n\t" /* D */\ | |
1649 "paddw " #m3 ", %%mm5 \n\t" /* x4 */\ | |
1650 "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\ | |
1651 "movq "#in1", %%mm5 \n\t" /* C */\ | |
1652 "movq "#in2", %%mm6 \n\t" /* B */\ | |
1653 "paddw " #m6 ", %%mm5 \n\t" /* x3 */\ | |
1654 "paddw " #m5 ", %%mm6 \n\t" /* x2 */\ | |
1655 "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\ | |
1656 "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\ | |
961 | 1657 "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\ |
954 | 1658 "paddw " #rnd ", %%mm4 \n\t" /* x2 */\ |
1659 "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\ | |
1660 "psraw $5, %%mm5 \n\t"\ | |
1661 "packuswb %%mm5, %%mm5 \n\t"\ | |
1662 OP(%%mm5, out, %%mm7, d) | |
1663 | |
959 | 1664 #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\ |
1057 | 1665 static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ |
954 | 1666 uint64_t temp;\ |
1667 \ | |
1668 asm volatile(\ | |
1669 "pxor %%mm7, %%mm7 \n\t"\ | |
1670 "1: \n\t"\ | |
1671 "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\ | |
1672 "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\ | |
1673 "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\ | |
1674 "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\ | |
1675 "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\ | |
1676 "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\ | |
1677 "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\ | |
1678 "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\ | |
1679 "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\ | |
1680 "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\ | |
1681 "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\ | |
1682 "psllq $24, %%mm4 \n\t" /* 000ABCDE */\ | |
1683 "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\ | |
1684 "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\ | |
1685 "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\ | |
1686 "paddw %%mm3, %%mm5 \n\t" /* b */\ | |
1687 "paddw %%mm2, %%mm6 \n\t" /* c */\ | |
1688 "paddw %%mm5, %%mm5 \n\t" /* 2b */\ | |
1689 "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\ | |
1690 "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\ | |
961 | 1691 "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\ |
954 | 1692 "paddw %%mm4, %%mm0 \n\t" /* a */\ |
1693 "paddw %%mm1, %%mm5 \n\t" /* d */\ | |
961 | 1694 "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\ |
954 | 1695 "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\ |
961 | 1696 "paddw %6, %%mm6 \n\t"\ |
954 | 1697 "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ |
1698 "psraw $5, %%mm0 \n\t"\ | |
961 | 1699 "movq %%mm0, %5 \n\t"\ |
954 | 1700 /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\ |
1701 \ | |
1702 "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\ | |
1703 "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\ | |
1704 "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\ | |
1705 "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\ | |
1706 "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\ | |
1707 "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\ | |
1708 "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\ | |
1709 "paddw %%mm0, %%mm2 \n\t" /* b */\ | |
1710 "paddw %%mm5, %%mm3 \n\t" /* c */\ | |
1711 "paddw %%mm2, %%mm2 \n\t" /* 2b */\ | |
1712 "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\ | |
1713 "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\ | |
1714 "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\ | |
1715 "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\ | |
1716 "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\ | |
961 | 1717 "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\ |
954 | 1718 "paddw %%mm2, %%mm1 \n\t" /* a */\ |
1719 "paddw %%mm6, %%mm4 \n\t" /* d */\ | |
961 | 1720 "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\ |
954 | 1721 "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\ |
961 | 1722 "paddw %6, %%mm1 \n\t"\ |
954 | 1723 "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\ |
1724 "psraw $5, %%mm3 \n\t"\ | |
961 | 1725 "movq %5, %%mm1 \n\t"\ |
954 | 1726 "packuswb %%mm3, %%mm1 \n\t"\ |
959 | 1727 OP_MMX2(%%mm1, (%1),%%mm4, q)\ |
954 | 1728 /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\ |
1729 \ | |
1730 "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\ | |
1731 "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\ | |
1732 "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\ | |
1733 "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\ | |
1734 "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\ | |
1735 "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\ | |
1736 "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\ | |
1737 "paddw %%mm1, %%mm5 \n\t" /* b */\ | |
1738 "paddw %%mm4, %%mm0 \n\t" /* c */\ | |
1739 "paddw %%mm5, %%mm5 \n\t" /* 2b */\ | |
1740 "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\ | |
1741 "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\ | |
1742 "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\ | |
961 | 1743 "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\ |
954 | 1744 "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\ |
1745 "paddw %%mm3, %%mm2 \n\t" /* d */\ | |
1746 "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\ | |
1747 "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\ | |
1748 "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\ | |
1749 "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\ | |
1750 "paddw %%mm2, %%mm6 \n\t" /* a */\ | |
961 | 1751 "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\ |
1752 "paddw %6, %%mm0 \n\t"\ | |
954 | 1753 "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ |
1754 "psraw $5, %%mm0 \n\t"\ | |
1755 /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\ | |
1756 \ | |
1757 "paddw %%mm5, %%mm3 \n\t" /* a */\ | |
1758 "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\ | |
1759 "paddw %%mm4, %%mm6 \n\t" /* b */\ | |
1760 "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\ | |
1761 "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\ | |
1762 "paddw %%mm1, %%mm4 \n\t" /* c */\ | |
1763 "paddw %%mm2, %%mm5 \n\t" /* d */\ | |
1764 "paddw %%mm6, %%mm6 \n\t" /* 2b */\ | |
1765 "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\ | |
961 | 1766 "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\ |
1767 "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\ | |
954 | 1768 "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\ |
961 | 1769 "paddw %6, %%mm4 \n\t"\ |
954 | 1770 "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\ |
1771 "psraw $5, %%mm4 \n\t"\ | |
1772 "packuswb %%mm4, %%mm0 \n\t"\ | |
959 | 1773 OP_MMX2(%%mm0, 8(%1), %%mm4, q)\ |
954 | 1774 \ |
1775 "addl %3, %0 \n\t"\ | |
1776 "addl %4, %1 \n\t"\ | |
1777 "decl %2 \n\t"\ | |
1778 " jnz 1b \n\t"\ | |
967 | 1779 : "+a"(src), "+c"(dst), "+m"(h)\ |
966 | 1780 : "d"(srcStride), "S"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\ |
1781 : "memory"\ | |
954 | 1782 );\ |
1783 }\ | |
1784 \ | |
1785 static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ | |
1786 int i;\ | |
1787 int16_t temp[16];\ | |
1788 /* quick HACK, XXX FIXME MUST be optimized */\ | |
1789 for(i=0; i<h; i++)\ | |
1790 {\ | |
1791 temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\ | |
1792 temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\ | |
1793 temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\ | |
1794 temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\ | |
1795 temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\ | |
1796 temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\ | |
1797 temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\ | |
1798 temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\ | |
1799 temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\ | |
1800 temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\ | |
1801 temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\ | |
1802 temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\ | |
1803 temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\ | |
1804 temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\ | |
1805 temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\ | |
1806 temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\ | |
1807 asm volatile(\ | |
1808 "movq (%0), %%mm0 \n\t"\ | |
1809 "movq 8(%0), %%mm1 \n\t"\ | |
1810 "paddw %2, %%mm0 \n\t"\ | |
1811 "paddw %2, %%mm1 \n\t"\ | |
1812 "psraw $5, %%mm0 \n\t"\ | |
1813 "psraw $5, %%mm1 \n\t"\ | |
1814 "packuswb %%mm1, %%mm0 \n\t"\ | |
959 | 1815 OP_3DNOW(%%mm0, (%1), %%mm1, q)\ |
954 | 1816 "movq 16(%0), %%mm0 \n\t"\ |
1817 "movq 24(%0), %%mm1 \n\t"\ | |
1818 "paddw %2, %%mm0 \n\t"\ | |
1819 "paddw %2, %%mm1 \n\t"\ | |
1820 "psraw $5, %%mm0 \n\t"\ | |
1821 "psraw $5, %%mm1 \n\t"\ | |
1822 "packuswb %%mm1, %%mm0 \n\t"\ | |
959 | 1823 OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\ |
954 | 1824 :: "r"(temp), "r"(dst), "m"(ROUNDER)\ |
966 | 1825 : "memory"\ |
954 | 1826 );\ |
1827 dst+=dstStride;\ | |
1828 src+=srcStride;\ | |
1829 }\ | |
1830 }\ | |
1831 \ | |
1057 | 1832 static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ |
959 | 1833 uint64_t temp;\ |
1834 \ | |
1835 asm volatile(\ | |
1836 "pxor %%mm7, %%mm7 \n\t"\ | |
1837 "1: \n\t"\ | |
1838 "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\ | |
1839 "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\ | |
1840 "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\ | |
1841 "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\ | |
1842 "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\ | |
1843 "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\ | |
1844 "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\ | |
1845 "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\ | |
1846 "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\ | |
1847 "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\ | |
1848 "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\ | |
1849 "psllq $24, %%mm4 \n\t" /* 000ABCDE */\ | |
1850 "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\ | |
1851 "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\ | |
1852 "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\ | |
1853 "paddw %%mm3, %%mm5 \n\t" /* b */\ | |
1854 "paddw %%mm2, %%mm6 \n\t" /* c */\ | |
1855 "paddw %%mm5, %%mm5 \n\t" /* 2b */\ | |
1856 "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\ | |
1857 "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\ | |
961 | 1858 "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\ |
959 | 1859 "paddw %%mm4, %%mm0 \n\t" /* a */\ |
1860 "paddw %%mm1, %%mm5 \n\t" /* d */\ | |
961 | 1861 "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\ |
959 | 1862 "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\ |
961 | 1863 "paddw %6, %%mm6 \n\t"\ |
959 | 1864 "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ |
1865 "psraw $5, %%mm0 \n\t"\ | |
1866 /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\ | |
1867 \ | |
1868 "movd 5(%0), %%mm5 \n\t" /* FGHI */\ | |
1869 "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\ | |
1870 "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\ | |
1871 "paddw %%mm5, %%mm1 \n\t" /* a */\ | |
1872 "paddw %%mm6, %%mm2 \n\t" /* b */\ | |
1873 "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\ | |
1874 "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\ | |
1875 "paddw %%mm6, %%mm3 \n\t" /* c */\ | |
1876 "paddw %%mm5, %%mm4 \n\t" /* d */\ | |
1877 "paddw %%mm2, %%mm2 \n\t" /* 2b */\ | |
1878 "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\ | |
961 | 1879 "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\ |
1880 "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\ | |
959 | 1881 "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\ |
961 | 1882 "paddw %6, %%mm1 \n\t"\ |
959 | 1883 "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\ |
1884 "psraw $5, %%mm3 \n\t"\ | |
1885 "packuswb %%mm3, %%mm0 \n\t"\ | |
1886 OP_MMX2(%%mm0, (%1), %%mm4, q)\ | |
1887 \ | |
1888 "addl %3, %0 \n\t"\ | |
1889 "addl %4, %1 \n\t"\ | |
1890 "decl %2 \n\t"\ | |
961 | 1891 " jnz 1b \n\t"\ |
967 | 1892 : "+a"(src), "+c"(dst), "+m"(h)\ |
966 | 1893 : "S"(srcStride), "D"(dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\ |
1894 : "memory"\ | |
959 | 1895 );\ |
1896 }\ | |
1897 \ | |
1898 static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ | |
1899 int i;\ | |
1900 int16_t temp[8];\ | |
1901 /* quick HACK, XXX FIXME MUST be optimized */\ | |
1902 for(i=0; i<h; i++)\ | |
1903 {\ | |
1904 temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\ | |
1905 temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\ | |
1906 temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\ | |
1907 temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\ | |
1908 temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\ | |
1909 temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\ | |
1910 temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\ | |
1911 temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\ | |
1912 asm volatile(\ | |
1913 "movq (%0), %%mm0 \n\t"\ | |
1914 "movq 8(%0), %%mm1 \n\t"\ | |
1915 "paddw %2, %%mm0 \n\t"\ | |
1916 "paddw %2, %%mm1 \n\t"\ | |
1917 "psraw $5, %%mm0 \n\t"\ | |
1918 "psraw $5, %%mm1 \n\t"\ | |
1919 "packuswb %%mm1, %%mm0 \n\t"\ | |
1920 OP_3DNOW(%%mm0, (%1), %%mm1, q)\ | |
1921 :: "r"(temp), "r"(dst), "m"(ROUNDER)\ | |
966 | 1922 :"memory"\ |
959 | 1923 );\ |
1924 dst+=dstStride;\ | |
1925 src+=srcStride;\ | |
1926 }\ | |
1927 } | |
1928 | |
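/* Note: the "20a - 6b + 3c - d" expressions above evaluate the MPEG-4
 * quarter-pel 8-tap filter (-1, 3, -6, 20, 20, -6, 3, -1)/32, with a,
 * b, c, d the symmetric tap pairs; taps that would fall outside the
 * block reuse border samples, and the result is rounded, shifted right
 * by 5 and clamped to bytes by packuswb. */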
1929 #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\ | |
1930 \ | |
1931 static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
954 | 1932 uint64_t temp[17*4];\ |
1933 uint64_t *temp_ptr= temp;\ | |
1934 int count= 17;\ | |
1935 \ | |
1936 /*FIXME unroll */\ | |
1937 asm volatile(\ | |
1938 "pxor %%mm7, %%mm7 \n\t"\ | |
1939 "1: \n\t"\ | |
1940 "movq (%0), %%mm0 \n\t"\ | |
1941 "movq (%0), %%mm1 \n\t"\ | |
1942 "movq 8(%0), %%mm2 \n\t"\ | |
1943 "movq 8(%0), %%mm3 \n\t"\ | |
1944 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
1945 "punpckhbw %%mm7, %%mm1 \n\t"\ | |
1946 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
1947 "punpckhbw %%mm7, %%mm3 \n\t"\ | |
1948 "movq %%mm0, (%1) \n\t"\ | |
1949 "movq %%mm1, 17*8(%1) \n\t"\ | |
967 | 1950 "movq %%mm2, 2*17*8(%1) \n\t"\ |
1951 "movq %%mm3, 3*17*8(%1) \n\t"\ | |
954 | 1952 "addl $8, %1 \n\t"\ |
1953 "addl %3, %0 \n\t"\ | |
1954 "decl %2 \n\t"\ | |
1955 " jnz 1b \n\t"\ | |
1956 : "+r" (src), "+r" (temp_ptr), "+r"(count)\ | |
967 | 1957 : "r" (srcStride)\ |
966 | 1958 : "memory"\ |
954 | 1959 );\ |
1960 \ | |
1961 temp_ptr= temp;\ | |
1962 count=4;\ | |
1963 \ | |
1964 /*FIXME reorder for speed */\ | |
1965 asm volatile(\ | |
1966 /*"pxor %%mm7, %%mm7 \n\t"*/\ | |
1967 "1: \n\t"\ | |
1968 "movq (%0), %%mm0 \n\t"\ | |
1969 "movq 8(%0), %%mm1 \n\t"\ | |
1970 "movq 16(%0), %%mm2 \n\t"\ | |
1971 "movq 24(%0), %%mm3 \n\t"\ | |
961 | 1972 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\ |
1973 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\ | |
954 | 1974 "addl %4, %1 \n\t"\ |
961 | 1975 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\ |
954 | 1976 \ |
961 | 1977 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\ |
954 | 1978 "addl %4, %1 \n\t"\ |
961 | 1979 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\ |
1980 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\ | |
954 | 1981 "addl %4, %1 \n\t"\ |
961 | 1982 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\ |
1983 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\ | |
954 | 1984 "addl %4, %1 \n\t"\ |
961 | 1985 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\ |
1986 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\ | |
954 | 1987 "addl %4, %1 \n\t"\ |
961 | 1988 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\ |
1989 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\ | |
954 | 1990 "addl %4, %1 \n\t"\ |
961 | 1991 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\ |
954 | 1992 \ |
961 | 1993 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\ |
954 | 1994 "addl %4, %1 \n\t" \ |
961 | 1995 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\ |
1996 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\ | |
954 | 1997 \ |
1998 "addl $136, %0 \n\t"\ | |
961 | 1999 "addl %6, %1 \n\t"\ |
954 | 2000 "decl %2 \n\t"\ |
2001 " jnz 1b \n\t"\ | |
958 | 2002 \ |
967 | 2003 : "+r"(temp_ptr), "+r"(dst), "+g"(count)\ |
961 | 2004 : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*dstStride)\ |
966 | 2005 :"memory"\ |
954 | 2006 );\ |
2007 }\ | |
2008 \ | |
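/* Note: the vertical lowpass above runs in two passes: the first loop
 * unpacks the 17 source rows into a word buffer (four 17*8 column
 * strips), and the second applies the same 8-tap filter down each
 * strip with QPEL_V_LOW, reusing border rows at the top and bottom. */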
1057 | 2009 static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ |
2209 | 2010 uint64_t temp[9*2];\ |
954 | 2011 uint64_t *temp_ptr= temp;\ |
2012 int count= 9;\ | |
2013 \ | |
2014 /*FIXME unroll */\ | |
2015 asm volatile(\ | |
2016 "pxor %%mm7, %%mm7 \n\t"\ | |
2017 "1: \n\t"\ | |
2018 "movq (%0), %%mm0 \n\t"\ | |
2019 "movq (%0), %%mm1 \n\t"\ | |
2020 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
2021 "punpckhbw %%mm7, %%mm1 \n\t"\ | |
2022 "movq %%mm0, (%1) \n\t"\ | |
2023 "movq %%mm1, 9*8(%1) \n\t"\ | |
2024 "addl $8, %1 \n\t"\ | |
2025 "addl %3, %0 \n\t"\ | |
2026 "decl %2 \n\t"\ | |
2027 " jnz 1b \n\t"\ | |
2028 : "+r" (src), "+r" (temp_ptr), "+r"(count)\ | |
2029 : "r" (srcStride)\ | |
966 | 2030 : "memory"\ |
954 | 2031 );\ |
2032 \ | |
2033 temp_ptr= temp;\ | |
2034 count=2;\ | |
2035 \ | |
2036 /*FIXME reorder for speed */\ | |
2037 asm volatile(\ | |
2038 /*"pxor %%mm7, %%mm7 \n\t"*/\ | |
2039 "1: \n\t"\ | |
2040 "movq (%0), %%mm0 \n\t"\ | |
2041 "movq 8(%0), %%mm1 \n\t"\ | |
2042 "movq 16(%0), %%mm2 \n\t"\ | |
2043 "movq 24(%0), %%mm3 \n\t"\ | |
961 | 2044 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\ |
2045 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\ | |
954 | 2046 "addl %4, %1 \n\t"\ |
961 | 2047 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\ |
954 | 2048 \ |
961 | 2049 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\ |
954 | 2050 "addl %4, %1 \n\t"\ |
961 | 2051 QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\ |
954 | 2052 \ |
961 | 2053 QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\ |
954 | 2054 "addl %4, %1 \n\t"\ |
961 | 2055 QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\ |
2056 QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\ | |
954 | 2057 \ |
2058 "addl $72, %0 \n\t"\ | |
961 | 2059 "addl %6, %1 \n\t"\ |
954 | 2060 "decl %2 \n\t"\ |
2061 " jnz 1b \n\t"\ | |
2062 \ | |
961 | 2063 : "+r"(temp_ptr), "+r"(dst), "+g"(count)\ |
2064 : "r"(dstStride), "r"(2*dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*dstStride)\ | |
966 | 2065 : "memory"\ |
2066 );\ | |
959 | 2067 }\ |
954 | 2068 \ |
1064 | 2069 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ |
959 | 2070 OPNAME ## pixels8_mmx(dst, src, stride, 8);\ |
954 | 2071 }\ |
2072 \ | |
1064 | 2073 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2074 uint64_t temp[8];\ |
954 | 2075 uint8_t * const half= (uint8_t*)temp;\ |
2076 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\ | |
2207 | 2077 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\ |
954 | 2078 }\ |
2079 \ | |
1064 | 2080 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
954 | 2081 OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\ |
2082 }\ | |
2083 \ | |
1064 | 2084 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2085 uint64_t temp[8];\ |
954 | 2086 uint8_t * const half= (uint8_t*)temp;\ |
2087 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\ | |
2207 | 2088 OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\ |
954 | 2089 }\ |
2090 \ | |
1064 | 2091 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2092 uint64_t temp[8];\ |
954 | 2093 uint8_t * const half= (uint8_t*)temp;\ |
959 | 2094 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\ |
2207 | 2095 OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\ |
954 | 2096 }\ |
2097 \ | |
1064 | 2098 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
959 | 2099 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\ |
954 | 2100 }\ |
2101 \ | |
1064 | 2102 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2103 uint64_t temp[8];\ |
954 | 2104 uint8_t * const half= (uint8_t*)temp;\ |
959 | 2105 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\ |
2207 | 2106 OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\ |
954 | 2107 }\ |
1064 | 2108 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2109 uint64_t half[8 + 9];\ |
2110 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
2111 uint8_t * const halfHV= ((uint8_t*)half);\ | |
954 | 2112 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ |
2207 | 2113 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\ |
959 | 2114 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ |
2207 | 2115 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\ |
954 | 2116 }\ |
1064 | 2117 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2118 uint64_t half[8 + 9];\ |
2119 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
2120 uint8_t * const halfHV= ((uint8_t*)half);\ | |
954 | 2121 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ |
2207 | 2122 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\ |
959 | 2123 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ |
2207 | 2124 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\ |
954 | 2125 }\ |
1064 | 2126 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2127 uint64_t half[8 + 9];\ |
2128 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
2129 uint8_t * const halfHV= ((uint8_t*)half);\ | |
954 | 2130 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ |
2207 | 2131 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\ |
959 | 2132 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ |
2207 | 2133 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\ |
954 | 2134 }\ |
1064 | 2135 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2136 uint64_t half[8 + 9];\ |
2137 uint8_t * const halfH= ((uint8_t*)half) + 64;\ | |
2138 uint8_t * const halfHV= ((uint8_t*)half);\ | |
2139 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
2207 | 2140 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
959 | 2141 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ |
2207 | 2142 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
954 | 2143 }\ |
1064 | 2144 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2145 uint64_t half[8 + 9];\ |
954 | 2146 uint8_t * const halfH= ((uint8_t*)half) + 64;\ |
2147 uint8_t * const halfHV= ((uint8_t*)half);\ | |
2148 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
959 | 2149 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ |
2207 | 2150 OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
954 | 2151 }\ |
1064 | 2152 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2153 uint64_t half[8 + 9];\ |
954 | 2154 uint8_t * const halfH= ((uint8_t*)half) + 64;\ |
2155 uint8_t * const halfHV= ((uint8_t*)half);\ | |
2156 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
959 | 2157 put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ |
2207 | 2158 OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
954 | 2159 }\ |
1064 | 2160 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2161 uint64_t half[8 + 9];\ |
2162 uint8_t * const halfH= ((uint8_t*)half);\ | |
954 | 2163 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ |
2207 | 2164 put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
984 | 2165 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\ |
954 | 2166 }\ |
1064 | 2167 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2168 uint64_t half[8 + 9];\ |
2169 uint8_t * const halfH= ((uint8_t*)half);\ | |
954 | 2170 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ |
2207 | 2171 put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
984 | 2172 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\ |
954 | 2173 }\ |
1064 | 2174 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2175 uint64_t half[9];\ |
954 | 2176 uint8_t * const halfH= ((uint8_t*)half);\ |
2177 put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ | |
959 | 2178 OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\ |
954 | 2179 }\ |
1064 | 2180 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ |
959 | 2181 OPNAME ## pixels16_mmx(dst, src, stride, 16);\ |
954 | 2182 }\ |
2183 \ | |
1064 | 2184 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
954 | 2185 uint64_t temp[32];\ |
2186 uint8_t * const half= (uint8_t*)temp;\ | |
2187 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\ | |
2207 | 2188 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
954 | 2189 }\ |
2190 \ | |
1064 | 2191 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
954 | 2192 OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\ |
2193 }\ | |
2194 \ | |
1064 | 2195 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
954 | 2196 uint64_t temp[32];\ |
2197 uint8_t * const half= (uint8_t*)temp;\ | |
2198 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\ | |
2207 | 2199 OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
954 | 2200 }\ |
2201 \ | |
1064 | 2202 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
954 | 2203 uint64_t temp[32];\ |
2204 uint8_t * const half= (uint8_t*)temp;\ | |
959 | 2205 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\ |
2207 | 2206 OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
954 | 2207 }\ |
2208 \ | |
1064 | 2209 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
959 | 2210 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\ |
954 | 2211 }\ |
2212 \ | |
1064 | 2213 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
954 | 2214 uint64_t temp[32];\ |
2215 uint8_t * const half= (uint8_t*)temp;\ | |
959 | 2216 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\ |
2207 | 2217 OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
954 | 2218 }\ |
1064 | 2219 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2220 uint64_t half[16*2 + 17*2];\ |
2221 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
2222 uint8_t * const halfHV= ((uint8_t*)half);\ | |
954 | 2223 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
2207 | 2224 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
959 | 2225 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
2207 | 2226 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
954 | 2227 }\ |
1064 | 2228 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2229 uint64_t half[16*2 + 17*2];\ |
2230 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
2231 uint8_t * const halfHV= ((uint8_t*)half);\ | |
954 | 2232 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
2207 | 2233 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
959 | 2234 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
2207 | 2235 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
954 | 2236 }\ |
1064 | 2237 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2238 uint64_t half[16*2 + 17*2];\ |
2239 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
2240 uint8_t * const halfHV= ((uint8_t*)half);\ | |
954 | 2241 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
2207 | 2242 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
959 | 2243 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
2207 | 2244 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
954 | 2245 }\ |
1064 | 2246 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2247 uint64_t half[16*2 + 17*2];\ |
2248 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
2249 uint8_t * const halfHV= ((uint8_t*)half);\ | |
2250 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
2207 | 2251 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
959 | 2252 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
2207 | 2253 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
954 | 2254 }\ |
1064 | 2255 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
954 | 2256 uint64_t half[16*2 + 17*2];\ |
2257 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
2258 uint8_t * const halfHV= ((uint8_t*)half);\ | |
2259 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
959 | 2260 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
2207 | 2261 OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
954 | 2262 }\ |
1064 | 2263 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
954 | 2264 uint64_t half[16*2 + 17*2];\ |
2265 uint8_t * const halfH= ((uint8_t*)half) + 256;\ | |
2266 uint8_t * const halfHV= ((uint8_t*)half);\ | |
2267 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
959 | 2268 put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ |
2207 | 2269 OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
954 | 2270 }\ |
1064 | 2271 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2272 uint64_t half[17*2];\ |
2273 uint8_t * const halfH= ((uint8_t*)half);\ | |
954 | 2274 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
2207 | 2275 put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
984 | 2276 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ |
954 | 2277 }\ |
1064 | 2278 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
984 | 2279 uint64_t half[17*2];\ |
2280 uint8_t * const halfH= ((uint8_t*)half);\ | |
954 | 2281 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ |
2207 | 2282 put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
984 | 2283 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ |
954 | 2284 }\ |
1064 | 2285 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ |
954 | 2286 uint64_t half[17*2];\ |
2287 uint8_t * const halfH= ((uint8_t*)half);\ | |
2288 put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ | |
959 | 2289 OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ |
954 | 2290 } |
2291 | |
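/* In the qpel ## SIZE ## _mcXY functions above, X and Y are the quarter-pel
   offsets in the horizontal and vertical direction. Half-pel positions use
   the lowpass filters directly; the remaining quarter-pel positions average
   two prediction planes via pixels*_l2. A minimal scalar sketch of e.g.
   mc10 (1/4 pel right), assuming a hypothetical scalar lowpass helper and
   rounded averaging: */
#if 0
static void put_qpel8_mc10_sketch(uint8_t *dst, uint8_t *src, int stride)
{
    uint8_t half[8*8];
    int x, y;
    /* hypothetical scalar equivalent of the MMX h_lowpass */
    put_mpeg4_qpel8_h_lowpass_ref(half, src, 8, stride, 8);
    for(y=0; y<8; y++)
        for(x=0; x<8; x++) /* average integer-pel and half-pel planes */
            dst[y*stride + x]= (src[y*stride + x] + half[y*8 + x] + 1)>>1;
}
#endif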
2211 | 2292 #define QPEL_H264V(A,B,C,D,E,F,OP)\ |
2293 "movd (%0), "#F" \n\t"\ | |
2294 "movq "#C", %%mm6 \n\t"\ | |
2295 "paddw "#D", %%mm6 \n\t"\ | |
2296 "psllw $2, %%mm6 \n\t"\ | |
2297 "psubw "#B", %%mm6 \n\t"\ | |
2298 "psubw "#E", %%mm6 \n\t"\ | |
2299 "pmullw %4, %%mm6 \n\t"\ | |
2300 "addl %2, %0 \n\t"\ | |
2301 "punpcklbw %%mm7, "#F" \n\t"\ | |
2302 "paddw %5, "#A" \n\t"\ | |
2303 "paddw "#F", "#A" \n\t"\ | |
2304 "paddw "#A", %%mm6 \n\t"\ | |
2305 "psraw $5, %%mm6 \n\t"\ | |
2306 "packuswb %%mm6, %%mm6 \n\t"\ | |
2307 OP(%%mm6, (%1), A, d)\ | |
2308 "addl %3, %1 \n\t" | |
2216 | 2309 |
2310 #define QPEL_H264HV(A,B,C,D,E,F,OF)\ | |
2311 "movd (%0), "#F" \n\t"\ | |
2312 "movq "#C", %%mm6 \n\t"\ | |
2313 "paddw "#D", %%mm6 \n\t"\ | |
2314 "psllw $2, %%mm6 \n\t"\ | |
2315 "psubw "#B", %%mm6 \n\t"\ | |
2316 "psubw "#E", %%mm6 \n\t"\ | |
2317 "pmullw %3, %%mm6 \n\t"\ | |
2318 "addl %2, %0 \n\t"\ | |
2319 "punpcklbw %%mm7, "#F" \n\t"\ | |
2320 "paddw "#F", "#A" \n\t"\ | |
2321 "paddw "#A", %%mm6 \n\t"\ | |
2322 "movq %%mm6, "#OF"(%1) \n\t" | |
2211 | 2323 |
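/* Both macros above implement one step of the H.264 6-tap half-pel filter
   (1,-5,20,20,-5,1). With A..F the six neighbouring samples, QPEL_H264V
   stores
       out = clip8((A - 5*B + 20*C + 20*D - 5*E + F + 16) >> 5)
   (the pmullw by 5 of (C+D)*4 - B - E yields the 20/-5 weights, packuswb
   does the clip), while QPEL_H264HV stores the unrounded 16 bit intermediate
   A - 5*B + 20*C + 20*D - 5*E + F for the second filter pass. */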
2209 | 2324 #define QPEL_H264(OPNAME, OP, MMX)\ |
2325 static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
2326 int h=4;\ | |
2327 \ | |
2328 asm volatile(\ | |
2329 "pxor %%mm7, %%mm7 \n\t"\ | |
2330 "movq %5, %%mm4 \n\t"\ | |
2331 "movq %6, %%mm5 \n\t"\ | |
2332 "1: \n\t"\ | |
2333 "movd -1(%0), %%mm1 \n\t"\ | |
2334 "movd (%0), %%mm2 \n\t"\ | |
2335 "movd 1(%0), %%mm3 \n\t"\ | |
2336 "movd 2(%0), %%mm0 \n\t"\ | |
2337 "punpcklbw %%mm7, %%mm1 \n\t"\ | |
2338 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
2339 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
2340 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
2341 "paddw %%mm0, %%mm1 \n\t"\ | |
2342 "paddw %%mm3, %%mm2 \n\t"\ | |
2343 "movd -2(%0), %%mm0 \n\t"\ | |
2344 "movd 3(%0), %%mm3 \n\t"\ | |
2345 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
2346 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
2347 "paddw %%mm3, %%mm0 \n\t"\ | |
2348 "psllw $2, %%mm2 \n\t"\ | |
2349 "psubw %%mm1, %%mm2 \n\t"\ | |
2350 "pmullw %%mm4, %%mm2 \n\t"\ | |
2351 "paddw %%mm5, %%mm0 \n\t"\ | |
2352 "paddw %%mm2, %%mm0 \n\t"\ | |
2353 "psraw $5, %%mm0 \n\t"\ | |
2354 "packuswb %%mm0, %%mm0 \n\t"\ | |
2355 OP(%%mm0, (%1),%%mm6, d)\ | |
2356 "addl %3, %0 \n\t"\ | |
2357 "addl %4, %1 \n\t"\ | |
2358 "decl %2 \n\t"\ | |
2359 " jnz 1b \n\t"\ | |
2360 : "+a"(src), "+c"(dst), "+m"(h)\ | |
2361 : "d"(srcStride), "S"(dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
2362 : "memory"\ | |
2363 );\ | |
2364 }\ | |
2365 static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
2366 src -= 2*srcStride;\ | |
2367 asm volatile(\ | |
2368 "pxor %%mm7, %%mm7 \n\t"\ | |
2369 "movd (%0), %%mm0 \n\t"\ | |
2212 | 2370 "addl %2, %0 \n\t"\ |
2371 "movd (%0), %%mm1 \n\t"\ | |
2372 "addl %2, %0 \n\t"\ | |
2373 "movd (%0), %%mm2 \n\t"\ | |
2374 "addl %2, %0 \n\t"\ | |
2375 "movd (%0), %%mm3 \n\t"\ | |
2376 "addl %2, %0 \n\t"\ | |
2377 "movd (%0), %%mm4 \n\t"\ | |
2378 "addl %2, %0 \n\t"\ | |
2209 | 2379 "punpcklbw %%mm7, %%mm0 \n\t"\ |
2212 | 2380 "punpcklbw %%mm7, %%mm1 \n\t"\ |
2381 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
2382 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
2383 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
2384 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ | |
2385 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ | |
2386 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ | |
2387 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ | |
2388 \ | |
2389 : "+a"(src), "+c"(dst)\ | |
2390 : "S"(srcStride), "D"(dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
2209 | 2391 : "memory"\ |
2392 );\ | |
2393 }\ | |
2394 static void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ | |
2216 | 2395 int h=4;\ |
2396 int w=3;\ | |
2397 src -= 2*srcStride+2;\ | |
2398 while(w--){\ | |
2399 asm volatile(\ | |
2400 "pxor %%mm7, %%mm7 \n\t"\ | |
2401 "movd (%0), %%mm0 \n\t"\ | |
2402 "addl %2, %0 \n\t"\ | |
2403 "movd (%0), %%mm1 \n\t"\ | |
2404 "addl %2, %0 \n\t"\ | |
2405 "movd (%0), %%mm2 \n\t"\ | |
2406 "addl %2, %0 \n\t"\ | |
2407 "movd (%0), %%mm3 \n\t"\ | |
2408 "addl %2, %0 \n\t"\ | |
2409 "movd (%0), %%mm4 \n\t"\ | |
2410 "addl %2, %0 \n\t"\ | |
2411 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
2412 "punpcklbw %%mm7, %%mm1 \n\t"\ | |
2413 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
2414 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
2415 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
2416 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\ | |
2417 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\ | |
2418 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\ | |
2419 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\ | |
2420 \ | |
2421 : "+a"(src)\ | |
2422 : "c"(tmp), "S"(srcStride), "m"(ff_pw_5)\ | |
2423 : "memory"\ | |
2424 );\ | |
2425 tmp += 4;\ | |
2426 src += 4 - 9*srcStride;\ | |
2209 | 2427 }\ |
2216 | 2428 tmp -= 3*4;\ |
2429 asm volatile(\ | |
2430 "movq %4, %%mm6 \n\t"\ | |
2431 "1: \n\t"\ | |
2432 "movq (%0), %%mm0 \n\t"\ | |
2433 "paddw 10(%0), %%mm0 \n\t"\ | |
2434 "movq 2(%0), %%mm1 \n\t"\ | |
2435 "paddw 8(%0), %%mm1 \n\t"\ | |
2436 "movq 4(%0), %%mm2 \n\t"\ | |
2437 "paddw 6(%0), %%mm2 \n\t"\ | |
2438 "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\ | |
2439 "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\ | |
2440 "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\ | |
2441 "paddsw %%mm2, %%mm0 \n\t"\ | |
2442 "psraw $2, %%mm0 \n\t"/*((a-b)/4-b)/4 */\ | |
2443 "paddw %%mm6, %%mm2 \n\t"\ | |
2444 "paddw %%mm2, %%mm0 \n\t"\ | |
2445 "psraw $6, %%mm0 \n\t"\ | |
2446 "packuswb %%mm0, %%mm0 \n\t"\ | |
2447 OP(%%mm0, (%1),%%mm7, d)\ | |
2448 "addl $24, %0 \n\t"\ | |
2449 "addl %3, %1 \n\t"\ | |
2450 "decl %2 \n\t"\ | |
2451 " jnz 1b \n\t"\ | |
2452 : "+a"(tmp), "+c"(dst), "+m"(h)\ | |
2453 : "S"(dstStride), "m"(ff_pw_32)\ | |
2454 : "memory"\ | |
2455 );\ | |
2209 | 2456 }\ |
2457 \ | |
2458 static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
2459 int h=8;\ | |
2460 asm volatile(\ | |
2461 "pxor %%mm7, %%mm7 \n\t"\ | |
2462 "movq %5, %%mm6 \n\t"\ | |
2463 "1: \n\t"\ | |
2464 "movq (%0), %%mm0 \n\t"\ | |
2465 "movq 1(%0), %%mm2 \n\t"\ | |
2466 "movq %%mm0, %%mm1 \n\t"\ | |
2467 "movq %%mm2, %%mm3 \n\t"\ | |
2468 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
2469 "punpckhbw %%mm7, %%mm1 \n\t"\ | |
2470 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
2471 "punpckhbw %%mm7, %%mm3 \n\t"\ | |
2472 "paddw %%mm2, %%mm0 \n\t"\ | |
2473 "paddw %%mm3, %%mm1 \n\t"\ | |
2474 "psllw $2, %%mm0 \n\t"\ | |
2475 "psllw $2, %%mm1 \n\t"\ | |
2476 "movq -1(%0), %%mm2 \n\t"\ | |
2477 "movq 2(%0), %%mm4 \n\t"\ | |
2478 "movq %%mm2, %%mm3 \n\t"\ | |
2479 "movq %%mm4, %%mm5 \n\t"\ | |
2480 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
2481 "punpckhbw %%mm7, %%mm3 \n\t"\ | |
2482 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
2483 "punpckhbw %%mm7, %%mm5 \n\t"\ | |
2484 "paddw %%mm4, %%mm2 \n\t"\ | |
2485 "paddw %%mm3, %%mm5 \n\t"\ | |
2486 "psubw %%mm2, %%mm0 \n\t"\ | |
2487 "psubw %%mm5, %%mm1 \n\t"\ | |
2488 "pmullw %%mm6, %%mm0 \n\t"\ | |
2489 "pmullw %%mm6, %%mm1 \n\t"\ | |
2490 "movd -2(%0), %%mm2 \n\t"\ | |
2491 "movd 7(%0), %%mm5 \n\t"\ | |
2492 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
2493 "punpcklbw %%mm7, %%mm5 \n\t"\ | |
2494 "paddw %%mm3, %%mm2 \n\t"\ | |
2495 "paddw %%mm5, %%mm4 \n\t"\ | |
2496 "movq %6, %%mm5 \n\t"\ | |
2497 "paddw %%mm5, %%mm2 \n\t"\ | |
2498 "paddw %%mm5, %%mm4 \n\t"\ | |
2499 "paddw %%mm2, %%mm0 \n\t"\ | |
2500 "paddw %%mm4, %%mm1 \n\t"\ | |
2501 "psraw $5, %%mm0 \n\t"\ | |
2502 "psraw $5, %%mm1 \n\t"\ | |
2503 "packuswb %%mm1, %%mm0 \n\t"\ | |
2504 OP(%%mm0, (%1),%%mm5, q)\ | |
2505 "addl %3, %0 \n\t"\ | |
2506 "addl %4, %1 \n\t"\ | |
2507 "decl %2 \n\t"\ | |
2508 " jnz 1b \n\t"\ | |
2509 : "+a"(src), "+c"(dst), "+m"(h)\ | |
2510 : "d"(srcStride), "S"(dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
2511 : "memory"\ | |
2512 );\ | |
2513 }\ | |
2514 \ | |
2515 static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
2211 | 2516 int h= 2;\ |
2209 | 2517 src -= 2*srcStride;\ |
2210 | 2518 \ |
2211 | 2519 while(h--){\ |
2520 asm volatile(\ | |
2209 | 2521 "pxor %%mm7, %%mm7 \n\t"\ |
2211 | 2522 "movd (%0), %%mm0 \n\t"\ |
2523 "addl %2, %0 \n\t"\ | |
2524 "movd (%0), %%mm1 \n\t"\ | |
2525 "addl %2, %0 \n\t"\ | |
2526 "movd (%0), %%mm2 \n\t"\ | |
2527 "addl %2, %0 \n\t"\ | |
2528 "movd (%0), %%mm3 \n\t"\ | |
2529 "addl %2, %0 \n\t"\ | |
2530 "movd (%0), %%mm4 \n\t"\ | |
2531 "addl %2, %0 \n\t"\ | |
2210 | 2532 "punpcklbw %%mm7, %%mm0 \n\t"\ |
2211 | 2533 "punpcklbw %%mm7, %%mm1 \n\t"\ |
2534 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
2535 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
2536 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
2537 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ | |
2538 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ | |
2539 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ | |
2540 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ | |
2541 QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ | |
2542 QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ | |
2543 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ | |
2544 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ | |
2545 \ | |
2546 : "+a"(src), "+c"(dst)\ | |
2547 : "S"(srcStride), "D"(dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
2209 | 2548 : "memory"\ |
2211 | 2549 );\ |
2550 src += 4-13*srcStride;\ | |
2551 dst += 4-8*dstStride;\ | |
2552 }\ | |
2209 | 2553 }\ |
2554 static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ | |
2217 | 2555 int h=8;\ |
2556 int w=4;\ | |
2557 src -= 2*srcStride+2;\ | |
2558 while(w--){\ | |
2559 asm volatile(\ | |
2560 "pxor %%mm7, %%mm7 \n\t"\ | |
2561 "movd (%0), %%mm0 \n\t"\ | |
2562 "addl %2, %0 \n\t"\ | |
2563 "movd (%0), %%mm1 \n\t"\ | |
2564 "addl %2, %0 \n\t"\ | |
2565 "movd (%0), %%mm2 \n\t"\ | |
2566 "addl %2, %0 \n\t"\ | |
2567 "movd (%0), %%mm3 \n\t"\ | |
2568 "addl %2, %0 \n\t"\ | |
2569 "movd (%0), %%mm4 \n\t"\ | |
2570 "addl %2, %0 \n\t"\ | |
2571 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
2572 "punpcklbw %%mm7, %%mm1 \n\t"\ | |
2573 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
2574 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
2575 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
2576 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*4)\ | |
2577 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*4)\ | |
2578 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*4)\ | |
2579 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*4)\ | |
2580 QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*8*4)\ | |
2581 QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*8*4)\ | |
2582 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*8*4)\ | |
2583 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*8*4)\ | |
2584 \ | |
2585 : "+a"(src)\ | |
2586 : "c"(tmp), "S"(srcStride), "m"(ff_pw_5)\ | |
2587 : "memory"\ | |
2588 );\ | |
2589 tmp += 4;\ | |
2590 src += 4 - 13*srcStride;\ | |
2591 }\ | |
2592 tmp -= 4*4;\ | |
2593 asm volatile(\ | |
2594 "movq %4, %%mm6 \n\t"\ | |
2595 "1: \n\t"\ | |
2596 "movq (%0), %%mm0 \n\t"\ | |
2597 "movq 8(%0), %%mm3 \n\t"\ | |
2598 "movq 2(%0), %%mm1 \n\t"\ | |
2599 "movq 10(%0), %%mm4 \n\t"\ | |
2600 "paddw %%mm4, %%mm0 \n\t"\ | |
2601 "paddw %%mm3, %%mm1 \n\t"\ | |
2602 "paddw 18(%0), %%mm3 \n\t"\ | |
2603 "paddw 16(%0), %%mm4 \n\t"\ | |
2604 "movq 4(%0), %%mm2 \n\t"\ | |
2605 "movq 12(%0), %%mm5 \n\t"\ | |
2606 "paddw 6(%0), %%mm2 \n\t"\ | |
2607 "paddw 14(%0), %%mm5 \n\t"\ | |
2608 "psubw %%mm1, %%mm0 \n\t"\ | |
2609 "psubw %%mm4, %%mm3 \n\t"\ | |
2610 "psraw $2, %%mm0 \n\t"\ | |
2611 "psraw $2, %%mm3 \n\t"\ | |
2612 "psubw %%mm1, %%mm0 \n\t"\ | |
2613 "psubw %%mm4, %%mm3 \n\t"\ | |
2614 "paddsw %%mm2, %%mm0 \n\t"\ | |
2615 "paddsw %%mm5, %%mm3 \n\t"\ | |
2616 "psraw $2, %%mm0 \n\t"\ | |
2617 "psraw $2, %%mm3 \n\t"\ | |
2618 "paddw %%mm6, %%mm2 \n\t"\ | |
2619 "paddw %%mm6, %%mm5 \n\t"\ | |
2620 "paddw %%mm2, %%mm0 \n\t"\ | |
2621 "paddw %%mm5, %%mm3 \n\t"\ | |
2622 "psraw $6, %%mm0 \n\t"\ | |
2623 "psraw $6, %%mm3 \n\t"\ | |
2624 "packuswb %%mm3, %%mm0 \n\t"\ | |
2625 OP(%%mm0, (%1),%%mm7, q)\ | |
2626 "addl $32, %0 \n\t"\ | |
2627 "addl %3, %1 \n\t"\ | |
2628 "decl %2 \n\t"\ | |
2629 " jnz 1b \n\t"\ | |
2630 : "+a"(tmp), "+c"(dst), "+m"(h)\ | |
2631 : "S"(dstStride), "m"(ff_pw_32)\ | |
2632 : "memory"\ | |
2633 );\ | |
2209 | 2634 }\ |
2635 static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
2636 OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ | |
2637 OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
2638 src += 8*srcStride;\ | |
2639 dst += 8*dstStride;\ | |
2640 OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ | |
2641 OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
2642 }\ | |
2643 \ | |
2644 static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
2645 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ | |
2646 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
2647 src += 8*srcStride;\ | |
2648 dst += 8*dstStride;\ | |
2649 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ | |
2650 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
2651 }\ | |
2652 \ | |
2653 static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ | |
2654 OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\ | |
2216 | 2655 OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\ |
2209 | 2656 src += 8*srcStride;\ |
2657 dst += 8*dstStride;\ | |
2658 OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\ | |
2216 | 2659 OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\ |
2209 | 2660 }\ |
2661 | |
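/* Second pass of the *_hv_lowpass functions above: with a= t[-2]+t[3],
   b= t[-1]+t[2], c= t[0]+t[1] taken from the 16 bit intermediates, the code
   evaluates
       (((a-b)/4 - b + c)/4 + c + 32) >> 6
     = (a - 5*b + 20*c + 512) >> 10        (up to shift truncation)
   i.e. the same 6-tap filter again; the staged >>2 shifts keep each partial
   sum within the signed 16 bit range instead of widening to 32 bits. */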
2662 #define H264_MC(OPNAME, SIZE, MMX) \ | |
2663 static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ | |
2664 OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\ | |
2665 }\ | |
2666 \ | |
2667 static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2668 uint64_t temp[SIZE*SIZE/8];\ | |
2669 uint8_t * const half= (uint8_t*)temp;\ | |
2670 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\ | |
2671 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\ | |
2672 }\ | |
2673 \ | |
2674 static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2675 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\ | |
2676 }\ | |
2677 \ | |
2678 static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2679 uint64_t temp[SIZE*SIZE/8];\ | |
2680 uint8_t * const half= (uint8_t*)temp;\ | |
2681 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\ | |
2682 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+1, half, stride, stride, SIZE);\ | |
2683 }\ | |
2684 \ | |
2685 static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2686 uint64_t temp[SIZE*SIZE/8];\ | |
2687 uint8_t * const half= (uint8_t*)temp;\ | |
2688 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\ | |
2689 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\ | |
2690 }\ | |
2691 \ | |
2692 static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2693 OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\ | |
2694 }\ | |
2695 \ | |
2696 static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2697 uint64_t temp[SIZE*SIZE/8];\ | |
2698 uint8_t * const half= (uint8_t*)temp;\ | |
2699 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\ | |
2700 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\ | |
2701 }\ | |
2702 \ | |
2703 static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2704 uint64_t temp[SIZE*SIZE/4];\ | |
2705 uint8_t * const halfH= (uint8_t*)temp;\ | |
2706 uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ | |
2707 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\ | |
2708 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ | |
2709 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\ | |
2710 }\ | |
2711 \ | |
2712 static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2713 uint64_t temp[SIZE*SIZE/4];\ | |
2714 uint8_t * const halfH= (uint8_t*)temp;\ | |
2715 uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ | |
2716 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\ | |
2717 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ | |
2718 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\ | |
2719 }\ | |
2720 \ | |
2721 static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2722 uint64_t temp[SIZE*SIZE/4];\ | |
2723 uint8_t * const halfH= (uint8_t*)temp;\ | |
2724 uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ | |
2725 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\ | |
2726 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ | |
2727 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\ | |
2728 }\ | |
2729 \ | |
2730 static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2731 uint64_t temp[SIZE*SIZE/4];\ | |
2732 uint8_t * const halfH= (uint8_t*)temp;\ | |
2733 uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ | |
2734 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\ | |
2735 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ | |
2736 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\ | |
2737 }\ | |
2738 \ | |
2739 static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2216 | 2740 uint64_t temp[SIZE*(SIZE+8)/4];\ |
2741 int16_t * const tmp= (int16_t*)temp;\ | |
2209 | 2742 OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\ |
2743 }\ | |
2744 \ | |
2745 static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2216 | 2746 uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\ |
2747 uint8_t * const halfH= (uint8_t*)temp;\ | |
2748 uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ | |
2749 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ | |
2209 | 2750 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\ |
2751 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ | |
2752 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\ | |
2753 }\ | |
2754 \ | |
2755 static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2216 | 2756 uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\ |
2757 uint8_t * const halfH= (uint8_t*)temp;\ | |
2758 uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ | |
2759 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ | |
2209 | 2760 put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\ |
2761 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ | |
2762 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\ | |
2763 }\ | |
2764 \ | |
2765 static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2216 | 2766 uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\ |
2767 uint8_t * const halfV= (uint8_t*)temp;\ | |
2768 uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ | |
2769 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ | |
2209 | 2770 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ |
2771 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ | |
2772 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\ | |
2773 }\ | |
2774 \ | |
2775 static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
2216 | 2776 uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\ |
2777 uint8_t * const halfV= (uint8_t*)temp;\ | |
2778 uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ | |
2779 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ | |
2209 | 2780 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ |
2781 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ | |
2782 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\ | |
2783 }\ | |
2784 | |
954 | 2785 |
2786 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t" | |
959 | 2787 #define AVG_3DNOW_OP(a,b,temp, size) \ |
954 | 2788 "mov" #size " " #b ", " #temp " \n\t"\ |
2789 "pavgusb " #temp ", " #a " \n\t"\ | |
2790 "mov" #size " " #a ", " #b " \n\t" | |
959 | 2791 #define AVG_MMX2_OP(a,b,temp, size) \ |
954 | 2792 "mov" #size " " #b ", " #temp " \n\t"\ |
2793 "pavgb " #temp ", " #a " \n\t"\ | |
2794 "mov" #size " " #a ", " #b " \n\t" | |
959 | 2795 |
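/* The OP macros decide what happens at the final store: PUT_OP is a plain
   mov, the AVG variants average the new prediction with what is already in
   dst. pavgb (MMX2) and pavgusb (3DNow!) both compute the rounded byte
   average, i.e. per byte roughly: */
#if 0
static inline uint8_t avg_op_sketch(uint8_t pred, uint8_t dst)
{
    return (pred + dst + 1)>>1; /* what pavgb/pavgusb do per byte */
}
#endif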
2796 QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP) | |
2797 QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP) | |
2798 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP) | |
2799 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow) | |
2800 QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow) | |
2801 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow) | |
954 | 2802 QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2) |
959 | 2803 QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2) |
954 | 2804 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2) |
2805 | |
2209 | 2806 QPEL_H264(put_ , PUT_OP, 3dnow) |
2807 QPEL_H264(avg_ , AVG_3DNOW_OP, 3dnow) | |
2808 QPEL_H264(put_ , PUT_OP, mmx2) | |
2809 QPEL_H264(avg_ , AVG_MMX2_OP, mmx2) | |
2810 | |
2811 H264_MC(put_, 4, 3dnow) | |
2812 H264_MC(put_, 8, 3dnow) | |
2813 H264_MC(put_, 16,3dnow) | |
2814 H264_MC(avg_, 4, 3dnow) | |
2815 H264_MC(avg_, 8, 3dnow) | |
2816 H264_MC(avg_, 16,3dnow) | |
2817 H264_MC(put_, 4, mmx2) | |
2818 H264_MC(put_, 8, mmx2) | |
2819 H264_MC(put_, 16,mmx2) | |
2820 H264_MC(avg_, 4, mmx2) | |
2821 H264_MC(avg_, 8, mmx2) | |
2822 H264_MC(avg_, 16,mmx2) | |
2823 | |
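/* Each QPEL_H264/H264_MC line above stamps out a complete function family
   for one op/ISA pair, e.g. H264_MC(put_, 4, mmx2) generates
   put_h264_qpel4_mc00_mmx2 ... put_h264_qpel4_mc33_mmx2, i.e. all 16
   quarter-pel positions for 4x4 blocks with the MMX2 store op. */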
393 | 2824 #if 0 |
247 | 2825 static void just_return() { return; }
393 | 2826 #endif |
247 | 2827
954 | 2828 #define SET_QPEL_FUNC(postfix1, postfix2) \ |
2829 c->put_ ## postfix1 = put_ ## postfix2;\ | |
2830 c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\ | |
2831 c->avg_ ## postfix1 = avg_ ## postfix2; | |
1092 | 2832 |
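/* For example SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
   expands to the three assignments
       c->put_qpel_pixels_tab[0][ 0]        = put_qpel16_mc00_mmx2;
       c->put_no_rnd_qpel_pixels_tab[0][ 0] = put_no_rnd_qpel16_mc00_mmx2;
       c->avg_qpel_pixels_tab[0][ 0]        = avg_qpel16_mc00_mmx2;
   wiring one motion compensation slot for all three rounding/averaging
   modes at once. */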
1784 | 2833 static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){ |
2834 int i=0; | |
2835 | |
2836 assert(ABS(scale) < 256); | |
2837 scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT; | |
2838 | |
2839 asm volatile( | |
2840 "pcmpeqw %%mm6, %%mm6 \n\t" // -1w | |
2841 "psrlw $15, %%mm6 \n\t" // 1w | |
2842 "pxor %%mm7, %%mm7 \n\t" | |
2843 "movd %4, %%mm5 \n\t" | |
2844 "punpcklwd %%mm5, %%mm5 \n\t" | |
2845 "punpcklwd %%mm5, %%mm5 \n\t" | |
2846 "1: \n\t" | |
2847 "movq (%1, %0), %%mm0 \n\t" | |
2848 "movq 8(%1, %0), %%mm1 \n\t" | |
2849 "pmulhw %%mm5, %%mm0 \n\t" | |
2850 "pmulhw %%mm5, %%mm1 \n\t" | |
2851 "paddw %%mm6, %%mm0 \n\t" | |
2852 "paddw %%mm6, %%mm1 \n\t" | |
2853 "psraw $1, %%mm0 \n\t" | |
2854 "psraw $1, %%mm1 \n\t" | |
2855 "paddw (%2, %0), %%mm0 \n\t" | |
2856 "paddw 8(%2, %0), %%mm1 \n\t" | |
2857 "psraw $6, %%mm0 \n\t" | |
2858 "psraw $6, %%mm1 \n\t" | |
2859 "pmullw (%3, %0), %%mm0 \n\t" | |
2860 "pmullw 8(%3, %0), %%mm1 \n\t" | |
2861 "pmaddwd %%mm0, %%mm0 \n\t" | |
2862 "pmaddwd %%mm1, %%mm1 \n\t" | |
2863 "paddd %%mm1, %%mm0 \n\t" | |
2864 "psrld $4, %%mm0 \n\t" | |
2865 "paddd %%mm0, %%mm7 \n\t" | |
2866 "addl $16, %0 \n\t" | |
2867 "cmpl $128, %0 \n\t" //FIXME optimize & bench | |
2868 " jb 1b \n\t" | |
2869 "movq %%mm7, %%mm6 \n\t" | |
2870 "psrlq $32, %%mm7 \n\t" | |
2871 "paddd %%mm6, %%mm7 \n\t" | |
2872 "psrld $2, %%mm7 \n\t" | |
2873 "movd %%mm7, %0 \n\t" | |
2874 | |
2875 : "+r" (i) | |
2876 : "r"(basis), "r"(rem), "r"(weight), "g"(scale) | |
2877 ); | |
2878 return i; | |
2879 } | |
2880 | |
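/* A scalar sketch of what the MMX loop above computes (the vector code
   applies the >>4 to pmaddwd pair sums, so rounding differs slightly); it
   returns a scaled sum of squared, weighted reconstruction errors for one
   8x8 block: */
#if 0
static int try_8x8basis_sketch(int16_t rem[64], int16_t weight[64],
                               int16_t basis[64], int scale)
{
    int i, sum=0;
    scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
    for(i=0; i<64; i++){
        int b= ((basis[i]*scale>>16) + 1)>>1; /* pmulhw + rounded >>1 */
        b= (b + rem[i])>>6;                   /* add residual, psraw $6 */
        b*= weight[i];                        /* pmullw */
        sum += (b*b)>>4;                      /* pmaddwd, psrld $4 */
    }
    return sum>>2;                            /* final psrld $2 */
}
#endif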
2881 static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){ | |
2882 int i=0; | |
2883 | |
2884 if(ABS(scale) < 256){ | |
2885 scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT; | |
2886 asm volatile( | |
2887 "pcmpeqw %%mm6, %%mm6 \n\t" // -1w | |
2888 "psrlw $15, %%mm6 \n\t" // 1w | |
2889 "movd %3, %%mm5 \n\t" | |
2890 "punpcklwd %%mm5, %%mm5 \n\t" | |
2891 "punpcklwd %%mm5, %%mm5 \n\t" | |
2892 "1: \n\t" | |
2893 "movq (%1, %0), %%mm0 \n\t" | |
2894 "movq 8(%1, %0), %%mm1 \n\t" | |
2895 "pmulhw %%mm5, %%mm0 \n\t" | |
2896 "pmulhw %%mm5, %%mm1 \n\t" | |
2897 "paddw %%mm6, %%mm0 \n\t" | |
2898 "paddw %%mm6, %%mm1 \n\t" | |
2899 "psraw $1, %%mm0 \n\t" | |
2900 "psraw $1, %%mm1 \n\t" | |
2901 "paddw (%2, %0), %%mm0 \n\t" | |
2902 "paddw 8(%2, %0), %%mm1 \n\t" | |
2903 "movq %%mm0, (%2, %0) \n\t" | |
2904 "movq %%mm1, 8(%2, %0) \n\t" | |
2905 "addl $16, %0 \n\t" | |
2906 "cmpl $128, %0 \n\t" //FIXME optimize & bench | |
2907 " jb 1b \n\t" | |
2908 | |
2909 : "+r" (i) | |
2910 : "r"(basis), "r"(rem), "g"(scale) | |
2911 ); | |
2912 }else{ | |
2913 for(i=0; i<8*8; i++){ | |
2914 rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT); | |
2915 } | |
2916 } | |
2917 } | |
2918 | |
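/* The scale pre-shift in both functions above makes pmulhw (a 16x16 -> high
   16 multiply) reproduce the rounded expression of the scalar else branch:
       scale' = scale << (16 + 1 - BASIS_SHIFT + RECON_SHIFT)
       (basis*scale') >> 16  ==  basis*scale >> (BASIS_SHIFT - RECON_SHIFT - 1)
       (result + 1) >> 1     ==  (basis*scale + (1<<(BASIS_SHIFT-RECON_SHIFT-1)))
                                  >> (BASIS_SHIFT - RECON_SHIFT)
   (up to truncation in the intermediate shift). */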
1092 | 2919 /* external functions, from idct_mmx.c */ |
2920 void ff_mmx_idct(DCTELEM *block); | |
2921 void ff_mmxext_idct(DCTELEM *block); | |
2922 | |
2923 /* XXX: these functions should be removed as soon as all IDCTs are
2924 converted */
2925 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block) | |
2926 { | |
2927 ff_mmx_idct (block); | |
2928 put_pixels_clamped_mmx(block, dest, line_size); | |
2929 } | |
2930 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block) | |
2931 { | |
2932 ff_mmx_idct (block); | |
2933 add_pixels_clamped_mmx(block, dest, line_size); | |
2934 } | |
2935 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block) | |
2936 { | |
2937 ff_mmxext_idct (block); | |
2938 put_pixels_clamped_mmx(block, dest, line_size); | |
2939 } | |
2940 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block) | |
2941 { | |
2942 ff_mmxext_idct (block); | |
2943 add_pixels_clamped_mmx(block, dest, line_size); | |
2944 } | |
954 | 2945 |
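/* The wrappers above adapt the raw IDCTs to the two DSPContext call
   conventions: idct_put overwrites dest with the clamped result (intra
   blocks), idct_add adds the clamped result to the existing prediction
   (inter blocks). The pattern is always
       idct(block);                                // in place on DCTELEMs
       put_pixels_clamped(block, dest, line_size); // or add_pixels_clamped
*/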
1092 | 2946 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) |
0 | 2947 { |
2948 mm_flags = mm_support(); | |
1115 | 2949 |
1122 | 2950 if (avctx->dsp_mask) { |
2951 if (avctx->dsp_mask & FF_MM_FORCE) | |
2952 mm_flags |= (avctx->dsp_mask & 0xffff); | |
2953 else | |
2954 mm_flags &= ~(avctx->dsp_mask & 0xffff); | |
2955 } | |
1115 | 2956 |
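/* Worked example of the dsp_mask handling above: avctx->dsp_mask =
   FF_MM_FORCE | MM_MMX forces the MMX flag on regardless of detection,
   while avctx->dsp_mask = MM_MMXEXT (without FF_MM_FORCE) masks the
   detected MMXEXT capability out. */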
631 | 2957 #if 0
1868 | 2958 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:"); |
0 | 2959 if (mm_flags & MM_MMX) |
1868 | 2960 av_log(avctx, AV_LOG_INFO, " mmx"); |
0 | 2961 if (mm_flags & MM_MMXEXT) |
1868 | 2962 av_log(avctx, AV_LOG_INFO, " mmxext"); |
0 | 2963 if (mm_flags & MM_3DNOW) |
1868 | 2964 av_log(avctx, AV_LOG_INFO, " 3dnow"); |
0 | 2965 if (mm_flags & MM_SSE) |
1868 | 2966 av_log(avctx, AV_LOG_INFO, " sse"); |
0 | 2967 if (mm_flags & MM_SSE2) |
1868 | 2968 av_log(avctx, AV_LOG_INFO, " sse2"); |
2969 av_log(avctx, AV_LOG_INFO, "\n"); | |
0 | 2970 #endif |
2971 | |
2972 if (mm_flags & MM_MMX) { | |
1092 | 2973 const int idct_algo= avctx->idct_algo; |
2974 | |
1232 | 2975 #ifdef CONFIG_ENCODERS
2024 | 2976 const int dct_algo = avctx->dct_algo;
1565 | 2977 if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){ |
1765 | 2978 if(mm_flags & MM_SSE2){
2979 c->fdct = ff_fdct_sse2;
2980 }else if(mm_flags & MM_MMXEXT){
1565 | 2981 c->fdct = ff_fdct_mmx2; |
2982 }else{ | |
2983 c->fdct = ff_fdct_mmx; | |
2984 } | |
2985 } | |
1232 | 2986 #endif //CONFIG_ENCODERS
1092 | 2987 |
2988 if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ | |
2989 c->idct_put= ff_simple_idct_put_mmx; | |
2990 c->idct_add= ff_simple_idct_add_mmx; | |
1324 | 2991 c->idct = ff_simple_idct_mmx;
1092 | 2992 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM; |
2993 }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){ | |
2994 if(mm_flags & MM_MMXEXT){ | |
2995 c->idct_put= ff_libmpeg2mmx2_idct_put; | |
2996 c->idct_add= ff_libmpeg2mmx2_idct_add; | |
1324 | 2997 c->idct = ff_mmxext_idct;
1092 | 2998 }else{ |
2999 c->idct_put= ff_libmpeg2mmx_idct_put; | |
3000 c->idct_add= ff_libmpeg2mmx_idct_add; | |
1324 | 3001 c->idct = ff_mmx_idct;
1092 | 3002 } |
3003 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; | |
3004 } | |
1868 | 3005 |
3006 /* VP3 optimized DSP functions */ | |
1972 | 3007 if (mm_flags & MM_SSE2) { |
3008 c->vp3_dsp_init = vp3_dsp_init_sse2; | |
1977 | 3009 c->vp3_idct = vp3_idct_sse2; |
1972 | 3010 } else { |
3011 c->vp3_dsp_init = vp3_dsp_init_mmx; | |
1977 | 3012 c->vp3_idct = vp3_idct_mmx; |
1972 | 3013 } |
1977 | 3014 |
1530 | 3015 #ifdef CONFIG_ENCODERS
853 | 3016 c->get_pixels = get_pixels_mmx;
3017 c->diff_pixels = diff_pixels_mmx;
1530 | 3018 #endif //CONFIG_ENCODERS
853 | 3019 c->put_pixels_clamped = put_pixels_clamped_mmx;
1984 | 3020 c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
853 | 3021 c->add_pixels_clamped = add_pixels_clamped_mmx;
3022 c->clear_blocks = clear_blocks_mmx;
1530 | 3023 #ifdef CONFIG_ENCODERS
853 | 3024 c->pix_sum = pix_sum16_mmx;
1530 | 3025 #endif //CONFIG_ENCODERS
415 | 3026 |
853 | 3027 c->put_pixels_tab[0][0] = put_pixels16_mmx;
3028 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
3029 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
3030 c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
0 | 3031 |
853 | 3032 c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
3033 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
3034 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
3035 c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;
651 | 3036 |
853 | 3037 c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
3038 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
3039 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
3040 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
415 | 3041 |
853 | 3042 c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
3043 c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
3044 c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
3045 c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;
3046
3047 c->put_pixels_tab[1][0] = put_pixels8_mmx;
3048 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
3049 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
3050 c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;
0 | 3051 |
853 | 3052 c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
3053 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
3054 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
3055 c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;
651 | 3056 |
853 | 3057 c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
3058 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
3059 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
3060 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
651 | 3061 |
853 | 3062 c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
3063 c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
3064 c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
3065 c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;
954 | 3066 |
866 | 3067 c->add_bytes= add_bytes_mmx; |
1530 | 3068 #ifdef CONFIG_ENCODERS
866 | 3069 c->diff_bytes= diff_bytes_mmx; |
936 | 3070 |
3071 c->hadamard8_diff[0]= hadamard8_diff16_mmx; | |
3072 c->hadamard8_diff[1]= hadamard8_diff_mmx; | |
3073 | |
997 | 3074 c->pix_norm1 = pix_norm1_mmx;
3075 c->sse[0] = sse16_mmx;
2067 | 3076 c->sse[1] = sse8_mmx; |
1729 | 3077 c->vsad[4]= vsad_intra16_mmx; |
3078 | |
2067 | 3079 c->nsse[0] = nsse16_mmx; |
3080 c->nsse[1] = nsse8_mmx; | |
1729 | 3081 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ |
3082 c->vsad[0] = vsad16_mmx; | |
3083 } | |
1784 | 3084 |
3085 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ | |
3086 c->try_8x8basis= try_8x8basis_mmx; | |
3087 } | |
3088 c->add_8x8basis= add_8x8basis_mmx; | |
3089 | |
1530 | 3090 #endif //CONFIG_ENCODERS
1647 | 3091 |
3092 c->h263_v_loop_filter= h263_v_loop_filter_mmx; | |
1784 | 3093 c->h263_h_loop_filter= h263_h_loop_filter_mmx; |
936 | 3094 |
0 | 3095 if (mm_flags & MM_MMXEXT) { |
853 | 3096 c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
3097 c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
651 | 3098 |
853 | 3099 c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
3100 c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
3101 c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
415 | 3102 |
853 | 3103 c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
eacc2dd8fd9d
* using DSPContext - so each codec could use its local (sub)set of CPU extension
kabi
parents:
706
diff
changeset
|
3104 c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2; |
651 | 3105 |
853
eacc2dd8fd9d
* using DSPContext - so each codec could use its local (sub)set of CPU extension
kabi
parents:
706
diff
changeset
|
3106 c->avg_pixels_tab[1][0] = avg_pixels8_mmx2; |
eacc2dd8fd9d
* using DSPContext - so each codec could use its local (sub)set of CPU extension
kabi
parents:
706
diff
changeset
|
3107 c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2; |
eacc2dd8fd9d
* using DSPContext - so each codec could use its local (sub)set of CPU extension
kabi
parents:
706
diff
changeset
|
3108 c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2; |
1092 | 3109 |
1530
3b31998fe22f
disable encoders where appropriate (patch courtesy of BERO
melanson
parents:
1527
diff
changeset
|
3110 #ifdef CONFIG_ENCODERS |
1153 | 3111 c->hadamard8_diff[0]= hadamard8_diff16_mmx2; |
3112 c->hadamard8_diff[1]= hadamard8_diff_mmx2; | |
1729 | 3113 c->vsad[4]= vsad_intra16_mmx2; |
1530
3b31998fe22f
disable encoders where appropriate (patch courtesy of BERO
melanson
parents:
1527
diff
changeset
|
3114 #endif //CONFIG_ENCODERS |
1153 | 3115 |
1092 | 3116 if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ |
3117 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; | |
3118 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2; | |
3119 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2; | |
3120 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; | |
3121 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; | |
3122 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; | |
1772
8cd5257195c9
vsad16_mmx2 only applies if encoders are turned on
melanson
parents:
1765
diff
changeset
|
3123 #ifdef CONFIG_ENCODERS |
1729 | 3124 c->vsad[0] = vsad16_mmx2; |
1772
8cd5257195c9
vsad16_mmx2 only applies if encoders are turned on
melanson
parents:
1765
diff
changeset
|
3125 #endif //CONFIG_ENCODERS |
1092 | 3126 } |
959 | 3127 |
961 | 3128 #if 1 |
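            /* qpel_pixels_tab[size][dy*4 + dx]: the index encodes the
               quarter-pel fractional motion vector, matching each
               function's mcXY suffix (e.g. index 6 -> mc21: dx=2, dy=1). */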
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
#endif

            //FIXME 3dnow too
#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);
#undef dspfunc
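            /* For reference, each dspfunc() line above expands to 16
               assignments, e.g. dspfunc(put_h264_qpel, 0, 16) sets
                   c->put_h264_qpel_pixels_tab[0][ 0] = put_h264_qpel16_mc00_mmx2;
                   ...
                   c->put_h264_qpel_pixels_tab[0][15] = put_h264_qpel16_mc33_mmx2;
               i.e. one function per quarter-pel position of a 16x16 block. */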
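            /* sub_hfyu_median_prediction() is HuffYUV's median predictor:
               each sample has median(left, top, left + top - topleft)
               subtracted from it, which the decoder later re-adds. */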
#ifdef CONFIG_ENCODERS
            c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
        } else if (mm_flags & MM_3DNOW) {
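            /* The 3DNow! branch mirrors the MMX2 one above; the *_3dnow
               functions are built from the same templates, with pavgusb
               standing in for the MMX2 pavgb averaging instruction. */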
            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
            SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)

#define dspfunc(PFX, IDX, NUM) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(put_h264_qpel, 1, 8);
            dspfunc(put_h264_qpel, 2, 4);
            dspfunc(avg_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 1, 8);
            dspfunc(avg_h264_qpel, 2, 4);
        }
    }

#ifdef CONFIG_ENCODERS
    dsputil_init_pix_mmx(c, avctx);
#endif //CONFIG_ENCODERS
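/* The disabled block below stubs the DSP pointers out with just_return, a
   do-nothing function, so the surrounding codec code can be benchmarked
   without the cost of the DSP routines themselves. */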
#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}
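
/* Usage sketch (illustrative only, not part of this file): codecs do not
 * call dsputil_init_mmx() directly; dsputil_init() invokes it on x86 after
 * setting up the generic C pointers, so callers simply go through the
 * DSPContext:
 *
 *     DSPContext dsp;
 *     dsputil_init(&dsp, avctx);          // picks MMX/MMX2/3DNow! at runtime
 *     // copy a 16x16 block at an integer-pel position:
 *     dsp.put_pixels_tab[0][0](dst, src, stride, 16);
 */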