annotate x86/dsputil_mmx.c @ 12451:4c3e6ff1237e (libavcodec)

Rename h264_weight_sse2.asm to h264_weight.asm; add 16x8/8x16/8x4 non-square
biweight code to sse2/ssse3; add sse2 weight code; and use that same code to
create mmx2 functions also, so that the inline asm in h264dsp_mmx.c can be
removed. OK'ed by Jason on IRC.

author:   rbultje
date:     Wed, 01 Sep 2010 20:56:16 +0000
parents:  3941687b4fa9
children: f4355cd85faa
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(8,  const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4  ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9  ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1  ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared library it's better to use this way for accessing constants
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pand " #regfe "," #regb " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"

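/* Scalar sketch of the identities behind the two macros above (illustrative
 * only; avg_no_rnd/avg_rnd are hypothetical helper names):
 *
 *     // no rounding: (a + b) >> 1 == (a & b) + (((a ^ b) & 0xfe) >> 1)
 *     static inline uint8_t avg_no_rnd(uint8_t a, uint8_t b) {
 *         return (a & b) + (((a ^ b) & 0xfe) >> 1);
 *     }
 *     // rounding: (a + b + 1) >> 1 == (a | b) - (((a ^ b) & 0xfe) >> 1)
 *     static inline uint8_t avg_rnd(uint8_t a, uint8_t b) {
 *         return (a | b) - (((a ^ b) & 0xfe) >> 1);
 *     }
 *
 * The 0xfe mask (regfe) clears the bit that psrlq would otherwise shift in
 * from the neighbouring byte, which is what allows all 8 bytes to be
 * averaged at once in a single 64-bit register. */
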
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "pand " #regb ", " #regr " \n\t"\
    "pand " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr " \n\t"\
    "movq " #regc ", " #regp " \n\t"\
    "por " #regb ", " #regr " \n\t"\
    "por " #regd ", " #regp " \n\t"\
    "pxor " #rega ", " #regb " \n\t"\
    "pxor " #regc ", " #regd " \n\t"\
    "pand %%mm6, " #regb " \n\t"\
    "pand %%mm6, " #regd " \n\t"\
    "psrlq $1, " #regd " \n\t"\
    "psrlq $1, " #regb " \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
    __asm__ volatile(
        "movq %3, %%mm0 \n\t"
        "movq 8%3, %%mm1 \n\t"
        "movq 16%3, %%mm2 \n\t"
        "movq 24%3, %%mm3 \n\t"
        "movq 32%3, %%mm4 \n\t"
        "movq 40%3, %%mm5 \n\t"
        "movq 48%3, %%mm6 \n\t"
        "movq 56%3, %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
        :"memory");
    pix += line_size*4;
    p += 32;

    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus the "r" constraint is used
    // for the block pointer here
    __asm__ volatile(
        "movq (%3), %%mm0 \n\t"
        "movq 8(%3), %%mm1 \n\t"
        "movq 16(%3), %%mm2 \n\t"
        "movq 24(%3), %%mm3 \n\t"
        "movq 32(%3), %%mm4 \n\t"
        "movq 40(%3), %%mm5 \n\t"
        "movq 48(%3), %%mm6 \n\t"
        "movq 56(%3), %%mm7 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "packuswb %%mm3, %%mm2 \n\t"
        "packuswb %%mm5, %%mm4 \n\t"
        "packuswb %%mm7, %%mm6 \n\t"
        "movq %%mm0, (%0) \n\t"
        "movq %%mm2, (%0, %1) \n\t"
        "movq %%mm4, (%0, %1, 2) \n\t"
        "movq %%mm6, (%0, %2) \n\t"
        ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
        :"memory");
}
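
/* For reference, a scalar sketch of what the two asm blocks above compute
 * (illustrative only): packuswb stores each signed 16-bit coefficient as an
 * unsigned byte with saturation, i.e. roughly
 *
 *     for (i = 0; i < 8; i++)
 *         for (j = 0; j < 8; j++)
 *             pixels[i*line_size + j] = av_clip_uint8(block[i*8 + j]);
 */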

DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

#define put_signed_pixels_clamped_mmx_half(off) \
    "movq "#off"(%2), %%mm1 \n\t"\
    "movq 16+"#off"(%2), %%mm2 \n\t"\
    "movq 32+"#off"(%2), %%mm3 \n\t"\
    "movq 48+"#off"(%2), %%mm4 \n\t"\
    "packsswb 8+"#off"(%2), %%mm1 \n\t"\
    "packsswb 24+"#off"(%2), %%mm2 \n\t"\
    "packsswb 40+"#off"(%2), %%mm3 \n\t"\
    "packsswb 56+"#off"(%2), %%mm4 \n\t"\
    "paddb %%mm0, %%mm1 \n\t"\
    "paddb %%mm0, %%mm2 \n\t"\
    "paddb %%mm0, %%mm3 \n\t"\
    "paddb %%mm0, %%mm4 \n\t"\
    "movq %%mm1, (%0) \n\t"\
    "movq %%mm2, (%0, %3) \n\t"\
    "movq %%mm3, (%0, %3, 2) \n\t"\
    "movq %%mm4, (%0, %1) \n\t"

void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
        "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
        "lea (%3, %3, 2), %1 \n\t"
        put_signed_pixels_clamped_mmx_half(0)
        "lea (%0, %3, 4), %0 \n\t"
        put_signed_pixels_clamped_mmx_half(64)
        :"+&r" (pixels), "=&r" (line_skip3)
        :"r" (block), "r"(line_skip)
        :"memory");
}
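
/* Scalar sketch (illustrative): packsswb saturates each coefficient to
 * [-128,127] and the paddb of ff_vector128 (0x80 in every byte) re-biases
 * the result, so the net effect per pixel is roughly
 *
 *     pixels[i*line_size + j] = av_clip_uint8(block[i*8 + j] + 128);
 */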

void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
            "movq (%2), %%mm0 \n\t"
            "movq 8(%2), %%mm1 \n\t"
            "movq 16(%2), %%mm2 \n\t"
            "movq 24(%2), %%mm3 \n\t"
            "movq %0, %%mm4 \n\t"
            "movq %1, %%mm6 \n\t"
            "movq %%mm4, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm4, %%mm0 \n\t"
            "paddsw %%mm5, %%mm1 \n\t"
            "movq %%mm6, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm6 \n\t"
            "punpckhbw %%mm7, %%mm5 \n\t"
            "paddsw %%mm6, %%mm2 \n\t"
            "paddsw %%mm5, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %0 \n\t"
            "movq %%mm2, %1 \n\t"
            :"+m"(*pix), "+m"(*(pix+line_size))
            :"r"(p)
            :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
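
/* Scalar sketch (illustrative): the pixels are unpacked to 16 bits, added
 * to the coefficients with signed saturation (paddsw) and repacked with
 * unsigned saturation (packuswb), i.e. roughly
 *
 *     pixels[i*line_size + j] = av_clip_uint8(pixels[i*line_size + j] + block[i*8 + j]);
 */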

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd (%1, %3), %%mm1 \n\t"
        "movd %%mm0, (%2) \n\t"
        "movd %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "lea (%3, %3), %%"REG_a" \n\t"
        ASMALIGN(3)
        "1: \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "movq (%1), %%mm0 \n\t"
        "movq 8(%1), %%mm4 \n\t"
        "movq (%1, %3), %%mm1 \n\t"
        "movq 8(%1, %3), %%mm5 \n\t"
        "movq %%mm0, (%2) \n\t"
        "movq %%mm4, 8(%2) \n\t"
        "movq %%mm1, (%2, %3) \n\t"
        "movq %%mm5, 8(%2, %3) \n\t"
        "add %%"REG_a", %1 \n\t"
        "add %%"REG_a", %2 \n\t"
        "subl $4, %0 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size)
        : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
    );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
        "1: \n\t"
        "movdqu (%1), %%xmm0 \n\t"
        "movdqu (%1,%3), %%xmm1 \n\t"
        "movdqu (%1,%3,2), %%xmm2 \n\t"
        "movdqu (%1,%4), %%xmm3 \n\t"
        "pavgb (%2), %%xmm0 \n\t"
        "pavgb (%2,%3), %%xmm1 \n\t"
        "pavgb (%2,%3,2), %%xmm2 \n\t"
        "pavgb (%2,%4), %%xmm3 \n\t"
        "movdqa %%xmm0, (%2) \n\t"
        "movdqa %%xmm1, (%2,%3) \n\t"
        "movdqa %%xmm2, (%2,%3,2) \n\t"
        "movdqa %%xmm3, (%2,%4) \n\t"
        "subl $4, %0 \n\t"
        "lea (%1,%3,4), %1 \n\t"
        "lea (%2,%3,4), %2 \n\t"
        "jnz 1b \n\t"
        : "+g"(h), "+r" (pixels), "+r" (block)
        : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
        : "memory"
    );
}
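
/* pavgb computes the rounded-up average (a + b + 1) >> 1 of each byte pair,
 * so avg_pixels16_sse2 leaves dst = (dst + pix + 1) >> 1, the convention
 * required when averaging with existing pixels. */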

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "mov %1, %%"REG_a" \n\t"\
        "1: \n\t"\
        "movq %%mm7, (%0, %%"REG_a") \n\t"\
        "movq %%mm7, 8(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 16(%0, %%"REG_a") \n\t"\
        "movq %%mm7, 24(%0, %%"REG_a") \n\t"\
        "add $32, %%"REG_a" \n\t"\
        " js 1b \n\t"\
        : : "r" (((uint8_t *)blocks)+128*n),\
            "i" (-128*n)\
        : "%"REG_a\
    );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "movaps %%xmm0, (%0) \n"
        "movaps %%xmm0, 16(%0) \n"
        "movaps %%xmm0, 32(%0) \n"
        "movaps %%xmm0, 48(%0) \n"
        "movaps %%xmm0, 64(%0) \n"
        "movaps %%xmm0, 80(%0) \n"
        "movaps %%xmm0, 96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps %%xmm0, %%xmm0 \n"
        "mov %1, %%"REG_a" \n"
        "1: \n"
        "movaps %%xmm0, (%0, %%"REG_a") \n"
        "movaps %%xmm0, 16(%0, %%"REG_a") \n"
        "movaps %%xmm0, 32(%0, %%"REG_a") \n"
        "movaps %%xmm0, 48(%0, %%"REG_a") \n"
        "movaps %%xmm0, 64(%0, %%"REG_a") \n"
        "movaps %%xmm0, 80(%0, %%"REG_a") \n"
        "movaps %%xmm0, 96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a" \n"
        " js 1b \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}
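
/* For reference, the clear_blocks variants above are equivalent to a plain
 *
 *     memset(blocks, 0, 6 * 64 * sizeof(DCTELEM));  // 6 blocks of 64 coefficients
 *
 * (and the clear_block variants to a single 64-coefficient block); the SIMD
 * versions just zero 32 or 128 bytes per iteration. */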

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq (%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, (%2, %0) \n\t"
        "movq 8(%1, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb %%mm0, %%mm1 \n\t"
        "movq %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %3, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "movq (%2, %0), %%mm0 \n\t"
        "movq 8(%2, %0), %%mm1 \n\t"
        "paddb (%3, %0), %%mm0 \n\t"
        "paddb 8(%3, %0), %%mm1 \n\t"
        "movq %%mm0, (%1, %0) \n\t"
        "movq %%mm1, 8(%1, %0) \n\t"
        "add $16, %0 \n\t"
        "2: \n\t"
        "cmp %4, %0 \n\t"
        " js 1b \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov %7, %3 \n"
        "1: \n"
        "movzx (%3,%4), %2 \n"
        "mov %2, %k3 \n"
        "sub %b1, %b3 \n"
        "add %b0, %b3 \n"
        "mov %2, %1 \n"
        "cmp %0, %2 \n"
        "cmovg %0, %2 \n"
        "cmovg %1, %0 \n"
        "cmp %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov %7, %3 \n"
        "cmp %2, %0 \n"
        "cmovl %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov %b0, (%5,%4) \n"
        "inc %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
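
/* Scalar sketch of the algorithm implemented by the cmov version above
 * (illustrative; mid_pred() is the median of three values, and the byte
 * arithmetic wraps modulo 256 as it does in the asm):
 *
 *     for (i = 0; i < w; i++) {
 *         l = mid_pred(l, top[i], (l + top[i] - tl) & 0xFF) + diff[i];
 *         tl = top[i];
 *         dst[i] = l;
 *     }
 *
 * i.e. each output byte is the median-predicted value plus the stored
 * difference, with l/tl carrying the left and top-left neighbours. */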

#define H263_LOOP_FILTER \
    "pxor %%mm7, %%mm7 \n\t"\
    "movq %0, %%mm0 \n\t"\
    "movq %0, %%mm1 \n\t"\
    "movq %3, %%mm2 \n\t"\
    "movq %3, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm0 \n\t"\
    "punpckhbw %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "psubw %%mm2, %%mm0 \n\t"\
    "psubw %%mm3, %%mm1 \n\t"\
    "movq %1, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "movq %2, %%mm5 \n\t"\
    "punpcklbw %%mm7, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm7, %%mm5 \n\t"\
    "psubw %%mm2, %%mm4 \n\t"\
    "psubw %%mm3, %%mm5 \n\t"\
    "psllw $2, %%mm4 \n\t"\
    "psllw $2, %%mm5 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpgtw %%mm4, %%mm6 \n\t"\
    "pcmpgtw %%mm5, %%mm7 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "pxor %%mm7, %%mm5 \n\t"\
    "psubw %%mm6, %%mm4 \n\t"\
    "psubw %%mm7, %%mm5 \n\t"\
    "psrlw $3, %%mm4 \n\t"\
    "psrlw $3, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm4 \n\t"\
    "packsswb %%mm7, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %4, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "punpcklbw %%mm2, %%mm2 \n\t"\
    "psubusb %%mm4, %%mm2 \n\t"\
    "movq %%mm2, %%mm3 \n\t"\
    "psubusb %%mm4, %%mm3 \n\t"\
    "psubb %%mm3, %%mm2 \n\t"\
    "movq %1, %%mm3 \n\t"\
    "movq %2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm3 \n\t"\
    "psubusb %%mm2, %%mm4 \n\t"\
    "pxor %%mm6, %%mm3 \n\t"\
    "pxor %%mm6, %%mm4 \n\t"\
    "paddusb %%mm2, %%mm2 \n\t"\
    "packsswb %%mm1, %%mm0 \n\t"\
    "pcmpgtb %%mm0, %%mm7 \n\t"\
    "pxor %%mm7, %%mm0 \n\t"\
    "psubb %%mm7, %%mm0 \n\t"\
    "movq %%mm0, %%mm1 \n\t"\
    "psubusb %%mm2, %%mm0 \n\t"\
    "psubb %%mm0, %%mm1 \n\t"\
    "pand %5, %%mm1 \n\t"\
    "psrlw $2, %%mm1 \n\t"\
    "pxor %%mm7, %%mm1 \n\t"\
    "psubb %%mm7, %%mm1 \n\t"\
    "movq %0, %%mm5 \n\t"\
    "movq %3, %%mm6 \n\t"\
    "psubb %%mm1, %%mm5 \n\t"\
    "paddb %%mm1, %%mm6 \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1 \n\t"
        "movq %%mm4, %2 \n\t"
        "movq %%mm5, %0 \n\t"
        "movq %%mm6, %3 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp)[4];
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp , src , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1 \n\t"
        "movq %%mm4, %%mm0 \n\t"
        "punpcklbw %%mm3, %%mm5 \n\t"
        "punpcklbw %%mm6, %%mm4 \n\t"
        "punpckhbw %%mm3, %%mm1 \n\t"
        "punpckhbw %%mm6, %%mm0 \n\t"
        "movq %%mm5, %%mm3 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "punpcklwd %%mm4, %%mm5 \n\t"
        "punpcklwd %%mm0, %%mm1 \n\t"
        "punpckhwd %%mm4, %%mm3 \n\t"
        "punpckhwd %%mm0, %%mm6 \n\t"
        "movd %%mm5, (%0) \n\t"
        "punpckhdq %%mm5, %%mm5 \n\t"
        "movd %%mm5, (%0,%2) \n\t"
        "movd %%mm3, (%0,%2,2) \n\t"
        "punpckhdq %%mm3, %%mm3 \n\t"
        "movd %%mm3, (%0,%3) \n\t"
        "movd %%mm1, (%1) \n\t"
        "punpckhdq %%mm1, %%mm1 \n\t"
        "movd %%mm1, (%1,%2) \n\t"
        "movd %%mm6, (%1,%2,2) \n\t"
        "punpckhdq %%mm6, %%mm6 \n\t"
        "movd %%mm6, (%1,%3) \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg) stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* draw the edges of width 'w' of an image of size width, height
   this mmx version can only handle w==8 || w==16 */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
            "1: \n\t"
            "movd (%0), %%mm0 \n\t"
            "punpcklbw %%mm0, %%mm0 \n\t"
            "punpcklwd %%mm0, %%mm0 \n\t"
            "punpckldq %%mm0, %%mm0 \n\t"
            "movq %%mm0, -8(%0) \n\t"
            "movq %%mm0, -16(%0) \n\t"
            "movq -8(%0, %2), %%mm1 \n\t"
            "punpckhbw %%mm1, %%mm1 \n\t"
            "punpckhwd %%mm1, %%mm1 \n\t"
            "punpckhdq %%mm1, %%mm1 \n\t"
            "movq %%mm1, (%0, %2) \n\t"
            "movq %%mm1, 8(%0, %2) \n\t"
            "add %1, %0 \n\t"
            "cmp %3, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
            "1: \n\t"
            "movq (%1, %0), %%mm0 \n\t"
            "movq %%mm0, (%0) \n\t"
            "movq %%mm0, (%0, %2) \n\t"
            "movq %%mm0, (%0, %2, 2) \n\t"
            "movq %%mm0, (%0, %3) \n\t"
            "add $8, %0 \n\t"
            "cmp %4, %0 \n\t"
            " jb 1b \n\t"
            : "+r" (ptr)
            : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
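
/* Scalar sketch of the edge extension performed above (illustrative):
 *
 *     for (y = 0; y < height; y++) {                    // left/right columns
 *         memset(buf + y*wrap - w,     buf[y*wrap],             w);
 *         memset(buf + y*wrap + width, buf[y*wrap + width - 1], w);
 *     }
 *     for (i = 0; i < w; i++) {                         // top/bottom rows
 *         memcpy(buf       - (i+1)*wrap - w, buf       - w, width + 2*w);
 *         memcpy(last_line + (i+1)*wrap - w, last_line - w, width + 2*w);
 *     }
 */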

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7 \n"\
        "movd (%1,%0), %%mm0 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add %4, %0 \n"\
        "1: \n"\
        "movq %%mm1, %%mm2 \n"\
        "movd (%2,%0), %%mm1 \n"\
        "movq %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq %%mm2, %%mm4 \n"\
        "psubw %%mm1, %%mm3 \n"\
        "psubw %%mm0, %%mm4 \n"\
        "movq %%mm3, %%mm5 \n"\
        "paddw %%mm4, %%mm5 \n"\
        abs3\
        "movq %%mm4, %%mm6 \n"\
        "pminsw %%mm5, %%mm6 \n"\
        "pcmpgtw %%mm6, %%mm3 \n"\
        "pcmpgtw %%mm5, %%mm4 \n"\
        "movq %%mm4, %%mm6 \n"\
        "pand %%mm3, %%mm4 \n"\
        "pandn %%mm3, %%mm6 \n"\
        "pandn %%mm0, %%mm3 \n"\
        "movd (%3,%0), %%mm0 \n"\
        "pand %%mm1, %%mm6 \n"\
        "pand %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq %6, %%mm5 \n"\
        "paddw %%mm6, %%mm0 \n"\
        "paddw %%mm2, %%mm3 \n"\
        "paddw %%mm3, %%mm0 \n"\
        "pand %%mm5, %%mm0 \n"\
        "movq %%mm0, %%mm3 \n"\
        "packuswb %%mm3, %%mm3 \n"\
        "movd %%mm3, (%1,%0) \n"\
        "add %4, %0 \n"\
        "cmp %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

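/* Scalar sketch of the PNG Paeth predictor evaluated per byte above
 * (illustrative):
 *
 *     int a = dst[i - bpp], b = top[i], c = top[i - bpp];
 *     int pa = abs(b - c), pb = abs(a - c), pc = abs(a + b - 2*c);
 *     int pred = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;
 *     dst[i] = src[i] + pred;  // the ff_pw_255 mask brings the sum back to 8 bits
 */
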
#define ABS3_MMX2\
    "psubw %%mm5, %%mm7 \n"\
    "pmaxsw %%mm7, %%mm5 \n"\
    "pxor %%mm6, %%mm6 \n"\
    "pxor %%mm7, %%mm7 \n"\
    "psubw %%mm3, %%mm6 \n"\
    "psubw %%mm4, %%mm7 \n"\
    "pmaxsw %%mm6, %%mm3 \n"\
    "pmaxsw %%mm7, %%mm4 \n"\
    "pxor %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
    "pabsw %%mm3, %%mm3 \n"\
    "pabsw %%mm4, %%mm4 \n"\
    "pabsw %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
    "paddw " #m4 ", " #m3 " \n\t" /* x1 */\
    "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
    "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
    "movq "#in7", " #m3 " \n\t" /* d */\
    "movq "#in0", %%mm5 \n\t" /* D */\
    "paddw " #m3 ", %%mm5 \n\t" /* x4 */\
    "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
    "movq "#in1", %%mm5 \n\t" /* C */\
    "movq "#in2", %%mm6 \n\t" /* B */\
    "paddw " #m6 ", %%mm5 \n\t" /* x3 */\
    "paddw " #m5 ", %%mm6 \n\t" /* x2 */\
    "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
    "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
    "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
    "paddw " #rnd ", %%mm4 \n\t" /* x2 */\
    "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
    "psraw $5, %%mm5 \n\t"\
    "packuswb %%mm5, %%mm5 \n\t"\
    OP(%%mm5, out, %%mm7, d)
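
/* QPEL_V_LOW evaluates one output row of the MPEG-4 quarter-pel lowpass
 * filter (-1, 3, -6, 20, 20, -6, 3, -1)/32: with x1..x4 the four symmetric
 * tap-pair sums, the result is
 *
 *     out = (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5
 *
 * which is exactly the formula spelled out in the scalar 3DNow! fallbacks
 * below. */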
964 | |
965 #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\ | |
966 static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ | |
967 uint64_t temp;\ | |
968 \ | |
969 __asm__ volatile(\ | |
970 "pxor %%mm7, %%mm7 \n\t"\ | |
971 "1: \n\t"\ | |
972 "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\ | |
973 "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\ | |
974 "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\ | |
975 "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\ | |
976 "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\ | |
977 "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\ | |
978 "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\ | |
979 "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\ | |
980 "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\ | |
981 "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\ | |
982 "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\ | |
983 "psllq $24, %%mm4 \n\t" /* 000ABCDE */\ | |
984 "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\ | |
985 "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\ | |
986 "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\ | |
987 "paddw %%mm3, %%mm5 \n\t" /* b */\ | |
988 "paddw %%mm2, %%mm6 \n\t" /* c */\ | |
989 "paddw %%mm5, %%mm5 \n\t" /* 2b */\ | |
990 "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\ | |
991 "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\ | |
992 "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\ | |
993 "paddw %%mm4, %%mm0 \n\t" /* a */\ | |
994 "paddw %%mm1, %%mm5 \n\t" /* d */\ | |
995 "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\ | |
996 "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\ | |
997 "paddw %6, %%mm6 \n\t"\ | |
998 "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ | |
999 "psraw $5, %%mm0 \n\t"\ | |
1000 "movq %%mm0, %5 \n\t"\ | |
1001 /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\ | |
1002 \ | |
1003 "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\ | |
1004 "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\ | |
1005 "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\ | |
1006 "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\ | |
1007 "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\ | |
1008 "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\ | |
1009 "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\ | |
1010 "paddw %%mm0, %%mm2 \n\t" /* b */\ | |
1011 "paddw %%mm5, %%mm3 \n\t" /* c */\ | |
1012 "paddw %%mm2, %%mm2 \n\t" /* 2b */\ | |
1013 "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\ | |
1014 "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\ | |
1015 "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\ | |
1016 "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\ | |
1017 "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\ | |
1018 "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\ | |
1019 "paddw %%mm2, %%mm1 \n\t" /* a */\ | |
1020 "paddw %%mm6, %%mm4 \n\t" /* d */\ | |
1021 "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\ | |
1022 "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\ | |
1023 "paddw %6, %%mm1 \n\t"\ | |
1024 "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\ | |
1025 "psraw $5, %%mm3 \n\t"\ | |
1026 "movq %5, %%mm1 \n\t"\ | |
1027 "packuswb %%mm3, %%mm1 \n\t"\ | |
1028 OP_MMX2(%%mm1, (%1),%%mm4, q)\ | |
1029 /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\ | |
1030 \ | |
1031 "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\ | |
1032 "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\ | |
1033 "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\ | |
1034 "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\ | |
1035 "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\ | |
1036 "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\ | |
1037 "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\ | |
1038 "paddw %%mm1, %%mm5 \n\t" /* b */\ | |
1039 "paddw %%mm4, %%mm0 \n\t" /* c */\ | |
1040 "paddw %%mm5, %%mm5 \n\t" /* 2b */\ | |
1041 "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\ | |
1042 "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\ | |
1043 "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\ | |
1044 "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\ | |
1045 "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\ | |
1046 "paddw %%mm3, %%mm2 \n\t" /* d */\ | |
1047 "psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\ | |
1048 "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\ | |
1049 "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\ | |
1050 "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\ | |
1051 "paddw %%mm2, %%mm6 \n\t" /* a */\ | |
1052 "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\ | |
1053 "paddw %6, %%mm0 \n\t"\ | |
1054 "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ | |
1055 "psraw $5, %%mm0 \n\t"\ | |
1056 /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\ | |
1057 \ | |
1058 "paddw %%mm5, %%mm3 \n\t" /* a */\ | |
1059 "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\ | |
1060 "paddw %%mm4, %%mm6 \n\t" /* b */\ | |
1061 "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\ | |
1062 "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\ | |
1063 "paddw %%mm1, %%mm4 \n\t" /* c */\ | |
1064 "paddw %%mm2, %%mm5 \n\t" /* d */\ | |
1065 "paddw %%mm6, %%mm6 \n\t" /* 2b */\ | |
1066 "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\ | |
1067 "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\ | |
1068 "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\ | |
1069 "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\ | |
1070 "paddw %6, %%mm4 \n\t"\ | |
1071 "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\ | |
1072 "psraw $5, %%mm4 \n\t"\ | |
1073 "packuswb %%mm4, %%mm0 \n\t"\ | |
1074 OP_MMX2(%%mm0, 8(%1), %%mm4, q)\ | |
1075 \ | |
1076 "add %3, %0 \n\t"\ | |
1077 "add %4, %1 \n\t"\ | |
1078 "decl %2 \n\t"\ | |
1079 " jnz 1b \n\t"\ | |
1080 : "+a"(src), "+c"(dst), "+D"(h)\ | |
1081 : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\ | |
1082 : "memory"\ | |
1083 );\ | |
1084 }\ | |
1085 \ | |
1086 static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ | |
1087 int i;\ | |
1088 int16_t temp[16];\ | |
1089 /* quick HACK, XXX FIXME MUST be optimized */\ | |
1090 for(i=0; i<h; i++)\ | |
1091 {\ | |
1092 temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\ | |
1093 temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\ | |
1094 temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\ | |
1095 temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\ | |
1096 temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\ | |
1097 temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\ | |
1098 temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\ | |
1099 temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\ | |
1100 temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\ | |
1101 temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\ | |
1102 temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\ | |
1103 temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\ | |
1104 temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\ | |
1105 temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\ | |
1106 temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\ | |
1107 temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\ | |
1108 __asm__ volatile(\ | |
1109 "movq (%0), %%mm0 \n\t"\ | |
1110 "movq 8(%0), %%mm1 \n\t"\ | |
1111 "paddw %2, %%mm0 \n\t"\ | |
1112 "paddw %2, %%mm1 \n\t"\ | |
1113 "psraw $5, %%mm0 \n\t"\ | |
1114 "psraw $5, %%mm1 \n\t"\ | |
1115 "packuswb %%mm1, %%mm0 \n\t"\ | |
1116 OP_3DNOW(%%mm0, (%1), %%mm1, q)\ | |
1117 "movq 16(%0), %%mm0 \n\t"\ | |
1118 "movq 24(%0), %%mm1 \n\t"\ | |
1119 "paddw %2, %%mm0 \n\t"\ | |
1120 "paddw %2, %%mm1 \n\t"\ | |
1121 "psraw $5, %%mm0 \n\t"\ | |
1122 "psraw $5, %%mm1 \n\t"\ | |
1123 "packuswb %%mm1, %%mm0 \n\t"\ | |
1124 OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\ | |
1125 :: "r"(temp), "r"(dst), "m"(ROUNDER)\ | |
1126 : "memory"\ | |
1127 );\ | |
1128 dst+=dstStride;\ | |
1129 src+=srcStride;\ | |
1130 }\ | |
1131 }\ | |
1132 \ | |
1133 static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ | |
1134 __asm__ volatile(\ | |
1135 "pxor %%mm7, %%mm7 \n\t"\ | |
1136 "1: \n\t"\ | |
1137 "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\ | |
1138 "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\ | |
1139 "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\ | |
1140 "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\ | |
1141 "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\ | |
1142 "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\ | |
1143 "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\ | |
1144 "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\ | |
1145 "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\ | |
1146 "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\ | |
1147 "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\ | |
1148 "psllq $24, %%mm4 \n\t" /* 000ABCDE */\ | |
1149 "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\ | |
1150 "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\ | |
1151 "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\ | |
1152 "paddw %%mm3, %%mm5 \n\t" /* b */\ | |
1153 "paddw %%mm2, %%mm6 \n\t" /* c */\ | |
1154 "paddw %%mm5, %%mm5 \n\t" /* 2b */\ | |
1155 "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\ | |
1156 "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\ | |
1157 "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\ | |
1158 "paddw %%mm4, %%mm0 \n\t" /* a */\ | |
1159 "paddw %%mm1, %%mm5 \n\t" /* d */\ | |
1160 "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\ | |
1161 "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\ | |
1162 "paddw %5, %%mm6 \n\t"\ | |
1163 "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ | |
1164 "psraw $5, %%mm0 \n\t"\ | |
1165 /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\ | |
1166 \ | |
1167 "movd 5(%0), %%mm5 \n\t" /* FGHI */\ | |
1168 "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\ | |
1169 "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\ | |
1170 "paddw %%mm5, %%mm1 \n\t" /* a */\ | |
1171 "paddw %%mm6, %%mm2 \n\t" /* b */\ | |
1172 "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\ | |
1173 "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\ | |
1174 "paddw %%mm6, %%mm3 \n\t" /* c */\ | |
1175 "paddw %%mm5, %%mm4 \n\t" /* d */\ | |
1176 "paddw %%mm2, %%mm2 \n\t" /* 2b */\ | |
1177 "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\ | |
1178 "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\ | |
1179 "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\ | |
1180 "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\ | |
1181 "paddw %5, %%mm1 \n\t"\ | |
1182 "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\ | |
1183 "psraw $5, %%mm3 \n\t"\ | |
1184 "packuswb %%mm3, %%mm0 \n\t"\ | |
1185 OP_MMX2(%%mm0, (%1), %%mm4, q)\ | |
1186 \ | |
1187 "add %3, %0 \n\t"\ | |
1188 "add %4, %1 \n\t"\ | |
1189 "decl %2 \n\t"\ | |
1190 " jnz 1b \n\t"\ | |
1191 : "+a"(src), "+c"(dst), "+d"(h)\ | |
1192 : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\ | |
1193 : "memory"\ | |
1194 );\ | |
1195 }\ | |
1196 \ | |
1197 static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ | |
1198 int i;\ | |
1199 int16_t temp[8];\ | |
1200 /* quick HACK, XXX FIXME MUST be optimized */\ | |
1201 for(i=0; i<h; i++)\ | |
1202 {\ | |
1203 temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\ | |
1204 temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\ | |
1205 temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\ | |
1206 temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\ | |
1207 temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\ | |
1208 temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\ | |
1209 temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\ | |
1210 temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\ | |
1211 __asm__ volatile(\ | |
1212 "movq (%0), %%mm0 \n\t"\ | |
1213 "movq 8(%0), %%mm1 \n\t"\ | |
1214 "paddw %2, %%mm0 \n\t"\ | |
1215 "paddw %2, %%mm1 \n\t"\ | |
1216 "psraw $5, %%mm0 \n\t"\ | |
1217 "psraw $5, %%mm1 \n\t"\ | |
1218 "packuswb %%mm1, %%mm0 \n\t"\ | |
1219 OP_3DNOW(%%mm0, (%1), %%mm1, q)\ | |
1220 :: "r"(temp), "r"(dst), "m"(ROUNDER)\ | |
1221 :"memory"\ | |
1222 );\ | |
1223 dst+=dstStride;\ | |
1224 src+=srcStride;\ | |
1225 }\ | |
1226 } | |

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "movq 8(%0), %%mm2              \n\t"\
        "movq 8(%0), %%mm3              \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 17*8(%1)           \n\t"\
        "movq %%mm2, 2*17*8(%1)         \n\t"\
        "movq %%mm3, 3*17*8(%1)         \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=4;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
\
        "add $136, %0                   \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
\
    temp_ptr= temp;\
    count=2;\
\
    /*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
\
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
\
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
    );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}

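/* For illustration only, not compiled: the scalar filter that the QPEL
 * macros implement. One output sample of the MPEG-4 quarter-pel lowpass
 * is a (20,-6,3,-1)-weighted sum, rounded and clipped, exactly as in the
 * temp[] setup code above (qpel_lowpass_pixel_ref is a hypothetical name;
 * callers mirror the edges, hence the repeated src[8] taps above). */
#if 0
static uint8_t qpel_lowpass_pixel_ref(const uint8_t *src, int rounder)
{
    /* src points three samples left of the output position */
    int v = (src[3] + src[4]) * 20
          - (src[2] + src[5]) *  6
          + (src[1] + src[6]) *  3
          - (src[0] + src[7]);
    v = (v + rounder) >> 5;                /* ROUNDER is 16, or 15 for no_rnd */
    return v < 0 ? 0 : v > 255 ? 255 : v;  /* the packuswb clip */
}
#endif
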
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

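/* For illustration only, not compiled: in scalar terms, PUT_OP stores the
 * filtered byte, while the AVG ops average it with the byte already in
 * dst; both pavgb (MMX2) and pavgusb (3DNow!) round the average up: */
#if 0
static void put_op_ref(uint8_t *dst, uint8_t val){ *dst = val; }
static void avg_op_ref(uint8_t *dst, uint8_t val){ *dst = (*dst + val + 1) >> 1; }
#endif
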
/***********************************/
/* bilinear qpel: not compliant with any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}
#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)

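/* For illustration only, not compiled, and an assumption about the
 * 2tap_qpel*_l3 helpers defined elsewhere in this file: each output byte
 * appears to be built from two rounded byte averages, weighting src[0]
 * twice against the taps at offsets s1 and s2 (roughly a 2:1:1 blend),
 * instead of the spec's 6-tap interpolation -- which is why this path
 * matches no spec: */
#if 0
static uint8_t qpel_2tap_l3_ref(const uint8_t *src, int s1, int s2)
{
    uint8_t half = (src[s1] + src[s2] + 1) >> 1;  /* pavgb */
    return (src[0] + half + 1) >> 1;              /* pavgb */
}
#endif
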
#if 0
static void just_return(void) { return; }
#endif

static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile(
        "movd         %0, %%mm6 \n\t"
        "pxor      %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            __asm__ volatile(
                "movq  %0, %%mm4   \n\t"
                "movq  %1, %%mm5   \n\t"
                "paddw %2, %%mm4   \n\t"
                "paddw %3, %%mm5   \n\t"
                "movq  %%mm4, %0   \n\t"
                "movq  %%mm5, %1   \n\t"
                "psrlw $12, %%mm4  \n\t"
                "psrlw $12, %%mm5  \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile(
                "movq   %%mm6, %%mm2 \n\t"
                "movq   %%mm6, %%mm1 \n\t"
                "psubw  %%mm4, %%mm2 \n\t"
                "psubw  %%mm5, %%mm1 \n\t"
                "movq   %%mm2, %%mm0 \n\t"
                "movq   %%mm4, %%mm3 \n\t"
                "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3 \n\t" // dx*dy
                "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)

                "movd   %4, %%mm5       \n\t"
                "movd   %3, %%mm4       \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3    \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2    \n\t" // src[0,1] * (s-dx)*dy

                "movd   %2, %%mm5       \n\t"
                "movd   %1, %%mm4       \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1    \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0    \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw  %5, %%mm1       \n\t"
                "paddw  %%mm3, %%mm2    \n\t"
                "paddw  %%mm1, %%mm0    \n\t"
                "paddw  %%mm2, %%mm0    \n\t"

                "psrlw    %6, %%mm0     \n\t"
                "packuswb %%mm0, %%mm0  \n\t"
                "movd     %%mm0, %0     \n\t"

                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}
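
/* For illustration only, not compiled: the per-pixel bilinear blend the
 * MMX loop above performs, with s = 1<<shift and dx, dy the per-pixel
 * subpel fractions (the dx4/dy4 accumulators shifted right by 12);
 * gmc_pixel_ref is a hypothetical name. */
#if 0
static uint8_t gmc_pixel_ref(const uint8_t *src, int stride,
                             int dx, int dy, int s, int r, int shift)
{
    return (  src[0]        * (s - dx) * (s - dy)
            + src[1]        *  dx      * (s - dy)
            + src[stride]   * (s - dx) *  dy
            + src[stride+1] *  dx      *  dy
            + r) >> (2*shift);
}
#endif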

#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        __asm__ volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH
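
/* For illustration only, not compiled: these wrappers just touch one
 * address per row ahead of use so the data is cached when the real MC
 * code reads it. With GCC the same idea could be written portably via
 * __builtin_prefetch (an assumption; this file emits the instructions
 * directly): */
#if 0
static void prefetch_ref(void *mem, int stride, int h)
{
    const uint8_t *p = mem;
    do {
        __builtin_prefetch(p);
        p += stride;
    } while (--h);
}
#endif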

#include "h264_qpel_mmx.c"

void ff_put_h264_chroma_mc8_mmx_rnd   (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_mmx_nornd  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_rv40_chroma_mc8_mmx       (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_mmx2_rnd  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_mmx2_nornd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_3dnow_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc8_3dnow     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

void ff_put_h264_chroma_mc4_mmx       (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_rv40_chroma_mc4_mmx       (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_3dnow     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_rv40_chroma_mc4_3dnow     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

void ff_put_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);


/* CAVS specific */
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    avg_pixels8_mmx2(dst, src, stride, 8);
}

/* XXX: these functions should be removed as soon as all IDCTs are
   converted */
#if CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
#endif
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}

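/* For illustration only, not compiled: per pixel, the _put wrappers above
 * clamp the IDCT output to 0..255 and store it, while the _add wrappers
 * add it to the existing prediction before clamping: */
#if 0
static void idct_put_add_ref(uint8_t *dest, int line_size, const DCTELEM *block, int add)
{
    int x, y;
    for (y = 0; y < 8; y++)
        for (x = 0; x < 8; x++) {
            int v = block[y*8 + x] + (add ? dest[y*line_size + x] : 0);
            dest[y*line_size + x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
}
#endif
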
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    __asm__ volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        __asm__ volatile(
            "movq    %0, %%mm0    \n\t"
            "movq    %1, %%mm1    \n\t"
            "movq %%mm0, %%mm2    \n\t"
            "movq %%mm1, %%mm3    \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld $31, %%mm2     \n\t" // keep only the sign bit
            "pxor  %%mm2, %%mm1   \n\t"
            "movq  %%mm3, %%mm4   \n\t"
            "pand  %%mm1, %%mm3   \n\t"
            "pandn %%mm1, %%mm4   \n\t"
            "pfadd %%mm0, %%mm3   \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub %%mm4, %%mm0   \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq  %%mm3, %1      \n\t"
            "movq  %%mm0, %0      \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    __asm__ volatile("femms");
}
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile(
        "movaps %0, %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        __asm__ volatile(
            "movaps  %0, %%xmm0     \n\t"
            "movaps  %1, %%xmm1     \n\t"
            "xorps %%xmm2, %%xmm2   \n\t"
            "xorps %%xmm3, %%xmm3   \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps %%xmm5, %%xmm2   \n\t" // keep only the sign bit
            "xorps %%xmm2, %%xmm1   \n\t"
            "movaps %%xmm3, %%xmm4  \n\t"
            "andps %%xmm1, %%xmm3   \n\t"
            "andnps %%xmm1, %%xmm4  \n\t"
            "addps %%xmm0, %%xmm3   \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps %%xmm4, %%xmm0   \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps %%xmm3, %1      \n\t"
            "movaps %%xmm0, %0      \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}
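
/* For illustration only, not compiled: the scalar logic both versions
 * above vectorize. The branchless sign masks in the asm encode the same
 * four sign cases as this sketch: */
#if 0
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        float m = mag[i], a = ang[i];
        if (m > 0.0f) {
            if (a > 0.0f) { ang[i] = m - a; }
            else          { ang[i] = m; mag[i] = m + a; }
        } else {
            if (a > 0.0f) { ang[i] = m + a; }
            else          { ang[i] = m; mag[i] = m - a; }
        }
    }
}
#endif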

#define IF1(x) x
#define IF0(x)

#define MIX5(mono,stereo)\
    __asm__ volatile(\
        "movss   0(%2), %%xmm5      \n"\
        "movss   8(%2), %%xmm6      \n"\
        "movss  24(%2), %%xmm7      \n"\
        "shufps $0, %%xmm5, %%xmm5  \n"\
        "shufps $0, %%xmm6, %%xmm6  \n"\
        "shufps $0, %%xmm7, %%xmm7  \n"\
        "1:                         \n"\
        "movaps       (%0,%1), %%xmm0 \n"\
        "movaps  0x400(%0,%1), %%xmm1 \n"\
        "movaps  0x800(%0,%1), %%xmm2 \n"\
        "movaps  0xc00(%0,%1), %%xmm3 \n"\
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
        "mulps %%xmm5, %%xmm0       \n"\
        "mulps %%xmm6, %%xmm1       \n"\
        "mulps %%xmm5, %%xmm2       \n"\
        "mulps %%xmm7, %%xmm3       \n"\
        "mulps %%xmm7, %%xmm4       \n"\
 stereo("addps %%xmm1, %%xmm0       \n")\
        "addps %%xmm1, %%xmm2       \n"\
        "addps %%xmm3, %%xmm0       \n"\
        "addps %%xmm4, %%xmm2       \n"\
   mono("addps %%xmm2, %%xmm0       \n")\
        "movaps %%xmm0, (%0,%1)     \n"\
 stereo("movaps %%xmm2, 0x400(%0,%1) \n")\
        "add $16, %0                \n"\
        "jl 1b                      \n"\
        :"+&r"(i)\
        :"r"(samples[0]+len), "r"(matrix)\
        :"memory"\
    );

#define MIX_MISC(stereo)\
    __asm__ volatile(\
        "1:                         \n"\
        "movaps  (%3,%0), %%xmm0    \n"\
 stereo("movaps   %%xmm0, %%xmm1    \n")\
        "mulps    %%xmm6, %%xmm0    \n"\
 stereo("mulps    %%xmm7, %%xmm1    \n")\
        "lea 1024(%3,%0), %1        \n"\
        "mov %5, %2                 \n"\
        "2:                         \n"\
        "movaps   (%1), %%xmm2      \n"\
 stereo("movaps %%xmm2, %%xmm3      \n")\
        "mulps   (%4,%2), %%xmm2    \n"\
 stereo("mulps 16(%4,%2), %%xmm3    \n")\
        "addps %%xmm2, %%xmm0       \n"\
 stereo("addps %%xmm3, %%xmm1       \n")\
        "add $1024, %1              \n"\
        "add $32, %2                \n"\
        "jl 2b                      \n"\
        "movaps %%xmm0, (%3,%0)     \n"\
 stereo("movaps %%xmm1, 1024(%3,%0) \n")\
        "add $16, %0                \n"\
        "jl 1b                      \n"\
        :"+&r"(i), "=&r"(j), "=&r"(k)\
        :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
        :"memory"\
    );

static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i,j,k;

    i = -len*sizeof(float);
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
        MIX5(IF0,IF1);
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
        MIX5(IF1,IF0);
    } else {
        DECLARE_ALIGNED(16, float, matrix_simd)[in_ch][2][4];
        j = 2*in_ch*sizeof(float);
        __asm__ volatile(
            "1:                         \n"
            "sub $8, %0                 \n"
            "movss   (%2,%0), %%xmm6    \n"
            "movss  4(%2,%0), %%xmm7    \n"
            "shufps $0, %%xmm6, %%xmm6  \n"
            "shufps $0, %%xmm7, %%xmm7  \n"
            "movaps %%xmm6,   (%1,%0,4) \n"
            "movaps %%xmm7, 16(%1,%0,4) \n"
            "jg 1b                      \n"
            :"+&r"(j)
            :"r"(matrix_simd), "r"(matrix)
            :"memory"
        );
        if(out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}
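
/* For illustration only, not compiled: MIX5 and MIX_MISC unroll the
 * generic in-place downmix, which per sample accumulates matrix-weighted
 * input channels (ac3_downmix_ref is a hypothetical name): */
#if 0
static void ac3_downmix_ref(float (*samples)[256], float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    int i, j;
    for (i = 0; i < len; i++) {
        float v0 = 0, v1 = 0;
        for (j = 0; j < in_ch; j++) {
            v0 += samples[j][i] * matrix[j][0];
            v1 += samples[j][i] * matrix[j][1];
        }
        samples[0][i] = v0;
        if (out_ch == 2)
            samples[1][i] = v1;
    }
}
#endif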

static void vector_fmul_3dnow(float *dst, const float *src, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1:                     \n\t"
        "movq    (%1,%0), %%mm0 \n\t"
        "movq   8(%1,%0), %%mm1 \n\t"
        "pfmul   (%2,%0), %%mm0 \n\t"
        "pfmul  8(%2,%0), %%mm1 \n\t"
        "movq   %%mm0,  (%1,%0) \n\t"
        "movq   %%mm1, 8(%1,%0) \n\t"
        "sub $16, %0            \n\t"
        "jge 1b                 \n\t"
        "femms                  \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
static void vector_fmul_sse(float *dst, const float *src, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1:                        \n\t"
        "movaps   (%1,%0), %%xmm0  \n\t"
        "movaps 16(%1,%0), %%xmm1  \n\t"
        "mulps    (%2,%0), %%xmm0  \n\t"
        "mulps  16(%2,%0), %%xmm1  \n\t"
        "movaps %%xmm0,   (%1,%0)  \n\t"
        "movaps %%xmm1, 16(%1,%0)  \n\t"
        "sub $32, %0               \n\t"
        "jge 1b                    \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
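
/* For illustration only, not compiled: both versions above compute the
 * plain in-place product, 4 (MMX) or 8 (SSE) floats per iteration: */
#if 0
static void vector_fmul_ref(float *dst, const float *src, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] *= src[i];
}
#endif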

static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    __asm__ volatile(
        "1:                     \n\t"
        "pswapd   8(%1), %%mm0  \n\t"
        "pswapd    (%1), %%mm1  \n\t"
        "pfmul  (%3,%0), %%mm0  \n\t"
        "pfmul 8(%3,%0), %%mm1  \n\t"
        "movq  %%mm0,  (%2,%0)  \n\t"
        "movq  %%mm1, 8(%2,%0)  \n\t"
        "add $16, %1            \n\t"
        "sub $16, %0            \n\t"
        "jge 1b                 \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    __asm__ volatile("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    __asm__ volatile(
        "1:                             \n\t"
        "movaps         16(%1), %%xmm0  \n\t"
        "movaps           (%1), %%xmm1  \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0   \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1   \n\t"
        "mulps        (%3,%0), %%xmm0   \n\t"
        "mulps      16(%3,%0), %%xmm1   \n\t"
        "movaps  %%xmm0,   (%2,%0)      \n\t"
        "movaps  %%xmm1, 16(%2,%0)      \n\t"
        "add $32, %1                    \n\t"
        "sub $32, %0                    \n\t"
        "jge 1b                         \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}
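
/* For illustration only, not compiled: the pswapd / shufps $0x1b reversal
 * above implements a multiply against the second operand read backwards: */
#if 0
static void vector_fmul_reverse_ref(float *dst, const float *src0,
                                    const float *src1, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[len-1-i];
}
#endif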

static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
                                  const float *src2, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1:                     \n\t"
        "movq    (%2,%0), %%mm0 \n\t"
        "movq   8(%2,%0), %%mm1 \n\t"
        "pfmul   (%3,%0), %%mm0 \n\t"
        "pfmul  8(%3,%0), %%mm1 \n\t"
        "pfadd   (%4,%0), %%mm0 \n\t"
        "pfadd  8(%4,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%1,%0)  \n\t"
        "movq  %%mm1, 8(%1,%0)  \n\t"
        "sub $16, %0            \n\t"
        "jge 1b                 \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
    __asm__ volatile("femms");
}
static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                                const float *src2, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1:                         \n\t"
        "movaps   (%2,%0), %%xmm0   \n\t"
        "movaps 16(%2,%0), %%xmm1   \n\t"
        "mulps    (%3,%0), %%xmm0   \n\t"
        "mulps  16(%3,%0), %%xmm1   \n\t"
        "addps    (%4,%0), %%xmm0   \n\t"
        "addps  16(%4,%0), %%xmm1   \n\t"
        "movaps %%xmm0,   (%1,%0)   \n\t"
        "movaps %%xmm1, 16(%1,%0)   \n\t"
        "sub $32, %0                \n\t"
        "jge 1b                     \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
}
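
/* For illustration only, not compiled: a fused multiply-add over three
 * source vectors: */
#if 0
static void vector_fmul_add_ref(float *dst, const float *src0, const float *src1,
                                const float *src2, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i]*src1[i] + src2[i];
}
#endif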

static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
                                      const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-8;
        __asm__ volatile(
            "1:                     \n"
            "pswapd  (%5,%1), %%mm1 \n"
            "movq    (%5,%0), %%mm0 \n"
            "pswapd  (%4,%1), %%mm5 \n"
            "movq    (%3,%0), %%mm4 \n"
            "movq      %%mm0, %%mm2 \n"
            "movq      %%mm1, %%mm3 \n"
            "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
            "pfmul     %%mm5, %%mm3 \n" // src1[ j]*win[len+j]
            "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
            "pfmul     %%mm5, %%mm0 \n" // src1[ j]*win[len+i]
            "pfadd     %%mm3, %%mm2 \n"
            "pfsub     %%mm0, %%mm1 \n"
            "pswapd    %%mm2, %%mm2 \n"
            "movq      %%mm1, (%2,%0) \n"
            "movq      %%mm2, (%2,%1) \n"
            "sub $8, %1             \n"
            "add $8, %0             \n"
            "jl 1b                  \n"
            "femms                  \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
    ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
                                   const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-16;
        __asm__ volatile(
            "1:                             \n"
            "movaps      (%5,%1), %%xmm1    \n"
            "movaps      (%5,%0), %%xmm0    \n"
            "movaps      (%4,%1), %%xmm5    \n"
            "movaps      (%3,%0), %%xmm4    \n"
            "shufps $0x1b, %%xmm1, %%xmm1   \n"
            "shufps $0x1b, %%xmm5, %%xmm5   \n"
            "movaps       %%xmm0, %%xmm2    \n"
            "movaps       %%xmm1, %%xmm3    \n"
            "mulps        %%xmm4, %%xmm2    \n" // src0[len+i]*win[len+i]
            "mulps        %%xmm5, %%xmm3    \n" // src1[ j]*win[len+j]
            "mulps        %%xmm4, %%xmm1    \n" // src0[len+i]*win[len+j]
            "mulps        %%xmm5, %%xmm0    \n" // src1[ j]*win[len+i]
            "addps        %%xmm3, %%xmm2    \n"
            "subps        %%xmm0, %%xmm1    \n"
            "shufps $0x1b, %%xmm2, %%xmm2   \n"
            "movaps       %%xmm1, (%2,%0)   \n"
            "movaps       %%xmm2, (%2,%1)   \n"
            "sub $16, %1                    \n"
            "add $16, %0                    \n"
            "jl 1b                          \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
    ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}
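
/* For illustration only, not compiled: with add_bias == 0 the loops above
 * compute the symmetric window overlap spelled out by the asm comments,
 * writing two mirrored outputs per step; the C fallback
 * ff_vector_fmul_window_c does the same plus the bias. A sketch: */
#if 0
static void vector_fmul_window_ref(float *dst, const float *src0,
                                   const float *src1, const float *win, int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len-1; i < 0; i++, j--) {
        float s0 = src0[i], s1 = src1[j], wi = win[i], wj = win[j];
        dst[i] = s0*wj - s1*wi;
        dst[j] = s0*wi + s1*wj;
    }
}
#endif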

static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss          %3, %%xmm4  \n"
        "shufps $0, %%xmm4, %%xmm4  \n"
        "1:                         \n"
        "cvtpi2ps   (%2,%0), %%xmm0 \n"
        "cvtpi2ps  8(%2,%0), %%xmm1 \n"
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
        "movlhps  %%xmm1, %%xmm0    \n"
        "movlhps  %%xmm3, %%xmm2    \n"
        "mulps    %%xmm4, %%xmm0    \n"
        "mulps    %%xmm4, %%xmm2    \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm2, 16(%1,%0) \n"
        "add $32, %0                \n"
        "jl 1b                      \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss          %3, %%xmm4  \n"
        "shufps $0, %%xmm4, %%xmm4  \n"
        "1:                         \n"
        "cvtdq2ps   (%2,%0), %%xmm0 \n"
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
        "mulps    %%xmm4, %%xmm0    \n"
        "mulps    %%xmm4, %%xmm1    \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm1, 16(%1,%0) \n"
        "add $32, %0                \n"
        "jl 1b                      \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}
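
/* For illustration only, not compiled: both versions convert int32
 * samples to float and scale them; SSE1 has to go through cvtpi2ps on MMX
 * registers, SSE2 converts four ints at once with cvtdq2ps: */
#if 0
static void int32_to_float_fmul_scalar_ref(float *dst, const int *src, float mul, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src[i] * mul;
}
#endif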

static void vector_clipf_sse(float *dst, const float *src, float min, float max,
                             int len)
{
    x86_reg i = (len-16)*4;
    __asm__ volatile(
        "movss          %3, %%xmm4  \n"
        "movss          %4, %%xmm5  \n"
        "shufps $0, %%xmm4, %%xmm4  \n"
        "shufps $0, %%xmm5, %%xmm5  \n"
        "1:                         \n\t"
        "movaps   (%2,%0), %%xmm0   \n\t" // 3/1 on intel
        "movaps 16(%2,%0), %%xmm1   \n\t"
        "movaps 32(%2,%0), %%xmm2   \n\t"
        "movaps 48(%2,%0), %%xmm3   \n\t"
        "maxps     %%xmm4, %%xmm0   \n\t"
        "maxps     %%xmm4, %%xmm1   \n\t"
        "maxps     %%xmm4, %%xmm2   \n\t"
        "maxps     %%xmm4, %%xmm3   \n\t"
        "minps     %%xmm5, %%xmm0   \n\t"
        "minps     %%xmm5, %%xmm1   \n\t"
        "minps     %%xmm5, %%xmm2   \n\t"
        "minps     %%xmm5, %%xmm3   \n\t"
        "movaps %%xmm0,   (%1,%0)   \n\t"
        "movaps %%xmm1, 16(%1,%0)   \n\t"
        "movaps %%xmm2, 32(%1,%0)   \n\t"
        "movaps %%xmm3, 48(%1,%0)   \n\t"
        "sub $64, %0                \n\t"
        "jge 1b                     \n\t"
        :"+&r"(i)
        :"r"(dst), "r"(src), "m"(min), "m"(max)
        :"memory"
    );
}

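/* For illustration only, not compiled: the maxps/minps pairs above clamp
 * each sample into [min,max], 16 floats per iteration: */
#if 0
static void vector_clipf_ref(float *dst, const float *src, float min, float max,
                             int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src[i] < min ? min : src[i] > max ? max : src[i];
}
#endif
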
static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    // not bit-exact: pf2id uses different rounding than C and SSE
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "pf2id       (%2,%0,2)  , %%mm0     \n\t"
        "pf2id      8(%2,%0,2)  , %%mm1     \n\t"
        "pf2id     16(%2,%0,2)  , %%mm2     \n\t"
        "pf2id     24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "femms                              \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}
static void float_to_int16_sse(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2pi    (%2,%0,2)  , %%mm0     \n\t"
        "cvtps2pi   8(%2,%0,2)  , %%mm1     \n\t"
        "cvtps2pi  16(%2,%0,2)  , %%mm2     \n\t"
        "cvtps2pi  24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "emms                               \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2dq    (%2,%0,2)  , %%xmm0    \n\t"
        "cvtps2dq  16(%2,%0,2)  , %%xmm1    \n\t"
        "packssdw   %%xmm1      , %%xmm0    \n\t"
        "movdqa     %%xmm0      ,  (%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

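/* All three variants above compute the same conversion: float to int16 with
 * saturation (packssdw saturates to [-32768,32767]). They differ only in
 * rounding: pf2id truncates toward zero, while cvtps2pi/cvtps2dq use the
 * current MXCSR rounding mode (round-to-nearest by default). A scalar sketch
 * of the common semantics (hypothetical name, not part of the build; assumes
 * <math.h> for lrintf() and av_clip_int16() from libavutil): */
#if 0
static void float_to_int16_c_sketch(int16_t *dst, const float *src, long len)
{
    long i;
    for (i = 0; i < len; i++)
        dst[i] = av_clip_int16((int)lrintf(src[i]));
}
#endif
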
void ff_vp3_idct_mmx(int16_t *input_data);
void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);

void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);

void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);

void ff_vp3_idct_sse2(int16_t *input_data);
void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);

void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
int ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
int ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);

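/* Reference semantics of the scalarproduct kernels declared above, as a
 * scalar sketch (hypothetical name, not part of the build): the _and_madd
 * variant returns the dot product of v1 and v2 while updating v1 in place
 * with v1[i] += mul*v3[i], the inner loop of predictors in lossless audio
 * decoders such as Monkey's Audio. */
#if 0
static int32_t scalarproduct_and_madd_int16_c_sketch(int16_t *v1, const int16_t *v2,
                                                     const int16_t *v3, int order, int mul)
{
    int res = 0;
    while (order--) {
        res   += *v1 * *v2++;   // accumulate dot product
        *v1++ += mul * *v3++;   // update the prediction vector in place
    }
    return res;
}
#endif
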
#if !HAVE_YASM
#define ff_float_to_int16_interleave6_sse(a,b,c)   float_to_int16_interleave_misc_sse(a,b,c,6)
#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
#define ff_float_to_int16_interleave6_3dn2(a,b,c)  float_to_int16_interleave_misc_3dnow(a,b,c,6)
#endif
#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse

#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
    DECLARE_ALIGNED(16, int16_t, tmp)[len];\
    int i,j,c;\
    for(c=0; c<channels; c++){\
        float_to_int16_##cpu(tmp, src[c], len);\
        for(i=0, j=c; i<len; i++, j+=channels)\
            dst[j] = tmp[i];\
    }\
}\
\
static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
    if(channels==1)\
        float_to_int16_##cpu(dst, src[0], len);\
    else if(channels==2){\
        x86_reg reglen = len; \
        const float *src0 = src[0];\
        const float *src1 = src[1];\
        __asm__ volatile(\
            "shl $2, %0 \n"\
            "add %0, %1 \n"\
            "add %0, %2 \n"\
            "add %0, %3 \n"\
            "neg %0 \n"\
            body\
            :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
        );\
    }else if(channels==6){\
        ff_float_to_int16_interleave6_##cpu(dst, src, len);\
    }else\
        float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
}

FLOAT_TO_INT16_INTERLEAVE(3dnow,
    "1:                         \n"
    "pf2id     (%2,%0), %%mm0   \n"
    "pf2id    8(%2,%0), %%mm1   \n"
    "pf2id     (%3,%0), %%mm2   \n"
    "pf2id    8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add           $16, %0      \n"
    "js 1b                      \n"
    "femms                      \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse,
    "1:                         \n"
    "cvtps2pi  (%2,%0), %%mm0   \n"
    "cvtps2pi 8(%2,%0), %%mm1   \n"
    "cvtps2pi  (%3,%0), %%mm2   \n"
    "cvtps2pi 8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add           $16, %0      \n"
    "js 1b                      \n"
    "emms                       \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse2,
    "1:                         \n"
    "cvtps2dq  (%2,%0), %%xmm0  \n"
    "cvtps2dq  (%3,%0), %%xmm1  \n"
    "packssdw   %%xmm1, %%xmm0  \n"
    "movhlps    %%xmm0, %%xmm1  \n"
    "punpcklwd  %%xmm1, %%xmm0  \n"
    "movdqa     %%xmm0, (%1,%0) \n"
    "add $16, %0                \n"
    "js 1b                      \n"
)

static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
    if(channels==6)
        ff_float_to_int16_interleave6_3dn2(dst, src, len);
    else
        float_to_int16_interleave_3dnow(dst, src, len, channels);
}

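/* Scalar equivalent of the channels==2 asm paths generated above (sketch with
 * a hypothetical name, not part of the build): convert each sample and write
 * left/right alternately, which is what the punpck word-interleaving in the
 * asm bodies achieves a few samples per channel at a time. */
#if 0
static void float_to_int16_interleave2_c_sketch(int16_t *dst, const float *src0,
                                                const float *src1, long len)
{
    long i;
    for (i = 0; i < len; i++) {
        dst[2*i  ] = av_clip_int16((int)lrintf(src0[i])); // left channel
        dst[2*i+1] = av_clip_int16((int)lrintf(src1[i])); // right channel
    }
}
#endif
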
float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);

void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    int mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }
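    /* Semantics of dsp_mask (comment added for clarity): without FF_MM_FORCE
     * the listed flags are stripped from the detected CPU feature set, with
     * FF_MM_FORCE they are forcibly enabled. E.g. a caller that sets
     * avctx->dsp_mask = FF_MM_SSE2 before opening the codec disables the
     * SSE2 code paths selected below. */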

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & FF_MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & FF_MM_MMX2)
        av_log(avctx, AV_LOG_INFO, " mmx2");
    if (mm_flags & FF_MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & FF_MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & FF_MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & FF_MM_MMX) {
        const int idct_algo= avctx->idct_algo;

        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & FF_MM_MMX2){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
#endif
            }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
                     idct_algo==FF_IDCT_VP3 && HAVE_YASM){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & FF_MM_MMX2){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

        c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        if ((mm_flags & FF_MM_SSE) &&
            !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
    c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
    c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
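        /* Layout of the hpel tables filled above (comment added for clarity):
         * the first index selects the block width (0 = 16 pixels, 1 = 8) and
         * the second the interpolation: entry 0 is the unshifted copy, 1-3
         * are the x2 (horizontal), y2 (vertical) and xy2 (diagonal) half-pel
         * positions. */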

        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }

#if HAVE_YASM
        c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
        c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_mmx_nornd;

        c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx;
#endif

        if (mm_flags & FF_MM_MMX2) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;

                if (CONFIG_VP3_DECODER && HAVE_YASM) {
                    c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
                    c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
                }
            }
            if (CONFIG_VP3_DECODER && HAVE_YASM) {
                c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
            }

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
    c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU

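            /* The mc##x##y suffix encodes the quarter-pel phase (comment
             * added for clarity): x is the horizontal and y the vertical
             * offset in quarter pixels, so mc00 is a plain copy and mc22 the
             * centre half-pel position; the table index is x + y*4. */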
            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

#if HAVE_YASM
            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2;

            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_mmx2_nornd;

            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;

            c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS
            if( mm_flags&FF_MM_3DNOW )
                c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

            if (CONFIG_VC1_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & FF_MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

#if HAVE_YASM
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;

            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_3dnow_nornd;

            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow;
#endif
        }


#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
        if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & FF_MM_SSE2){
            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }
#if HAVE_SSSE3
        if(mm_flags & FF_MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
#if HAVE_YASM
            c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_ssse3_nornd;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
            c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
            if (mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe
                c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
#endif
        }
#endif

        if(mm_flags & FF_MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16 = float_to_int16_3dnow;
                c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
            }
        }
        if(mm_flags & FF_MM_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
            c->vector_fmul_window = vector_fmul_window_3dnow2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
            }
        }
        if(mm_flags & FF_MM_MMX2){
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
#endif
        }
        if(mm_flags & FF_MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add = vector_fmul_add_sse;
            c->vector_fmul_window = vector_fmul_window_sse;
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
            c->vector_clipf = vector_clipf_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
#if HAVE_YASM
            c->scalarproduct_float = ff_scalarproduct_float_sse;
#endif
        }
        if(mm_flags & FF_MM_3DNOW)
            c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
        if(mm_flags & FF_MM_SSE2){
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
            c->float_to_int16 = float_to_int16_sse2;
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
#endif
        }
        if((mm_flags & FF_MM_SSSE3) && !(mm_flags & (FF_MM_SSE42|FF_MM_3DNOW)) && HAVE_YASM) // cachesplit
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    }

    if (CONFIG_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}