/* libavcodec i386/h264dsp_mmx.c — recovered from a Mercurial annotate view,
 * rev 6320:ffb2a7b80d6d ("ff_h264_idct8_add_sse2", lorenm, 2008-02-03;
 * parent 4089a1ae6558). Annotation markup stripped during recovery. */
/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil_mmx.h"

23 /***********************************/ | |
24 /* IDCT */ | |
25 | |
26 #define SUMSUB_BADC( a, b, c, d ) \ | |
27 "paddw "#b", "#a" \n\t"\ | |
28 "paddw "#d", "#c" \n\t"\ | |
29 "paddw "#b", "#b" \n\t"\ | |
30 "paddw "#d", "#d" \n\t"\ | |
31 "psubw "#a", "#b" \n\t"\ | |
32 "psubw "#c", "#d" \n\t" | |
33 | |
34 #define SUMSUBD2_AB( a, b, t ) \ | |
35 "movq "#b", "#t" \n\t"\ | |
36 "psraw $1 , "#b" \n\t"\ | |
37 "paddw "#a", "#b" \n\t"\ | |
38 "psraw $1 , "#a" \n\t"\ | |
39 "psubw "#t", "#a" \n\t" | |
40 | |
41 #define IDCT4_1D( s02, s13, d02, d13, t ) \ | |
42 SUMSUB_BA ( s02, d02 )\ | |
43 SUMSUBD2_AB( s13, d13, t )\ | |
44 SUMSUB_BADC( d13, s02, s13, d02 ) | |
45 | |
46 #define STORE_DIFF_4P( p, t, z ) \ | |
47 "psraw $6, "#p" \n\t"\ | |
48 "movd (%0), "#t" \n\t"\ | |
49 "punpcklbw "#z", "#t" \n\t"\ | |
50 "paddsw "#t", "#p" \n\t"\ | |
51 "packuswb "#z", "#p" \n\t"\ | |
52 "movd "#p", (%0) \n\t" | |
53 | |
3173 | 54 static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride) |
2754 | 55 { |
56 /* Load dct coeffs */ | |
57 asm volatile( | |
58 "movq (%0), %%mm0 \n\t" | |
59 "movq 8(%0), %%mm1 \n\t" | |
60 "movq 16(%0), %%mm2 \n\t" | |
61 "movq 24(%0), %%mm3 \n\t" | |
62 :: "r"(block) ); | |
63 | |
64 asm volatile( | |
65 /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */ | |
66 IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 ) | |
67 | |
68 "movq %0, %%mm6 \n\t" | |
69 /* in: 1,4,0,2 out: 1,2,3,0 */ | |
70 TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 ) | |
71 | |
72 "paddw %%mm6, %%mm3 \n\t" | |
73 | |
74 /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */ | |
75 IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 ) | |
76 | |
77 "pxor %%mm7, %%mm7 \n\t" | |
6320 | 78 :: "m"(*ff_pw_32)); |
2754 | 79 |
80 asm volatile( | |
81 STORE_DIFF_4P( %%mm0, %%mm1, %%mm7) | |
82 "add %1, %0 \n\t" | |
83 STORE_DIFF_4P( %%mm2, %%mm1, %%mm7) | |
84 "add %1, %0 \n\t" | |
85 STORE_DIFF_4P( %%mm3, %%mm1, %%mm7) | |
86 "add %1, %0 \n\t" | |
87 STORE_DIFF_4P( %%mm4, %%mm1, %%mm7) | |
88 : "+r"(dst) | |
89 : "r" ((long)stride) | |
90 ); | |
91 } | |
92 | |
3174 | 93 static inline void h264_idct8_1d(int16_t *block) |
94 { | |
95 asm volatile( | |
96 "movq 112(%0), %%mm7 \n\t" | |
6319
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
97 "movq 80(%0), %%mm0 \n\t" |
3174 | 98 "movq 48(%0), %%mm3 \n\t" |
6319
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
99 "movq 16(%0), %%mm5 \n\t" |
3174 | 100 |
6319
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
101 "movq %%mm0, %%mm4 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
102 "movq %%mm5, %%mm1 \n\t" |
3174 | 103 "psraw $1, %%mm4 \n\t" |
6319
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
104 "psraw $1, %%mm1 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
105 "paddw %%mm0, %%mm4 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
106 "paddw %%mm5, %%mm1 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
107 "paddw %%mm7, %%mm4 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
108 "paddw %%mm0, %%mm1 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
109 "psubw %%mm5, %%mm4 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
110 "paddw %%mm3, %%mm1 \n\t" |
3174 | 111 |
6319
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
112 "psubw %%mm3, %%mm5 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
113 "psubw %%mm3, %%mm0 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
114 "paddw %%mm7, %%mm5 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
115 "psubw %%mm7, %%mm0 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
116 "psraw $1, %%mm3 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
117 "psraw $1, %%mm7 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
118 "psubw %%mm3, %%mm5 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
119 "psubw %%mm7, %%mm0 \n\t" |
3174 | 120 |
121 "movq %%mm4, %%mm3 \n\t" | |
6319
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
122 "movq %%mm1, %%mm7 \n\t" |
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
123 "psraw $2, %%mm1 \n\t" |
3174 | 124 "psraw $2, %%mm3 \n\t" |
6319
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
125 "paddw %%mm5, %%mm3 \n\t" |
3174 | 126 "psraw $2, %%mm5 \n\t" |
6319
4089a1ae6558
remove some movq in ff_h264_idct8_add_mmx. 225->217 cycles on core2.
lorenm
parents:
6135
diff
changeset
|
127 "paddw %%mm0, %%mm1 \n\t" |
3174 | 128 "psraw $2, %%mm0 \n\t" |
129 "psubw %%mm4, %%mm5 \n\t" | |
130 "psubw %%mm0, %%mm7 \n\t" | |
131 | |
132 "movq 32(%0), %%mm2 \n\t" | |
133 "movq 96(%0), %%mm6 \n\t" | |
134 "movq %%mm2, %%mm4 \n\t" | |
135 "movq %%mm6, %%mm0 \n\t" | |
136 "psraw $1, %%mm4 \n\t" | |
137 "psraw $1, %%mm6 \n\t" | |
138 "psubw %%mm0, %%mm4 \n\t" | |
139 "paddw %%mm2, %%mm6 \n\t" | |
140 | |
141 "movq (%0), %%mm2 \n\t" | |
142 "movq 64(%0), %%mm0 \n\t" | |
143 SUMSUB_BA( %%mm0, %%mm2 ) | |
144 SUMSUB_BA( %%mm6, %%mm0 ) | |
145 SUMSUB_BA( %%mm4, %%mm2 ) | |
146 SUMSUB_BA( %%mm7, %%mm6 ) | |
147 SUMSUB_BA( %%mm5, %%mm4 ) | |
148 SUMSUB_BA( %%mm3, %%mm2 ) | |
149 SUMSUB_BA( %%mm1, %%mm0 ) | |
150 :: "r"(block) | |
151 ); | |
152 } | |
153 | |
154 static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride) | |
155 { | |
156 int i; | |
157 int16_t __attribute__ ((aligned(8))) b2[64]; | |
158 | |
159 block[0] += 32; | |
160 | |
161 for(i=0; i<2; i++){ | |
4137 | 162 DECLARE_ALIGNED_8(uint64_t, tmp); |
3174 | 163 |
164 h264_idct8_1d(block+4*i); | |
165 | |
166 asm volatile( | |
167 "movq %%mm7, %0 \n\t" | |
168 TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 ) | |
169 "movq %%mm0, 8(%1) \n\t" | |
170 "movq %%mm6, 24(%1) \n\t" | |
171 "movq %%mm7, 40(%1) \n\t" | |
172 "movq %%mm4, 56(%1) \n\t" | |
173 "movq %0, %%mm7 \n\t" | |
174 TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 ) | |
175 "movq %%mm7, (%1) \n\t" | |
176 "movq %%mm1, 16(%1) \n\t" | |
177 "movq %%mm0, 32(%1) \n\t" | |
178 "movq %%mm3, 48(%1) \n\t" | |
179 : "=m"(tmp) | |
180 : "r"(b2+32*i) | |
181 : "memory" | |
182 ); | |
183 } | |
184 | |
185 for(i=0; i<2; i++){ | |
186 h264_idct8_1d(b2+4*i); | |
187 | |
188 asm volatile( | |
189 "psraw $6, %%mm7 \n\t" | |
190 "psraw $6, %%mm6 \n\t" | |
191 "psraw $6, %%mm5 \n\t" | |
192 "psraw $6, %%mm4 \n\t" | |
193 "psraw $6, %%mm3 \n\t" | |
194 "psraw $6, %%mm2 \n\t" | |
195 "psraw $6, %%mm1 \n\t" | |
196 "psraw $6, %%mm0 \n\t" | |
197 | |
198 "movq %%mm7, (%0) \n\t" | |
199 "movq %%mm5, 16(%0) \n\t" | |
200 "movq %%mm3, 32(%0) \n\t" | |
201 "movq %%mm1, 48(%0) \n\t" | |
202 "movq %%mm0, 64(%0) \n\t" | |
203 "movq %%mm2, 80(%0) \n\t" | |
204 "movq %%mm4, 96(%0) \n\t" | |
205 "movq %%mm6, 112(%0) \n\t" | |
206 :: "r"(b2+4*i) | |
207 : "memory" | |
208 ); | |
209 } | |
210 | |
211 add_pixels_clamped_mmx(b2, dst, stride); | |
212 } | |
213 | |
6320 | 214 #define STORE_DIFF_8P( p, d, t, z )\ |
215 "movq "#d", "#t" \n"\ | |
216 "psraw $6, "#p" \n"\ | |
217 "punpcklbw "#z", "#t" \n"\ | |
218 "paddsw "#t", "#p" \n"\ | |
219 "packuswb "#p", "#p" \n"\ | |
220 "movq "#p", "#d" \n" | |
221 | |
222 #define H264_IDCT8_1D_SSE2(a,b,c,d,e,f,g,h)\ | |
223 "movdqa "#c", "#a" \n"\ | |
224 "movdqa "#g", "#e" \n"\ | |
225 "psraw $1, "#c" \n"\ | |
226 "psraw $1, "#g" \n"\ | |
227 "psubw "#e", "#c" \n"\ | |
228 "paddw "#a", "#g" \n"\ | |
229 "movdqa "#b", "#e" \n"\ | |
230 "psraw $1, "#e" \n"\ | |
231 "paddw "#b", "#e" \n"\ | |
232 "paddw "#d", "#e" \n"\ | |
233 "paddw "#f", "#e" \n"\ | |
234 "movdqa "#f", "#a" \n"\ | |
235 "psraw $1, "#a" \n"\ | |
236 "paddw "#f", "#a" \n"\ | |
237 "paddw "#h", "#a" \n"\ | |
238 "psubw "#b", "#a" \n"\ | |
239 "psubw "#d", "#b" \n"\ | |
240 "psubw "#d", "#f" \n"\ | |
241 "paddw "#h", "#b" \n"\ | |
242 "psubw "#h", "#f" \n"\ | |
243 "psraw $1, "#d" \n"\ | |
244 "psraw $1, "#h" \n"\ | |
245 "psubw "#d", "#b" \n"\ | |
246 "psubw "#h", "#f" \n"\ | |
247 "movdqa "#e", "#d" \n"\ | |
248 "movdqa "#a", "#h" \n"\ | |
249 "psraw $2, "#d" \n"\ | |
250 "psraw $2, "#h" \n"\ | |
251 "paddw "#f", "#d" \n"\ | |
252 "paddw "#b", "#h" \n"\ | |
253 "psraw $2, "#f" \n"\ | |
254 "psraw $2, "#b" \n"\ | |
255 "psubw "#f", "#e" \n"\ | |
256 "psubw "#a", "#b" \n"\ | |
257 "movdqa 0x00(%1), "#a" \n"\ | |
258 "movdqa 0x40(%1), "#f" \n"\ | |
259 SUMSUB_BA(f, a)\ | |
260 SUMSUB_BA(g, f)\ | |
261 SUMSUB_BA(c, a)\ | |
262 SUMSUB_BA(e, g)\ | |
263 SUMSUB_BA(b, c)\ | |
264 SUMSUB_BA(h, a)\ | |
265 SUMSUB_BA(d, f) | |
266 | |
267 static void ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride) | |
268 { | |
269 asm volatile( | |
270 "movdqa 0x10(%1), %%xmm1 \n" | |
271 "movdqa 0x20(%1), %%xmm2 \n" | |
272 "movdqa 0x30(%1), %%xmm3 \n" | |
273 "movdqa 0x50(%1), %%xmm5 \n" | |
274 "movdqa 0x60(%1), %%xmm6 \n" | |
275 "movdqa 0x70(%1), %%xmm7 \n" | |
276 H264_IDCT8_1D_SSE2(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, %%xmm5, %%xmm6, %%xmm7) | |
277 TRANSPOSE8(%%xmm4, %%xmm1, %%xmm7, %%xmm3, %%xmm5, %%xmm0, %%xmm2, %%xmm6, (%1)) | |
278 "paddw %4, %%xmm4 \n" | |
279 "movdqa %%xmm4, 0x00(%1) \n" | |
280 "movdqa %%xmm2, 0x40(%1) \n" | |
281 H264_IDCT8_1D_SSE2(%%xmm4, %%xmm0, %%xmm6, %%xmm3, %%xmm2, %%xmm5, %%xmm7, %%xmm1) | |
282 "movdqa %%xmm6, 0x60(%1) \n" | |
283 "movdqa %%xmm7, 0x70(%1) \n" | |
284 "pxor %%xmm7, %%xmm7 \n" | |
285 STORE_DIFF_8P(%%xmm2, (%0), %%xmm6, %%xmm7) | |
286 STORE_DIFF_8P(%%xmm0, (%0,%2), %%xmm6, %%xmm7) | |
287 STORE_DIFF_8P(%%xmm1, (%0,%2,2), %%xmm6, %%xmm7) | |
288 STORE_DIFF_8P(%%xmm3, (%0,%3), %%xmm6, %%xmm7) | |
289 "lea (%0,%2,4), %0 \n" | |
290 STORE_DIFF_8P(%%xmm5, (%0), %%xmm6, %%xmm7) | |
291 STORE_DIFF_8P(%%xmm4, (%0,%2), %%xmm6, %%xmm7) | |
292 "movdqa 0x60(%1), %%xmm0 \n" | |
293 "movdqa 0x70(%1), %%xmm1 \n" | |
294 STORE_DIFF_8P(%%xmm0, (%0,%2,2), %%xmm6, %%xmm7) | |
295 STORE_DIFF_8P(%%xmm1, (%0,%3), %%xmm6, %%xmm7) | |
296 :"+r"(dst) | |
297 :"r"(block), "r"((long)stride), "r"(3L*stride), "m"(*ff_pw_32) | |
298 ); | |
299 } | |
300 | |
3173 | 301 static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride) |
3105
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
302 { |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
303 int dc = (block[0] + 32) >> 6; |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
304 asm volatile( |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
305 "movd %0, %%mm0 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
306 "pshufw $0, %%mm0, %%mm0 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
307 "pxor %%mm1, %%mm1 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
308 "psubw %%mm0, %%mm1 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
309 "packuswb %%mm0, %%mm0 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
310 "packuswb %%mm1, %%mm1 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
311 ::"r"(dc) |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
312 ); |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
313 asm volatile( |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
314 "movd %0, %%mm2 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
315 "movd %1, %%mm3 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
316 "movd %2, %%mm4 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
317 "movd %3, %%mm5 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
318 "paddusb %%mm0, %%mm2 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
319 "paddusb %%mm0, %%mm3 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
320 "paddusb %%mm0, %%mm4 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
321 "paddusb %%mm0, %%mm5 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
322 "psubusb %%mm1, %%mm2 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
323 "psubusb %%mm1, %%mm3 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
324 "psubusb %%mm1, %%mm4 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
325 "psubusb %%mm1, %%mm5 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
326 "movd %%mm2, %0 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
327 "movd %%mm3, %1 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
328 "movd %%mm4, %2 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
329 "movd %%mm5, %3 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
330 :"+m"(*(uint32_t*)(dst+0*stride)), |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
331 "+m"(*(uint32_t*)(dst+1*stride)), |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
332 "+m"(*(uint32_t*)(dst+2*stride)), |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
333 "+m"(*(uint32_t*)(dst+3*stride)) |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
334 ); |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
335 } |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
336 |
3173 | 337 static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride) |
3105
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
338 { |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
339 int dc = (block[0] + 32) >> 6; |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
340 int y; |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
341 asm volatile( |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
342 "movd %0, %%mm0 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
343 "pshufw $0, %%mm0, %%mm0 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
344 "pxor %%mm1, %%mm1 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
345 "psubw %%mm0, %%mm1 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
346 "packuswb %%mm0, %%mm0 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
347 "packuswb %%mm1, %%mm1 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
348 ::"r"(dc) |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
349 ); |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
350 for(y=2; y--; dst += 4*stride){ |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
351 asm volatile( |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
352 "movq %0, %%mm2 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
353 "movq %1, %%mm3 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
354 "movq %2, %%mm4 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
355 "movq %3, %%mm5 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
356 "paddusb %%mm0, %%mm2 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
357 "paddusb %%mm0, %%mm3 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
358 "paddusb %%mm0, %%mm4 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
359 "paddusb %%mm0, %%mm5 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
360 "psubusb %%mm1, %%mm2 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
361 "psubusb %%mm1, %%mm3 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
362 "psubusb %%mm1, %%mm4 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
363 "psubusb %%mm1, %%mm5 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
364 "movq %%mm2, %0 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
365 "movq %%mm3, %1 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
366 "movq %%mm4, %2 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
367 "movq %%mm5, %3 \n\t" |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
368 :"+m"(*(uint64_t*)(dst+0*stride)), |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
369 "+m"(*(uint64_t*)(dst+1*stride)), |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
370 "+m"(*(uint64_t*)(dst+2*stride)), |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
371 "+m"(*(uint64_t*)(dst+3*stride)) |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
372 ); |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
373 } |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
374 } |
2d35fb3cb940
h264: special case dc-only idct. ~1% faster overall
lorenm
parents:
3102
diff
changeset
|
375 |
2754 | 376 |
377 /***********************************/ | |
378 /* deblocking */ | |
379 | |
380 // out: o = |x-y|>a | |
381 // clobbers: t | |
382 #define DIFF_GT_MMX(x,y,a,o,t)\ | |
383 "movq "#y", "#t" \n\t"\ | |
384 "movq "#x", "#o" \n\t"\ | |
385 "psubusb "#x", "#t" \n\t"\ | |
386 "psubusb "#y", "#o" \n\t"\ | |
387 "por "#t", "#o" \n\t"\ | |
388 "psubusb "#a", "#o" \n\t" | |
389 | |
4135 | 390 // out: o = |x-y|>a |
391 // clobbers: t | |
392 #define DIFF_GT2_MMX(x,y,a,o,t)\ | |
393 "movq "#y", "#t" \n\t"\ | |
394 "movq "#x", "#o" \n\t"\ | |
395 "psubusb "#x", "#t" \n\t"\ | |
396 "psubusb "#y", "#o" \n\t"\ | |
397 "psubusb "#a", "#t" \n\t"\ | |
398 "psubusb "#a", "#o" \n\t"\ | |
399 "pcmpeqb "#t", "#o" \n\t"\ | |
400 | |
2754 | 401 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 |
402 // out: mm5=beta-1, mm7=mask | |
403 // clobbers: mm4,mm6 | |
404 #define H264_DEBLOCK_MASK(alpha1, beta1) \ | |
405 "pshufw $0, "#alpha1", %%mm4 \n\t"\ | |
406 "pshufw $0, "#beta1 ", %%mm5 \n\t"\ | |
407 "packuswb %%mm4, %%mm4 \n\t"\ | |
408 "packuswb %%mm5, %%mm5 \n\t"\ | |
409 DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\ | |
410 DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\ | |
411 "por %%mm4, %%mm7 \n\t"\ | |
412 DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\ | |
413 "por %%mm4, %%mm7 \n\t"\ | |
414 "pxor %%mm6, %%mm6 \n\t"\ | |
415 "pcmpeqb %%mm6, %%mm7 \n\t" | |
416 | |
417 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) | |
418 // out: mm1=p0' mm2=q0' | |
419 // clobbers: mm0,3-6 | |
420 #define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\ | |
4127 | 421 "movq %%mm1 , %%mm5 \n\t"\ |
422 "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\ | |
423 "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\ | |
424 "pcmpeqb %%mm4 , %%mm4 \n\t"\ | |
425 "pxor %%mm4 , %%mm3 \n\t"\ | |
426 "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\ | |
427 "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\ | |
428 "pxor %%mm1 , %%mm4 \n\t"\ | |
429 "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\ | |
430 "pavgb %%mm5 , %%mm3 \n\t"\ | |
4130 | 431 "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\ |
4129 | 432 "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\ |
433 "psubusb %%mm3 , %%mm6 \n\t"\ | |
4127 | 434 "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\ |
4129 | 435 "pminub %%mm7 , %%mm6 \n\t"\ |
4127 | 436 "pminub %%mm7 , %%mm3 \n\t"\ |
4129 | 437 "psubusb %%mm6 , %%mm1 \n\t"\ |
438 "psubusb %%mm3 , %%mm2 \n\t"\ | |
4127 | 439 "paddusb %%mm3 , %%mm1 \n\t"\ |
4129 | 440 "paddusb %%mm6 , %%mm2 \n\t" |
2754 | 441 |
5947 | 442 // in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=ff_bone |
4866 | 443 // out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 ) |
2754 | 444 // clobbers: q2, tmp, tc0 |
445 #define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\ | |
446 "movq %%mm1, "#tmp" \n\t"\ | |
447 "pavgb %%mm2, "#tmp" \n\t"\ | |
448 "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\ | |
449 "pxor "q2addr", "#tmp" \n\t"\ | |
450 "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\ | |
451 "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\ | |
452 "movq "#p1", "#tmp" \n\t"\ | |
453 "psubusb "#tc0", "#tmp" \n\t"\ | |
454 "paddusb "#p1", "#tc0" \n\t"\ | |
455 "pmaxub "#tmp", "#q2" \n\t"\ | |
456 "pminub "#tc0", "#q2" \n\t"\ | |
457 "movq "#q2", "q1addr" \n\t" | |
458 | |
459 static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0) | |
460 { | |
4137 | 461 DECLARE_ALIGNED_8(uint64_t, tmp0[2]); |
2754 | 462 |
463 asm volatile( | |
464 "movq (%1,%3), %%mm0 \n\t" //p1 | |
465 "movq (%1,%3,2), %%mm1 \n\t" //p0 | |
466 "movq (%2), %%mm2 \n\t" //q0 | |
467 "movq (%2,%3), %%mm3 \n\t" //q1 | |
468 H264_DEBLOCK_MASK(%6, %7) | |
4133 | 469 |
470 "movd %5, %%mm4 \n\t" | |
471 "punpcklbw %%mm4, %%mm4 \n\t" | |
472 "punpcklwd %%mm4, %%mm4 \n\t" | |
473 "pcmpeqb %%mm3, %%mm3 \n\t" | |
474 "movq %%mm4, %%mm6 \n\t" | |
475 "pcmpgtb %%mm3, %%mm4 \n\t" | |
476 "movq %%mm6, 8+%0 \n\t" | |
477 "pand %%mm4, %%mm7 \n\t" | |
478 "movq %%mm7, %0 \n\t" | |
2754 | 479 |
480 /* filter p1 */ | |
481 "movq (%1), %%mm3 \n\t" //p2 | |
4135 | 482 DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1 |
2754 | 483 "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta |
4136 | 484 "pand 8+%0, %%mm7 \n\t" // mask & tc0 |
485 "movq %%mm7, %%mm4 \n\t" | |
4131
1a8e384d0463
2 instructions less in h264_loop_filter_luma_mmx2()
michael
parents:
4130
diff
changeset
|
486 "psubb %%mm6, %%mm7 \n\t" |
2754 | 487 "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0 |
488 H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4) | |
489 | |
490 /* filter q1 */ | |
491 "movq (%2,%3,2), %%mm4 \n\t" //q2 | |
4135 | 492 DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1 |
2754 | 493 "pand %0, %%mm6 \n\t" |
4134 | 494 "movq 8+%0, %%mm5 \n\t" // can be merged with the and below but is slower then |
2754 | 495 "pand %%mm6, %%mm5 \n\t" |
4131
1a8e384d0463
2 instructions less in h264_loop_filter_luma_mmx2()
michael
parents:
4130
diff
changeset
|
496 "psubb %%mm6, %%mm7 \n\t" |
2754 | 497 "movq (%2,%3), %%mm3 \n\t" |
498 H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6) | |
499 | |
500 /* filter p0, q0 */ | |
4133 | 501 H264_DEBLOCK_P0_Q0(%8, unused) |
2754 | 502 "movq %%mm1, (%1,%3,2) \n\t" |
503 "movq %%mm2, (%2) \n\t" | |
504 | |
4133 | 505 : "=m"(*tmp0) |
2754 | 506 : "r"(pix-3*stride), "r"(pix), "r"((long)stride), |
4133 | 507 "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1), |
5947 | 508 "m"(ff_bone) |
2754 | 509 ); |
510 } | |
511 | |
512 static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) | |
513 { | |
514 if((tc0[0] & tc0[1]) >= 0) | |
515 h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0); | |
516 if((tc0[2] & tc0[3]) >= 0) | |
517 h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2); | |
518 } | |
519 static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) | |
520 { | |
521 //FIXME: could cut some load/stores by merging transpose with filter | |
522 // also, it only needs to transpose 6x8 | |
4137 | 523 DECLARE_ALIGNED_8(uint8_t, trans[8*8]); |
2754 | 524 int i; |
525 for(i=0; i<2; i++, pix+=8*stride, tc0+=2) { | |
526 if((tc0[0] & tc0[1]) < 0) | |
527 continue; | |
528 transpose4x4(trans, pix-4, 8, stride); | |
529 transpose4x4(trans +4*8, pix, 8, stride); | |
530 transpose4x4(trans+4, pix-4+4*stride, 8, stride); | |
531 transpose4x4(trans+4+4*8, pix +4*stride, 8, stride); | |
532 h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0); | |
533 transpose4x4(pix-2, trans +2*8, stride, 8); | |
534 transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8); | |
535 } | |
536 } | |
537 | |
538 static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0) | |
539 { | |
540 asm volatile( | |
541 "movq (%0), %%mm0 \n\t" //p1 | |
542 "movq (%0,%2), %%mm1 \n\t" //p0 | |
543 "movq (%1), %%mm2 \n\t" //q0 | |
544 "movq (%1,%2), %%mm3 \n\t" //q1 | |
545 H264_DEBLOCK_MASK(%4, %5) | |
546 "movd %3, %%mm6 \n\t" | |
547 "punpcklbw %%mm6, %%mm6 \n\t" | |
548 "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask | |
549 H264_DEBLOCK_P0_Q0(%6, %7) | |
550 "movq %%mm1, (%0,%2) \n\t" | |
551 "movq %%mm2, (%1) \n\t" | |
552 | |
553 :: "r"(pix-2*stride), "r"(pix), "r"((long)stride), | |
554 "r"(*(uint32_t*)tc0), | |
5947 | 555 "m"(alpha1), "m"(beta1), "m"(ff_bone), "m"(ff_pb_3F) |
2754 | 556 ); |
557 } | |
558 | |
559 static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) | |
560 { | |
561 h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0); | |
562 } | |
563 | |
564 static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) | |
565 { | |
566 //FIXME: could cut some load/stores by merging transpose with filter | |
4137 | 567 DECLARE_ALIGNED_8(uint8_t, trans[8*4]); |
2754 | 568 transpose4x4(trans, pix-2, 8, stride); |
569 transpose4x4(trans+4, pix-2+4*stride, 8, stride); | |
570 h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0); | |
571 transpose4x4(pix-2, trans, stride, 8); | |
572 transpose4x4(pix-2+4*stride, trans+4, stride, 8); | |
573 } | |
574 | |
// p0 = (p0 + q1 + 2*p1 + 2) >> 2
// Implemented with two pavgb steps. pavgb rounds up, so the low bit of
// (p0^q1) is subtracted first to turn avg(p0,q1) into the floor average;
// the second pavgb then supplies the final +1 rounding, matching the
// formula above. Clobbers %%mm4.
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq "#p0", %%mm4 \n\t"\
    "pxor "#q1", %%mm4 \n\t"\
    "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0" \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\
/**
 * Core MMX2 chroma deblocking for intra (strong, no tc0) edges.
 * Computes candidate p0'/q0' with H264_FILTER_CHROMA4, then merges them with
 * the originals under the H264_DEBLOCK_MASK result in %%mm7: the
 * psubb/pand/paddb sequence keeps the filtered delta only where the mask is
 * set, leaving unmasked pixels untouched.
 *
 * @param pix    pointer to q0's row (edge between pix-stride and pix)
 * @param stride row stride in bytes
 * @param alpha1 alpha threshold minus one
 * @param beta1  beta threshold minus one
 */
static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    asm volatile(
        "movq    (%0),    %%mm0     \n\t"   // p1
        "movq    (%0,%2), %%mm1     \n\t"   // p0
        "movq    (%1),    %%mm2     \n\t"   // q0
        "movq    (%1,%2), %%mm3     \n\t"   // q1
        H264_DEBLOCK_MASK(%3, %4)
        "movq    %%mm1,   %%mm5     \n\t"   // save original p0
        "movq    %%mm2,   %%mm6     \n\t"   // save original q0
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb   %%mm5,   %%mm1     \n\t"   // delta = p0' - p0
        "psubb   %%mm6,   %%mm2     \n\t"   // delta = q0' - q0
        "pand    %%mm7,   %%mm1     \n\t"   // keep delta only where mask set
        "pand    %%mm7,   %%mm2     \n\t"
        "paddb   %%mm5,   %%mm1     \n\t"   // re-add originals
        "paddb   %%mm6,   %%mm2     \n\t"
        "movq    %%mm1,   (%0,%2)   \n\t"
        "movq    %%mm2,   (%1)      \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "m"(alpha1), "m"(beta1), "m"(ff_bone)
    );
}
608 | |
/**
 * Public entry point for intra (strong) chroma deblocking along the vertical
 * filter direction. Simply forwards to the shared core, which wants the
 * alpha/beta thresholds pre-decremented by one.
 */
static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    const int alpha1 = alpha - 1;
    const int beta1  = beta  - 1;
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha1, beta1);
}
613 | |
/**
 * Horizontal-direction intra chroma deblocking. Same transpose trick as
 * h264_h_loop_filter_chroma_mmx2: move the 4x8 neighbourhood around the
 * vertical edge into an aligned 8x4 buffer, run the row-oriented intra core
 * at trans+2*8 (so the edge lands between its two middle rows), and
 * transpose back.
 */
static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);   // 8-byte aligned for movq
    transpose4x4(trans, pix-2, 8, stride);            // top 4 rows
    transpose4x4(trans+4, pix-2+4*stride, 8, stride); // bottom 4 rows
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}
624 | |
3645
47821be55b6c
mmx implementation of deblocking strength decision.
lorenm
parents:
3394
diff
changeset
|
/**
 * MMX2 computation of H.264 deblocking filter strengths (bS) for the edges of
 * one macroblock, in both directions (dir=1: horizontal edges, neighbour is
 * the block above, d_idx=-8; dir=0: vertical edges, neighbour to the left,
 * d_idx=-1). The nnz/ref/mv caches use a stride of 8 entries per row, with
 * b_idx starting at 12 (first in-MB 4x4 block) — same layout as the C version
 * elsewhere in libavcodec.
 *
 * Per edge of four 4x4 blocks, each of the four output strengths is:
 *   2  if either side has nonzero coefficients (nnz),
 *   1  if the reference frames differ or any mv component differs by >= 4
 *      (quarter-pel units, i.e. one luma sample),
 *   0  otherwise.
 * Intra edges (bS 3/4) are not produced here — presumably handled by the
 * caller before invoking this; TODO confirm against the call site.
 *
 * NOTE(review): this deliberately relies on MMX register contents surviving
 * between separate asm statements (none declare register clobbers): mm4/5/6
 * hold ff_pb_7/ff_pb_3/ff_pb_1 for the whole function, mm7 stays zero, and
 * mm0 accumulates the per-edge result across iterations. Do not reorder or
 * insert code between the asm blocks.
 */
static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1 ) {
    int dir;
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"  // mm7 = 0 throughout
        "movq %0, %%mm6 \n\t"     // mm6 = ff_pb_1
        "movq %1, %%mm5 \n\t"     // mm5 = ff_pb_3
        "movq %2, %%mm4 \n\t"     // mm4 = ff_pb_7
        ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7)
    );
    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const int d_idx = dir ? -8 : -1;
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        // all-ones only for dir==0: lets edge 0 of the second pass reuse the
        // mm0 accumulated on the previous iteration when masked below
        DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge, l;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            asm volatile(
                "pand %0, %%mm0 \n\t"   // clear (dir==1) or keep (dir==0) prior accumulator
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                asm volatile("pxor %%mm0, %%mm0 \n\t":);   // fresh accumulator
                for( l = bidir; l >= 0; l-- ) {            // each reference list
                    asm volatile(
                        "movd %0, %%mm1 \n\t"              // refs of this edge's 4 blocks
                        "punpckldq %1, %%mm1 \n\t"         // + refs of the neighbours
                        "movq %%mm1, %%mm2 \n\t"
                        "psrlw $7, %%mm2 \n\t"
                        "pand %%mm6, %%mm2 \n\t"
                        "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1
                        "punpckldq %%mm1, %%mm2 \n\t"
                        "pcmpeqb %%mm2, %%mm1 \n\t"
                        "paddb %%mm6, %%mm1 \n\t"          // equal -> 0, differ -> 1
                        "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn]
                        "por %%mm1, %%mm0 \n\t"

                        "movq %2, %%mm1 \n\t"              // mvs of this edge's blocks
                        "movq %3, %%mm2 \n\t"
                        "psubw %4, %%mm1 \n\t"             // mv deltas vs neighbours
                        "psubw %5, %%mm2 \n\t"
                        "packsswb %%mm2, %%mm1 \n\t"
                        "paddb %%mm5, %%mm1 \n\t"          // delta+3; saturates
                        "pminub %%mm4, %%mm1 \n\t"
                        "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit
                        "por %%mm1, %%mm0 \n\t"
                        ::"m"(ref[l][b_idx]),
                          "m"(ref[l][b_idx+d_idx]),
                          "m"(mv[l][b_idx][0]),
                          "m"(mv[l][b_idx+2][0]),
                          "m"(mv[l][b_idx+d_idx][0]),
                          "m"(mv[l][b_idx+d_idx+2][0])
                    );
                }
            }
            asm volatile(
                "movd %0, %%mm1 \n\t"            // nnz of this edge's 4 blocks
                "por %1, %%mm1 \n\t"             // | nnz of neighbours
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            asm volatile(
                // double pcmpeqw is intentional: two negations map any
                // nonzero word in the mv/ref accumulator to all-ones
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "psrlw $15, %%mm0 \n\t" // nonzero -> 1
                "psrlw $14, %%mm1 \n\t"          // nnz mask -> 3
                "movq %%mm0, %%mm2 \n\t"
                "por %%mm1, %%mm2 \n\t"
                "psrlw $1, %%mm1 \n\t"           // nnz mask -> 1
                "pandn %%mm2, %%mm1 \n\t"        // nnz? 2 : (mv/ref? 1 : 0)
                "movq %%mm1, %0 \n\t"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
        // second pass (dir==0) always covers all 4 vertical edges singly
        edges = 4;
        step = 1;
    }
    // dir==0 strengths were computed along rows; transpose bS[0] so each
    // bS[0][edge] holds the 4 strengths of one vertical edge
    asm volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}
2754 | 720 |
721 /***********************************/ | |
722 /* motion compensation */ | |
723 | |
// One output row of the 6-tap vertical qpel filter. A..F are registers
// holding six consecutive unpacked (16-bit) rows; F is (re)loaded here from
// *%0 and unpacked. Callers bind %4 to ff_pw_5 and %5 to ff_pw_16, so the
// result is (20*(C+D) - 5*(B+E) + A + F + 16) >> 5, packed to bytes and
// written through OP (put/avg) at %1. Advances %0 by %2 and %1 by %3
// (src/dst strides). Note A is clobbered (rounding bias + F are added into
// it), which is why callers rotate the register roles each step.
#define QPEL_H264V(A,B,C,D,E,F,OP)\
    "movd (%0), "#F"            \n\t"\
    "movq "#C", %%mm6           \n\t"\
    "paddw "#D", %%mm6          \n\t"\
    "psllw $2, %%mm6            \n\t"\
    "psubw "#B", %%mm6          \n\t"\
    "psubw "#E", %%mm6          \n\t"\
    "pmullw %4, %%mm6           \n\t"\
    "add %2, %0                 \n\t"\
    "punpcklbw %%mm7, "#F"      \n\t"\
    "paddw %5, "#A"             \n\t"\
    "paddw "#F", "#A"           \n\t"\
    "paddw "#A", %%mm6          \n\t"\
    "psraw $5, %%mm6            \n\t"\
    "packuswb %%mm6, %%mm6      \n\t"\
    OP(%%mm6, (%1), A, d)\
    "add %3, %1                 \n\t"
2754 | 741 |
// Vertical-filter step for the HV (center) qpel case: like QPEL_H264V but
// with no rounding, shift, or packing — the intermediate
// 20*(C+D) - 5*(B+E) + A + F (callers bind %3 to ff_pw_5) is stored as
// 16-bit words at offset OF into the tmp buffer %1, to be filtered
// horizontally in a second pass. Advances %0 by %2; clobbers A and %%mm6.
#define QPEL_H264HV(A,B,C,D,E,F,OF)\
    "movd (%0), "#F"            \n\t"\
    "movq "#C", %%mm6           \n\t"\
    "paddw "#D", %%mm6          \n\t"\
    "psllw $2, %%mm6            \n\t"\
    "psubw "#B", %%mm6          \n\t"\
    "psubw "#E", %%mm6          \n\t"\
    "pmullw %3, %%mm6           \n\t"\
    "add %2, %0                 \n\t"\
    "punpcklbw %%mm7, "#F"      \n\t"\
    "paddw "#F", "#A"           \n\t"\
    "paddw "#A", %%mm6          \n\t"\
    "movq %%mm6, "#OF"(%1)      \n\t"
2967 | 755 |
2754 | 756 #define QPEL_H264(OPNAME, OP, MMX)\ |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
757 static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ |
2754 | 758 int h=4;\ |
759 \ | |
760 asm volatile(\ | |
2979 | 761 "pxor %%mm7, %%mm7 \n\t"\ |
762 "movq %5, %%mm4 \n\t"\ | |
763 "movq %6, %%mm5 \n\t"\ | |
764 "1: \n\t"\ | |
765 "movd -1(%0), %%mm1 \n\t"\ | |
766 "movd (%0), %%mm2 \n\t"\ | |
767 "movd 1(%0), %%mm3 \n\t"\ | |
768 "movd 2(%0), %%mm0 \n\t"\ | |
769 "punpcklbw %%mm7, %%mm1 \n\t"\ | |
770 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
771 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
772 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
773 "paddw %%mm0, %%mm1 \n\t"\ | |
774 "paddw %%mm3, %%mm2 \n\t"\ | |
775 "movd -2(%0), %%mm0 \n\t"\ | |
776 "movd 3(%0), %%mm3 \n\t"\ | |
777 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
778 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
779 "paddw %%mm3, %%mm0 \n\t"\ | |
780 "psllw $2, %%mm2 \n\t"\ | |
781 "psubw %%mm1, %%mm2 \n\t"\ | |
782 "pmullw %%mm4, %%mm2 \n\t"\ | |
783 "paddw %%mm5, %%mm0 \n\t"\ | |
784 "paddw %%mm2, %%mm0 \n\t"\ | |
785 "psraw $5, %%mm0 \n\t"\ | |
786 "packuswb %%mm0, %%mm0 \n\t"\ | |
2754 | 787 OP(%%mm0, (%1),%%mm6, d)\ |
2979 | 788 "add %3, %0 \n\t"\ |
789 "add %4, %1 \n\t"\ | |
790 "decl %2 \n\t"\ | |
791 " jnz 1b \n\t"\ | |
2754 | 792 : "+a"(src), "+c"(dst), "+m"(h)\ |
793 : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
794 : "memory"\ | |
795 );\ | |
796 }\ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
797 static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ |
3156 | 798 int h=4;\ |
799 asm volatile(\ | |
800 "pxor %%mm7, %%mm7 \n\t"\ | |
3165 | 801 "movq %0, %%mm4 \n\t"\ |
802 "movq %1, %%mm5 \n\t"\ | |
803 :: "m"(ff_pw_5), "m"(ff_pw_16)\ | |
804 );\ | |
805 do{\ | |
806 asm volatile(\ | |
3156 | 807 "movd -1(%0), %%mm1 \n\t"\ |
808 "movd (%0), %%mm2 \n\t"\ | |
809 "movd 1(%0), %%mm3 \n\t"\ | |
810 "movd 2(%0), %%mm0 \n\t"\ | |
811 "punpcklbw %%mm7, %%mm1 \n\t"\ | |
812 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
813 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
814 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
815 "paddw %%mm0, %%mm1 \n\t"\ | |
816 "paddw %%mm3, %%mm2 \n\t"\ | |
817 "movd -2(%0), %%mm0 \n\t"\ | |
818 "movd 3(%0), %%mm3 \n\t"\ | |
819 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
820 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
821 "paddw %%mm3, %%mm0 \n\t"\ | |
822 "psllw $2, %%mm2 \n\t"\ | |
823 "psubw %%mm1, %%mm2 \n\t"\ | |
824 "pmullw %%mm4, %%mm2 \n\t"\ | |
825 "paddw %%mm5, %%mm0 \n\t"\ | |
826 "paddw %%mm2, %%mm0 \n\t"\ | |
827 "movd (%2), %%mm3 \n\t"\ | |
828 "psraw $5, %%mm0 \n\t"\ | |
829 "packuswb %%mm0, %%mm0 \n\t"\ | |
830 PAVGB" %%mm3, %%mm0 \n\t"\ | |
831 OP(%%mm0, (%1),%%mm6, d)\ | |
3165 | 832 "add %4, %0 \n\t"\ |
833 "add %4, %1 \n\t"\ | |
834 "add %3, %2 \n\t"\ | |
835 : "+a"(src), "+c"(dst), "+d"(src2)\ | |
836 : "D"((long)src2Stride), "S"((long)dstStride)\ | |
3156 | 837 : "memory"\ |
838 );\ | |
3165 | 839 }while(--h);\ |
3156 | 840 }\ |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
841 static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ |
2754 | 842 src -= 2*srcStride;\ |
843 asm volatile(\ | |
2979 | 844 "pxor %%mm7, %%mm7 \n\t"\ |
845 "movd (%0), %%mm0 \n\t"\ | |
846 "add %2, %0 \n\t"\ | |
847 "movd (%0), %%mm1 \n\t"\ | |
848 "add %2, %0 \n\t"\ | |
849 "movd (%0), %%mm2 \n\t"\ | |
850 "add %2, %0 \n\t"\ | |
851 "movd (%0), %%mm3 \n\t"\ | |
852 "add %2, %0 \n\t"\ | |
853 "movd (%0), %%mm4 \n\t"\ | |
854 "add %2, %0 \n\t"\ | |
855 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
856 "punpcklbw %%mm7, %%mm1 \n\t"\ | |
857 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
858 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
859 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
2754 | 860 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ |
861 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ | |
862 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ | |
863 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ | |
864 \ | |
865 : "+a"(src), "+c"(dst)\ | |
866 : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
867 : "memory"\ | |
868 );\ | |
869 }\ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
870 static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ |
2754 | 871 int h=4;\ |
872 int w=3;\ | |
873 src -= 2*srcStride+2;\ | |
874 while(w--){\ | |
875 asm volatile(\ | |
2979 | 876 "pxor %%mm7, %%mm7 \n\t"\ |
877 "movd (%0), %%mm0 \n\t"\ | |
878 "add %2, %0 \n\t"\ | |
879 "movd (%0), %%mm1 \n\t"\ | |
880 "add %2, %0 \n\t"\ | |
881 "movd (%0), %%mm2 \n\t"\ | |
882 "add %2, %0 \n\t"\ | |
883 "movd (%0), %%mm3 \n\t"\ | |
884 "add %2, %0 \n\t"\ | |
885 "movd (%0), %%mm4 \n\t"\ | |
886 "add %2, %0 \n\t"\ | |
887 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
888 "punpcklbw %%mm7, %%mm1 \n\t"\ | |
889 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
890 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
891 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
2754 | 892 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\ |
893 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\ | |
894 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\ | |
895 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\ | |
896 \ | |
897 : "+a"(src)\ | |
898 : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\ | |
899 : "memory"\ | |
900 );\ | |
901 tmp += 4;\ | |
902 src += 4 - 9*srcStride;\ | |
903 }\ | |
904 tmp -= 3*4;\ | |
905 asm volatile(\ | |
2979 | 906 "movq %4, %%mm6 \n\t"\ |
907 "1: \n\t"\ | |
908 "movq (%0), %%mm0 \n\t"\ | |
909 "paddw 10(%0), %%mm0 \n\t"\ | |
910 "movq 2(%0), %%mm1 \n\t"\ | |
911 "paddw 8(%0), %%mm1 \n\t"\ | |
912 "movq 4(%0), %%mm2 \n\t"\ | |
913 "paddw 6(%0), %%mm2 \n\t"\ | |
914 "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\ | |
915 "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\ | |
916 "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\ | |
917 "paddsw %%mm2, %%mm0 \n\t"\ | |
3001
b52d8ee430f6
fix some potential arithmetic overflows in pred_direct_motion() and
lorenm
parents:
2979
diff
changeset
|
918 "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\ |
2979 | 919 "paddw %%mm6, %%mm2 \n\t"\ |
3001
b52d8ee430f6
fix some potential arithmetic overflows in pred_direct_motion() and
lorenm
parents:
2979
diff
changeset
|
920 "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 +32 */\ |
2979 | 921 "psraw $6, %%mm0 \n\t"\ |
922 "packuswb %%mm0, %%mm0 \n\t"\ | |
2754 | 923 OP(%%mm0, (%1),%%mm7, d)\ |
2979 | 924 "add $24, %0 \n\t"\ |
925 "add %3, %1 \n\t"\ | |
926 "decl %2 \n\t"\ | |
927 " jnz 1b \n\t"\ | |
2754 | 928 : "+a"(tmp), "+c"(dst), "+m"(h)\ |
6320 | 929 : "S"((long)dstStride), "m"(*ff_pw_32)\ |
2754 | 930 : "memory"\ |
931 );\ | |
932 }\ | |
933 \ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
934 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ |
2754 | 935 int h=8;\ |
936 asm volatile(\ | |
2979 | 937 "pxor %%mm7, %%mm7 \n\t"\ |
938 "movq %5, %%mm6 \n\t"\ | |
939 "1: \n\t"\ | |
940 "movq (%0), %%mm0 \n\t"\ | |
941 "movq 1(%0), %%mm2 \n\t"\ | |
942 "movq %%mm0, %%mm1 \n\t"\ | |
943 "movq %%mm2, %%mm3 \n\t"\ | |
944 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
945 "punpckhbw %%mm7, %%mm1 \n\t"\ | |
946 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
947 "punpckhbw %%mm7, %%mm3 \n\t"\ | |
948 "paddw %%mm2, %%mm0 \n\t"\ | |
949 "paddw %%mm3, %%mm1 \n\t"\ | |
950 "psllw $2, %%mm0 \n\t"\ | |
951 "psllw $2, %%mm1 \n\t"\ | |
952 "movq -1(%0), %%mm2 \n\t"\ | |
953 "movq 2(%0), %%mm4 \n\t"\ | |
954 "movq %%mm2, %%mm3 \n\t"\ | |
955 "movq %%mm4, %%mm5 \n\t"\ | |
956 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
957 "punpckhbw %%mm7, %%mm3 \n\t"\ | |
958 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
959 "punpckhbw %%mm7, %%mm5 \n\t"\ | |
960 "paddw %%mm4, %%mm2 \n\t"\ | |
961 "paddw %%mm3, %%mm5 \n\t"\ | |
962 "psubw %%mm2, %%mm0 \n\t"\ | |
963 "psubw %%mm5, %%mm1 \n\t"\ | |
964 "pmullw %%mm6, %%mm0 \n\t"\ | |
965 "pmullw %%mm6, %%mm1 \n\t"\ | |
966 "movd -2(%0), %%mm2 \n\t"\ | |
967 "movd 7(%0), %%mm5 \n\t"\ | |
968 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
969 "punpcklbw %%mm7, %%mm5 \n\t"\ | |
970 "paddw %%mm3, %%mm2 \n\t"\ | |
971 "paddw %%mm5, %%mm4 \n\t"\ | |
972 "movq %6, %%mm5 \n\t"\ | |
973 "paddw %%mm5, %%mm2 \n\t"\ | |
974 "paddw %%mm5, %%mm4 \n\t"\ | |
975 "paddw %%mm2, %%mm0 \n\t"\ | |
976 "paddw %%mm4, %%mm1 \n\t"\ | |
977 "psraw $5, %%mm0 \n\t"\ | |
978 "psraw $5, %%mm1 \n\t"\ | |
979 "packuswb %%mm1, %%mm0 \n\t"\ | |
2754 | 980 OP(%%mm0, (%1),%%mm5, q)\ |
2979 | 981 "add %3, %0 \n\t"\ |
982 "add %4, %1 \n\t"\ | |
983 "decl %2 \n\t"\ | |
984 " jnz 1b \n\t"\ | |
2754 | 985 : "+a"(src), "+c"(dst), "+m"(h)\ |
986 : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
987 : "memory"\ | |
988 );\ | |
989 }\ | |
990 \ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
991 static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ |
3156 | 992 int h=8;\ |
993 asm volatile(\ | |
994 "pxor %%mm7, %%mm7 \n\t"\ | |
3165 | 995 "movq %0, %%mm6 \n\t"\ |
996 :: "m"(ff_pw_5)\ | |
997 );\ | |
998 do{\ | |
999 asm volatile(\ | |
3156 | 1000 "movq (%0), %%mm0 \n\t"\ |
1001 "movq 1(%0), %%mm2 \n\t"\ | |
1002 "movq %%mm0, %%mm1 \n\t"\ | |
1003 "movq %%mm2, %%mm3 \n\t"\ | |
1004 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
1005 "punpckhbw %%mm7, %%mm1 \n\t"\ | |
1006 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
1007 "punpckhbw %%mm7, %%mm3 \n\t"\ | |
1008 "paddw %%mm2, %%mm0 \n\t"\ | |
1009 "paddw %%mm3, %%mm1 \n\t"\ | |
1010 "psllw $2, %%mm0 \n\t"\ | |
1011 "psllw $2, %%mm1 \n\t"\ | |
1012 "movq -1(%0), %%mm2 \n\t"\ | |
1013 "movq 2(%0), %%mm4 \n\t"\ | |
1014 "movq %%mm2, %%mm3 \n\t"\ | |
1015 "movq %%mm4, %%mm5 \n\t"\ | |
1016 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
1017 "punpckhbw %%mm7, %%mm3 \n\t"\ | |
1018 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
1019 "punpckhbw %%mm7, %%mm5 \n\t"\ | |
1020 "paddw %%mm4, %%mm2 \n\t"\ | |
1021 "paddw %%mm3, %%mm5 \n\t"\ | |
1022 "psubw %%mm2, %%mm0 \n\t"\ | |
1023 "psubw %%mm5, %%mm1 \n\t"\ | |
1024 "pmullw %%mm6, %%mm0 \n\t"\ | |
1025 "pmullw %%mm6, %%mm1 \n\t"\ | |
1026 "movd -2(%0), %%mm2 \n\t"\ | |
1027 "movd 7(%0), %%mm5 \n\t"\ | |
1028 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
1029 "punpcklbw %%mm7, %%mm5 \n\t"\ | |
1030 "paddw %%mm3, %%mm2 \n\t"\ | |
1031 "paddw %%mm5, %%mm4 \n\t"\ | |
3165 | 1032 "movq %5, %%mm5 \n\t"\ |
3156 | 1033 "paddw %%mm5, %%mm2 \n\t"\ |
1034 "paddw %%mm5, %%mm4 \n\t"\ | |
1035 "paddw %%mm2, %%mm0 \n\t"\ | |
1036 "paddw %%mm4, %%mm1 \n\t"\ | |
1037 "psraw $5, %%mm0 \n\t"\ | |
1038 "psraw $5, %%mm1 \n\t"\ | |
1039 "movq (%2), %%mm4 \n\t"\ | |
1040 "packuswb %%mm1, %%mm0 \n\t"\ | |
1041 PAVGB" %%mm4, %%mm0 \n\t"\ | |
1042 OP(%%mm0, (%1),%%mm5, q)\ | |
3165 | 1043 "add %4, %0 \n\t"\ |
1044 "add %4, %1 \n\t"\ | |
1045 "add %3, %2 \n\t"\ | |
1046 : "+a"(src), "+c"(dst), "+d"(src2)\ | |
3156 | 1047 : "D"((long)src2Stride), "S"((long)dstStride),\ |
3165 | 1048 "m"(ff_pw_16)\ |
3156 | 1049 : "memory"\ |
1050 );\ | |
3165 | 1051 }while(--h);\ |
3156 | 1052 }\ |
1053 \ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
1054 static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ |
3094 | 1055 int w= 2;\ |
2754 | 1056 src -= 2*srcStride;\ |
1057 \ | |
3094 | 1058 while(w--){\ |
2754 | 1059 asm volatile(\ |
2979 | 1060 "pxor %%mm7, %%mm7 \n\t"\ |
1061 "movd (%0), %%mm0 \n\t"\ | |
1062 "add %2, %0 \n\t"\ | |
1063 "movd (%0), %%mm1 \n\t"\ | |
1064 "add %2, %0 \n\t"\ | |
1065 "movd (%0), %%mm2 \n\t"\ | |
1066 "add %2, %0 \n\t"\ | |
1067 "movd (%0), %%mm3 \n\t"\ | |
1068 "add %2, %0 \n\t"\ | |
1069 "movd (%0), %%mm4 \n\t"\ | |
1070 "add %2, %0 \n\t"\ | |
1071 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
1072 "punpcklbw %%mm7, %%mm1 \n\t"\ | |
1073 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
1074 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
1075 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
2754 | 1076 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ |
1077 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ | |
1078 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ | |
1079 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ | |
1080 QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ | |
1081 QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ | |
1082 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ | |
1083 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ | |
1084 \ | |
1085 : "+a"(src), "+c"(dst)\ | |
1086 : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
1087 : "memory"\ | |
1088 );\ | |
3094 | 1089 if(h==16){\ |
1090 asm volatile(\ | |
1091 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ | |
1092 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ | |
1093 QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ | |
1094 QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ | |
1095 QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ | |
1096 QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ | |
1097 QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ | |
1098 QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ | |
1099 \ | |
1100 : "+a"(src), "+c"(dst)\ | |
1101 : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ | |
1102 : "memory"\ | |
1103 );\ | |
1104 }\ | |
1105 src += 4-(h+5)*srcStride;\ | |
1106 dst += 4-h*dstStride;\ | |
2754 | 1107 }\ |
1108 }\ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
1109 static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\ |
3093 | 1110 int h = size;\ |
1111 int w = (size+8)>>2;\ | |
2754 | 1112 src -= 2*srcStride+2;\ |
1113 while(w--){\ | |
1114 asm volatile(\ | |
2979 | 1115 "pxor %%mm7, %%mm7 \n\t"\ |
1116 "movd (%0), %%mm0 \n\t"\ | |
1117 "add %2, %0 \n\t"\ | |
1118 "movd (%0), %%mm1 \n\t"\ | |
1119 "add %2, %0 \n\t"\ | |
1120 "movd (%0), %%mm2 \n\t"\ | |
1121 "add %2, %0 \n\t"\ | |
1122 "movd (%0), %%mm3 \n\t"\ | |
1123 "add %2, %0 \n\t"\ | |
1124 "movd (%0), %%mm4 \n\t"\ | |
1125 "add %2, %0 \n\t"\ | |
1126 "punpcklbw %%mm7, %%mm0 \n\t"\ | |
1127 "punpcklbw %%mm7, %%mm1 \n\t"\ | |
1128 "punpcklbw %%mm7, %%mm2 \n\t"\ | |
1129 "punpcklbw %%mm7, %%mm3 \n\t"\ | |
1130 "punpcklbw %%mm7, %%mm4 \n\t"\ | |
3093 | 1131 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\ |
1132 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\ | |
1133 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\ | |
1134 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\ | |
1135 QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\ | |
1136 QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\ | |
1137 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\ | |
1138 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\ | |
2754 | 1139 : "+a"(src)\ |
1140 : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\ | |
1141 : "memory"\ | |
1142 );\ | |
3093 | 1143 if(size==16){\ |
1144 asm volatile(\ | |
1145 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\ | |
1146 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\ | |
1147 QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\ | |
1148 QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\ | |
1149 QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\ | |
1150 QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\ | |
1151 QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\ | |
1152 QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\ | |
1153 : "+a"(src)\ | |
1154 : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\ | |
1155 : "memory"\ | |
1156 );\ | |
1157 }\ | |
2754 | 1158 tmp += 4;\ |
3093 | 1159 src += 4 - (size+5)*srcStride;\ |
2754 | 1160 }\ |
3093 | 1161 tmp -= size+8;\ |
1162 w = size>>4;\ | |
1163 do{\ | |
1164 h = size;\ | |
2754 | 1165 asm volatile(\ |
2979 | 1166 "movq %4, %%mm6 \n\t"\ |
1167 "1: \n\t"\ | |
1168 "movq (%0), %%mm0 \n\t"\ | |
1169 "movq 8(%0), %%mm3 \n\t"\ | |
1170 "movq 2(%0), %%mm1 \n\t"\ | |
1171 "movq 10(%0), %%mm4 \n\t"\ | |
1172 "paddw %%mm4, %%mm0 \n\t"\ | |
1173 "paddw %%mm3, %%mm1 \n\t"\ | |
1174 "paddw 18(%0), %%mm3 \n\t"\ | |
1175 "paddw 16(%0), %%mm4 \n\t"\ | |
1176 "movq 4(%0), %%mm2 \n\t"\ | |
1177 "movq 12(%0), %%mm5 \n\t"\ | |
1178 "paddw 6(%0), %%mm2 \n\t"\ | |
1179 "paddw 14(%0), %%mm5 \n\t"\ | |
1180 "psubw %%mm1, %%mm0 \n\t"\ | |
1181 "psubw %%mm4, %%mm3 \n\t"\ | |
1182 "psraw $2, %%mm0 \n\t"\ | |
1183 "psraw $2, %%mm3 \n\t"\ | |
1184 "psubw %%mm1, %%mm0 \n\t"\ | |
1185 "psubw %%mm4, %%mm3 \n\t"\ | |
1186 "paddsw %%mm2, %%mm0 \n\t"\ | |
1187 "paddsw %%mm5, %%mm3 \n\t"\ | |
1188 "psraw $2, %%mm0 \n\t"\ | |
1189 "psraw $2, %%mm3 \n\t"\ | |
1190 "paddw %%mm6, %%mm2 \n\t"\ | |
1191 "paddw %%mm6, %%mm5 \n\t"\ | |
1192 "paddw %%mm2, %%mm0 \n\t"\ | |
1193 "paddw %%mm5, %%mm3 \n\t"\ | |
1194 "psraw $6, %%mm0 \n\t"\ | |
1195 "psraw $6, %%mm3 \n\t"\ | |
1196 "packuswb %%mm3, %%mm0 \n\t"\ | |
2754 | 1197 OP(%%mm0, (%1),%%mm7, q)\ |
3093 | 1198 "add $48, %0 \n\t"\ |
2979 | 1199 "add %3, %1 \n\t"\ |
1200 "decl %2 \n\t"\ | |
1201 " jnz 1b \n\t"\ | |
2754 | 1202 : "+a"(tmp), "+c"(dst), "+m"(h)\ |
6320 | 1203 : "S"((long)dstStride), "m"(*ff_pw_32)\ |
2754 | 1204 : "memory"\ |
1205 );\ | |
3093 | 1206 tmp += 8 - size*24;\ |
1207 dst += 8 - size*dstStride;\ | |
1208 }while(w--);\ | |
2754 | 1209 }\ |
3094 | 1210 \ |
1211 static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ | |
1212 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\ | |
1213 }\ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
1214 static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ |
3094 | 1215 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\ |
1216 OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ | |
2754 | 1217 }\ |
1218 \ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
1219 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ |
2754 | 1220 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ |
1221 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
1222 src += 8*srcStride;\ | |
1223 dst += 8*dstStride;\ | |
1224 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ | |
1225 OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ | |
1226 }\ | |
1227 \ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
1228 static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ |
3156 | 1229 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ |
1230 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ | |
1231 src += 8*dstStride;\ | |
1232 dst += 8*dstStride;\ | |
1233 src2 += 8*src2Stride;\ | |
1234 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ | |
1235 OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ | |
1236 }\ | |
1237 \ | |
3093 | 1238 static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ |
1239 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\ | |
1240 }\ | |
1241 \ | |
2754 | 1242 static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ |
3093 | 1243 OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\ |
2754 | 1244 }\ |
3095 | 1245 \ |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
1246 static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ |
3095 | 1247 {\ |
1248 asm volatile(\ | |
1249 "movq %5, %%mm6 \n\t"\ | |
1250 "movq (%1), %%mm0 \n\t"\ | |
1251 "movq 24(%1), %%mm1 \n\t"\ | |
1252 "paddw %%mm6, %%mm0 \n\t"\ | |
1253 "paddw %%mm6, %%mm1 \n\t"\ | |
3102 | 1254 "psraw $5, %%mm0 \n\t"\ |
1255 "psraw $5, %%mm1 \n\t"\ | |
3163 | 1256 "packuswb %%mm0, %%mm0 \n\t"\ |
1257 "packuswb %%mm1, %%mm1 \n\t"\ | |
3095 | 1258 PAVGB" (%0), %%mm0 \n\t"\ |
1259 PAVGB" (%0,%3), %%mm1 \n\t"\ | |
1260 OP(%%mm0, (%2), %%mm4, d)\ | |
1261 OP(%%mm1, (%2,%4), %%mm5, d)\ | |
1262 "lea (%0,%3,2), %0 \n\t"\ | |
1263 "lea (%2,%4,2), %2 \n\t"\ | |
1264 "movq 48(%1), %%mm0 \n\t"\ | |
1265 "movq 72(%1), %%mm1 \n\t"\ | |
1266 "paddw %%mm6, %%mm0 \n\t"\ | |
1267 "paddw %%mm6, %%mm1 \n\t"\ | |
3102 | 1268 "psraw $5, %%mm0 \n\t"\ |
1269 "psraw $5, %%mm1 \n\t"\ | |
3163 | 1270 "packuswb %%mm0, %%mm0 \n\t"\ |
1271 "packuswb %%mm1, %%mm1 \n\t"\ | |
3095 | 1272 PAVGB" (%0), %%mm0 \n\t"\ |
1273 PAVGB" (%0,%3), %%mm1 \n\t"\ | |
1274 OP(%%mm0, (%2), %%mm4, d)\ | |
1275 OP(%%mm1, (%2,%4), %%mm5, d)\ | |
1276 :"+a"(src8), "+c"(src16), "+d"(dst)\ | |
3096 | 1277 :"S"((long)src8Stride), "D"((long)dstStride), "m"(ff_pw_16)\ |
3095 | 1278 :"memory");\ |
1279 }\ | |
4527
481763d70193
prevent h.264 MC related functions from being inlined (yes this is much faster the code just doesnt fit in the code cache otherwise)
michael
parents:
4137
diff
changeset
|
1280 static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ |
3095 | 1281 {\ |
1282 asm volatile(\ | |
1283 "movq %0, %%mm6 \n\t"\ | |
1284 ::"m"(ff_pw_16)\ | |
1285 );\ | |
1286 while(h--){\ | |
1287 asm volatile(\ | |
1288 "movq (%1), %%mm0 \n\t"\ | |
1289 "movq 8(%1), %%mm1 \n\t"\ | |
1290 "paddw %%mm6, %%mm0 \n\t"\ | |
1291 "paddw %%mm6, %%mm1 \n\t"\ | |
3102 | 1292 "psraw $5, %%mm0 \n\t"\ |
1293 "psraw $5, %%mm1 \n\t"\ | |
3095 | 1294 "packuswb %%mm1, %%mm0 \n\t"\ |
1295 PAVGB" (%0), %%mm0 \n\t"\ | |
1296 OP(%%mm0, (%2), %%mm5, q)\ | |
1297 ::"a"(src8), "c"(src16), "d"(dst)\ | |
1298 :"memory");\ | |
1299 src8 += src8Stride;\ | |
1300 src16 += 24;\ | |
1301 dst += dstStride;\ | |
1302 }\ | |
1303 }\ | |
1304 static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ | |
1305 {\ | |
1306 OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\ | |
1307 OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\ | |
1308 }\ | |
1309 | |
2754 | 1310 |
1311 #define H264_MC(OPNAME, SIZE, MMX) \ | |
1312 static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ | |
1313 OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\ | |
1314 }\ | |
1315 \ | |
1316 static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3156 | 1317 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\ |
2754 | 1318 }\ |
1319 \ | |
1320 static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1321 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\ | |
1322 }\ | |
1323 \ | |
1324 static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3156 | 1325 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\ |
2754 | 1326 }\ |
1327 \ | |
1328 static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1329 uint64_t temp[SIZE*SIZE/8];\ | |
1330 uint8_t * const half= (uint8_t*)temp;\ | |
1331 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\ | |
1332 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\ | |
1333 }\ | |
1334 \ | |
1335 static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1336 OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\ | |
1337 }\ | |
1338 \ | |
1339 static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
1340 uint64_t temp[SIZE*SIZE/8];\ | |
1341 uint8_t * const half= (uint8_t*)temp;\ | |
1342 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\ | |
1343 OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\ | |
1344 }\ | |
1345 \ | |
1346 static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3156 | 1347 uint64_t temp[SIZE*SIZE/8];\ |
1348 uint8_t * const halfV= (uint8_t*)temp;\ | |
2754 | 1349 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ |
3156 | 1350 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\ |
2754 | 1351 }\ |
1352 \ | |
1353 static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3156 | 1354 uint64_t temp[SIZE*SIZE/8];\ |
1355 uint8_t * const halfV= (uint8_t*)temp;\ | |
2754 | 1356 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ |
3156 | 1357 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\ |
2754 | 1358 }\ |
1359 \ | |
1360 static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3156 | 1361 uint64_t temp[SIZE*SIZE/8];\ |
1362 uint8_t * const halfV= (uint8_t*)temp;\ | |
2754 | 1363 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ |
3156 | 1364 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\ |
2754 | 1365 }\ |
1366 \ | |
1367 static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3156 | 1368 uint64_t temp[SIZE*SIZE/8];\ |
1369 uint8_t * const halfV= (uint8_t*)temp;\ | |
2754 | 1370 put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ |
3156 | 1371 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\ |
2754 | 1372 }\ |
1373 \ | |
1374 static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3093 | 1375 uint64_t temp[SIZE*(SIZE<8?12:24)/4];\ |
2754 | 1376 int16_t * const tmp= (int16_t*)temp;\ |
1377 OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\ | |
1378 }\ | |
1379 \ | |
1380 static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3156 | 1381 uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ |
1382 uint8_t * const halfHV= (uint8_t*)temp;\ | |
1383 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\ | |
5809
7301ea0ae221
Fix intended order of operations for 4 assert() checks.
cehoyos
parents:
4939
diff
changeset
|
1384 assert(((int)temp & 7) == 0);\ |
2754 | 1385 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ |
3156 | 1386 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\ |
2754 | 1387 }\ |
1388 \ | |
1389 static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3156 | 1390 uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ |
1391 uint8_t * const halfHV= (uint8_t*)temp;\ | |
1392 int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\ | |
5809
7301ea0ae221
Fix intended order of operations for 4 assert() checks.
cehoyos
parents:
4939
diff
changeset
|
1393 assert(((int)temp & 7) == 0);\ |
2754 | 1394 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ |
3156 | 1395 OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\ |
2754 | 1396 }\ |
1397 \ | |
1398 static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3095 | 1399 uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ |
1400 int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\ | |
1401 uint8_t * const halfHV= ((uint8_t*)temp);\ | |
5809
7301ea0ae221
Fix intended order of operations for 4 assert() checks.
cehoyos
parents:
4939
diff
changeset
|
1402 assert(((int)temp & 7) == 0);\ |
3095 | 1403 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ |
1404 OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\ | |
2754 | 1405 }\ |
1406 \ | |
1407 static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ | |
3095 | 1408 uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ |
1409 int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\ | |
1410 uint8_t * const halfHV= ((uint8_t*)temp);\ | |
5809
7301ea0ae221
Fix intended order of operations for 4 assert() checks.
cehoyos
parents:
4939
diff
changeset
|
1411 assert(((int)temp & 7) == 0);\ |
3095 | 1412 put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ |
1413 OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\ | |
2754 | 1414 }\ |
1415 | |
1416 | |
1417 #define AVG_3DNOW_OP(a,b,temp, size) \ | |
2979 | 1418 "mov" #size " " #b ", " #temp " \n\t"\ |
1419 "pavgusb " #temp ", " #a " \n\t"\ | |
1420 "mov" #size " " #a ", " #b " \n\t" | |
2754 | 1421 #define AVG_MMX2_OP(a,b,temp, size) \ |
2979 | 1422 "mov" #size " " #b ", " #temp " \n\t"\ |
1423 "pavgb " #temp ", " #a " \n\t"\ | |
1424 "mov" #size " " #a ", " #b " \n\t" | |
2754 | 1425 |
3095 | 1426 #define PAVGB "pavgusb" |
2754 | 1427 QPEL_H264(put_, PUT_OP, 3dnow) |
1428 QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow) | |
3095 | 1429 #undef PAVGB |
1430 #define PAVGB "pavgb" | |
2754 | 1431 QPEL_H264(put_, PUT_OP, mmx2) |
1432 QPEL_H264(avg_, AVG_MMX2_OP, mmx2) | |
3095 | 1433 #undef PAVGB |
2754 | 1434 |
1435 H264_MC(put_, 4, 3dnow) | |
1436 H264_MC(put_, 8, 3dnow) | |
1437 H264_MC(put_, 16,3dnow) | |
1438 H264_MC(avg_, 4, 3dnow) | |
1439 H264_MC(avg_, 8, 3dnow) | |
1440 H264_MC(avg_, 16,3dnow) | |
1441 H264_MC(put_, 4, mmx2) | |
1442 H264_MC(put_, 8, mmx2) | |
1443 H264_MC(put_, 16,mmx2) | |
1444 H264_MC(avg_, 4, mmx2) | |
1445 H264_MC(avg_, 8, mmx2) | |
1446 H264_MC(avg_, 16,mmx2) | |
1447 | |
1448 | |
1449 #define H264_CHROMA_OP(S,D) | |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1450 #define H264_CHROMA_OP4(S,D,T) |
2754 | 1451 #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1452 #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx |
3213 | 1453 #define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2 |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1454 #define H264_CHROMA_MC8_MV0 put_pixels8_mmx |
2754 | 1455 #include "dsputil_h264_template_mmx.c" |
6057
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1456 |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1457 static void put_h264_chroma_mc8_mmx_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1458 { |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1459 put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 1); |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1460 } |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1461 static void put_h264_chroma_mc8_mmx_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1462 { |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1463 put_h264_chroma_mc8_mmx(dst, src, stride, h, x, y, 0); |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1464 } |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1465 |
2754 | 1466 #undef H264_CHROMA_OP |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1467 #undef H264_CHROMA_OP4 |
2754 | 1468 #undef H264_CHROMA_MC8_TMPL |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1469 #undef H264_CHROMA_MC4_TMPL |
3213 | 1470 #undef H264_CHROMA_MC2_TMPL |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1471 #undef H264_CHROMA_MC8_MV0 |
2754 | 1472 |
1473 #define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t" | |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1474 #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\ |
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1475 "pavgb " #T ", " #D " \n\t" |
2754 | 1476 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2 |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1477 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2 |
3213 | 1478 #define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2 |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1479 #define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2 |
2754 | 1480 #include "dsputil_h264_template_mmx.c" |
6057
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1481 static void avg_h264_chroma_mc8_mmx2_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1482 { |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1483 avg_h264_chroma_mc8_mmx2(dst, src, stride, h, x, y, 1); |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1484 } |
2754 | 1485 #undef H264_CHROMA_OP |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1486 #undef H264_CHROMA_OP4 |
2754 | 1487 #undef H264_CHROMA_MC8_TMPL |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1488 #undef H264_CHROMA_MC4_TMPL |
3213 | 1489 #undef H264_CHROMA_MC2_TMPL |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1490 #undef H264_CHROMA_MC8_MV0 |
2754 | 1491 |
1492 #define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t" | |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1493 #define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\ |
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1494 "pavgusb " #T ", " #D " \n\t" |
2754 | 1495 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1496 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow |
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1497 #define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow |
2754 | 1498 #include "dsputil_h264_template_mmx.c" |
6057
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1499 static void avg_h264_chroma_mc8_3dnow_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1500 { |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1501 avg_h264_chroma_mc8_3dnow(dst, src, stride, h, x, y, 1); |
03febc8f506f
add MMX version for put_no_rnd_h264_chroma_mc8_c, used in VC-1 decoding.
gpoirier
parents:
5947
diff
changeset
|
1502 } |
2754 | 1503 #undef H264_CHROMA_OP |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1504 #undef H264_CHROMA_OP4 |
2754 | 1505 #undef H264_CHROMA_MC8_TMPL |
2922
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1506 #undef H264_CHROMA_MC4_TMPL |
d772011258ec
faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx.
lorenm
parents:
2902
diff
changeset
|
1507 #undef H264_CHROMA_MC8_MV0 |
2754 | 1508 |
2902
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1509 /***********************************/ |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1510 /* weighted prediction */ |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1511 |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1512 static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1513 { |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1514 int x, y; |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1515 offset <<= log2_denom; |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1516 offset += (1 << log2_denom) >> 1; |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1517 asm volatile( |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1518 "movd %0, %%mm4 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1519 "movd %1, %%mm5 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1520 "movd %2, %%mm6 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1521 "pshufw $0, %%mm4, %%mm4 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1522 "pshufw $0, %%mm5, %%mm5 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1523 "pxor %%mm7, %%mm7 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1524 :: "g"(weight), "g"(offset), "g"(log2_denom) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1525 ); |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1526 for(y=0; y<h; y+=2){ |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1527 for(x=0; x<w; x+=4){ |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1528 asm volatile( |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1529 "movd %0, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1530 "movd %1, %%mm1 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1531 "punpcklbw %%mm7, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1532 "punpcklbw %%mm7, %%mm1 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1533 "pmullw %%mm4, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1534 "pmullw %%mm4, %%mm1 \n\t" |
3001
b52d8ee430f6
fix some potential arithmetic overflows in pred_direct_motion() and
lorenm
parents:
2979
diff
changeset
|
1535 "paddsw %%mm5, %%mm0 \n\t" |
b52d8ee430f6
fix some potential arithmetic overflows in pred_direct_motion() and
lorenm
parents:
2979
diff
changeset
|
1536 "paddsw %%mm5, %%mm1 \n\t" |
2902
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1537 "psraw %%mm6, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1538 "psraw %%mm6, %%mm1 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1539 "packuswb %%mm7, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1540 "packuswb %%mm7, %%mm1 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1541 "movd %%mm0, %0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1542 "movd %%mm1, %1 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1543 : "+m"(*(uint32_t*)(dst+x)), |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1544 "+m"(*(uint32_t*)(dst+x+stride)) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1545 ); |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1546 } |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1547 dst += 2*stride; |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1548 } |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1549 } |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1550 |
3029 | 1551 static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h) |
2902
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1552 { |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1553 int x, y; |
3029 | 1554 offset = ((offset + 1) | 1) << log2_denom; |
2902
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1555 asm volatile( |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1556 "movd %0, %%mm3 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1557 "movd %1, %%mm4 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1558 "movd %2, %%mm5 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1559 "movd %3, %%mm6 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1560 "pshufw $0, %%mm3, %%mm3 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1561 "pshufw $0, %%mm4, %%mm4 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1562 "pshufw $0, %%mm5, %%mm5 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1563 "pxor %%mm7, %%mm7 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1564 :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1565 ); |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1566 for(y=0; y<h; y++){ |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1567 for(x=0; x<w; x+=4){ |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1568 asm volatile( |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1569 "movd %0, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1570 "movd %1, %%mm1 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1571 "punpcklbw %%mm7, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1572 "punpcklbw %%mm7, %%mm1 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1573 "pmullw %%mm3, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1574 "pmullw %%mm4, %%mm1 \n\t" |
3001
b52d8ee430f6
fix some potential arithmetic overflows in pred_direct_motion() and
lorenm
parents:
2979
diff
changeset
|
1575 "paddsw %%mm1, %%mm0 \n\t" |
b52d8ee430f6
fix some potential arithmetic overflows in pred_direct_motion() and
lorenm
parents:
2979
diff
changeset
|
1576 "paddsw %%mm5, %%mm0 \n\t" |
2902
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1577 "psraw %%mm6, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1578 "packuswb %%mm0, %%mm0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1579 "movd %%mm0, %0 \n\t" |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1580 : "+m"(*(uint32_t*)(dst+x)) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1581 : "m"(*(uint32_t*)(src+x)) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1582 ); |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1583 } |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1584 src += stride; |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1585 dst += stride; |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1586 } |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1587 } |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1588 |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1589 #define H264_WEIGHT(W,H) \ |
3029 | 1590 static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \ |
1591 ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \ | |
2902
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1592 } \ |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1593 static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \ |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1594 ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \ |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1595 } |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1596 |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1597 H264_WEIGHT(16,16) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1598 H264_WEIGHT(16, 8) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1599 H264_WEIGHT( 8,16) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1600 H264_WEIGHT( 8, 8) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1601 H264_WEIGHT( 8, 4) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1602 H264_WEIGHT( 4, 8) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1603 H264_WEIGHT( 4, 4) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1604 H264_WEIGHT( 4, 2) |
3c79bc9f3aa9
h264 mmx weighted prediction. up to 3% overall speedup.
lorenm
parents:
2855
diff
changeset
|
1605 |