libavcodec: comparison of i386/dsputil_h264_template_mmx.c @ 4504:6287a2ff4d08

merge asm fragments in H264_CHROMA_MC2_TMPL()
10% faster avg_h264_chroma_mc2_mmx2()
5% faster put_h264_chroma_mc2_mmx2()
author michael
date Fri, 09 Feb 2007 12:24:22 +0000
parents c8c591fe26f8
children cb5628800a62
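
The speedup described above comes from folding the per-row C loop and its separate asm statements into one asm block, so the row loop, the pointer stepping and the counter all live inside the asm. A minimal sketch of that pattern on an unrelated toy kernel (hypothetical names, GNU C inline asm, 32-bit x86 like this file, assuming h >= 1 — not part of the changeset):

#include <stdint.h>

/* before: the compiler emits loop control and glue code around each fragment */
static int sum_col_split(const uint8_t *src, int stride, int h)
{
    int sum = 0, i;
    for (i = 0; i < h; i++) {
        int v;
        asm volatile("movzbl %1, %0" : "=r"(v) : "m"(*src));
        sum += v;
        src += stride;
    }
    return sum;
}

/* after: one asm block carries the whole loop; the pointer and counter are
 * handed over as read-write ("+r") operands, as the merged MC2 loop does */
static int sum_col_merged(const uint8_t *src, int stride, int h)
{
    int sum = 0;
    asm volatile(
        "1:                    \n\t"
        "movzbl (%1), %%eax    \n\t"
        "addl   %%eax, %0      \n\t"
        "addl   %3, %1         \n\t"  /* src += stride */
        "subl   $1, %2         \n\t"  /* h-- */
        "jnz    1b             \n\t"
        : "+r"(sum), "+r"(src), "+r"(h)
        : "r"(stride)
        : "%eax", "cc", "memory");
    return sum;
}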
--- i386/dsputil_h264_template_mmx.c  4503:f334529b46ec
+++ i386/dsputil_h264_template_mmx.c  4504:6287a2ff4d08
@@ -265,62 +265,51 @@
 #ifdef H264_CHROMA_MC2_TMPL
 static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
 {
     int CD=((1<<16)-1)*x*y + 8*y;
     int AB=((8<<16)-8)*x + 64 - CD;
-    int i;
-
     asm volatile(
         /* mm5 = {A,B,A,B} */
         /* mm6 = {C,D,C,D} */
         "movd %0, %%mm5\n\t"
         "movd %1, %%mm6\n\t"
         "punpckldq %%mm5, %%mm5\n\t"
         "punpckldq %%mm6, %%mm6\n\t"
         "pxor %%mm7, %%mm7\n\t"
-        :: "r"(AB), "r"(CD));
-
-    asm volatile(
         /* mm0 = src[0,1,1,2] */
-        "movd %0, %%mm0\n\t"
+        "movd %2, %%mm0\n\t"
         "punpcklbw %%mm7, %%mm0\n\t"
         "pshufw $0x94, %%mm0, %%mm0\n\t"
-        :: "m"(src[0]));
+        :: "r"(AB), "r"(CD), "m"(src[0]));
 
-    for(i=0; i<h; i++) {
-        asm volatile(
-            /* mm1 = A * src[0,1] + B * src[1,2] */
-            "movq %%mm0, %%mm1\n\t"
-            "pmaddwd %%mm5, %%mm1\n\t"
-            ::);
-
-        src += stride;
-        asm volatile(
-            /* mm0 = src[0,1,1,2] */
-            "movd %0, %%mm0\n\t"
-            "punpcklbw %%mm7, %%mm0\n\t"
-            "pshufw $0x94, %%mm0, %%mm0\n\t"
-            :: "m"(src[0]));
-
-        asm volatile(
-            /* mm1 += C * src[0,1] + D * src[1,2] */
-            "movq %%mm0, %%mm2\n\t"
-            "pmaddwd %%mm6, %%mm2\n\t"
-            "paddw %%mm2, %%mm1\n\t"
-            ::);
-
-        asm volatile(
-            /* dst[0,1] = pack((mm1 + 32) >> 6) */
-            "paddw %1, %%mm1\n\t"
-            "psrlw $6, %%mm1\n\t"
-            "packssdw %%mm7, %%mm1\n\t"
-            "packuswb %%mm7, %%mm1\n\t"
-            /* writes garbage to the right of dst.
-             * ok because partitions are processed from left to right. */
-            H264_CHROMA_OP4(%0, %%mm1, %%mm3)
-            "movd %%mm1, %0\n\t"
-            : "=m" (dst[0]) : "m" (ff_pw_32));
-        dst += stride;
-    }
+
+    asm volatile(
+        "1:\n\t"
+        "addl %4, %1\n\t"
+        /* mm1 = A * src[0,1] + B * src[1,2] */
+        "movq %%mm0, %%mm1\n\t"
+        "pmaddwd %%mm5, %%mm1\n\t"
+        /* mm0 = src[0,1,1,2] */
+        "movd (%1), %%mm0\n\t"
+        "punpcklbw %%mm7, %%mm0\n\t"
+        "pshufw $0x94, %%mm0, %%mm0\n\t"
+        /* mm1 += C * src[0,1] + D * src[1,2] */
+        "movq %%mm0, %%mm2\n\t"
+        "pmaddwd %%mm6, %%mm2\n\t"
+        "paddw %%mm2, %%mm1\n\t"
+        /* dst[0,1] = pack((mm1 + 32) >> 6) */
+        "paddw %3, %%mm1\n\t"
+        "psrlw $6, %%mm1\n\t"
+        "packssdw %%mm7, %%mm1\n\t"
+        "packuswb %%mm7, %%mm1\n\t"
+        /* writes garbage to the right of dst.
+         * ok because partitions are processed from left to right. */
+        H264_CHROMA_OP4((%0), %%mm1, %%mm3)
+        "movd %%mm1, (%0)\n\t"
+        "addl %4, %0\n\t"
+        "subl $1, %2\n\t"
+        "jnz 1b\n\t"
+        : "+r" (dst), "+r"(src), "+r"(h) : "m" (ff_pw_32), "r"(stride));
+
 }
 #endif
 
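For reference (not part of the changeset): AB and CD pack the H.264 bilinear chroma weights A=(8-x)*(8-y), B=x*(8-y), C=(8-x)*y, D=x*y into 16-bit halves, matching the mm5={A,B,A,B} / mm6={C,D,C,D} comments. A plain-C sketch of what the 2-pixel-wide put variant computes (hypothetical helper name; unlike the asm, it writes exactly two bytes per row instead of spilling two garbage bytes to the right of dst):

#include <stdint.h>

static void put_h264_chroma_mc2_c(uint8_t *dst, const uint8_t *src,
                                  int stride, int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = x * (8 - y);
    const int C = (8 - x) * y;
    const int D = x * y;
    int i, j;

    for (j = 0; j < h; j++) {
        for (i = 0; i < 2; i++)
            dst[i] = (A * src[i]          + B * src[i + 1] +
                      C * src[i + stride] + D * src[i + stride + 1] + 32) >> 6;
        dst += stride;
        src += stride;
    }
}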