changeset 2922:d772011258ec libavcodec

faster h264_chroma_mc8_mmx, added h264_chroma_mc4_mmx. 2-4% overall speedup.
author lorenm
date Thu, 27 Oct 2005 06:45:29 +0000
parents d22a3556292a
children 7fa9106be552
files i386/dsputil_h264_template_mmx.c i386/dsputil_mmx.c i386/h264dsp_mmx.c
diffstat 3 files changed, 239 insertions(+), 34 deletions(-)
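The routines below implement H.264's bilinear chroma interpolation: each output pixel is a weighted average of a 2x2 source neighbourhood, with the weights derived from the eighth-pel fractional motion vector (x,y). As a reference for the asm, a minimal scalar sketch of the same operation (illustrative helper only, not part of this changeset):

    #include <stdint.h>

    static void chroma_mc_ref(uint8_t *dst, uint8_t *src, int stride,
                              int w, int h, int x, int y)
    {
        /* weights sum to 64; see the derivation notes further down */
        const int A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y;
        int i, j;
        for(i=0; i<h; i++){
            for(j=0; j<w; j++)
                dst[j] = (A*src[j]        + B*src[j+1] +
                          C*src[j+stride] + D*src[j+stride+1] + 32) >> 6;
            dst += stride;
            src += stride;
        }
    }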
--- a/i386/dsputil_h264_template_mmx.c	Wed Oct 26 12:51:10 2005 +0000
+++ b/i386/dsputil_h264_template_mmx.c	Thu Oct 27 06:45:29 2005 +0000
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>
+ * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
+ *                    Loren Merritt
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -18,22 +19,138 @@
 
 /**
  * MMX optimized version of (put|avg)_h264_chroma_mc8.
- * H264_CHROMA_MC8_TMPL must be defined to the desired function name and
- * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg.
+ * H264_CHROMA_MC8_TMPL must be defined to the desired function name
+ * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg
+ * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
  */
 static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
 {
     uint64_t AA __align8;
     uint64_t DD __align8;
-    unsigned long srcos = (long)src & 7;
-    uint64_t sh1 __align8 = srcos * 8;
-    uint64_t sh2 __align8 = 56 - sh1;
     int i;
 
+    if(y==0 && x==0) {
+        /* no filter needed */
+        H264_CHROMA_MC8_MV0(dst, src, stride, h);
+        return;
+    }
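(Note: with x == y == 0 the bilinear weights collapse to A = 8*8 = 64 and B = C = D = 0, so dst = (64*src + 32) >> 6 = src exactly; the filter degenerates to a copy, or to a plain average in the avg instantiations, which is why the existing (put|avg)_pixels8 can take over.)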
+
     assert(x<8 && y<8 && x>=0 && y>=0);
 
-    asm volatile("movd %1, %%mm4\n\t"
-                 "movd %2, %%mm6\n\t"
+    if(y==0)
+    {
+        /* horizontal filter only */
+        asm volatile("movd %0, %%mm5\n\t"
+                     "punpcklwd %%mm5, %%mm5\n\t"
+                     "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */
+                     "movq %1, %%mm4\n\t"
+                     "pxor %%mm7, %%mm7\n\t"
+                     "psubw %%mm5, %%mm4\n\t"     /* mm4 = A = 8-x */
+                     : : "rm" (x), "m" (ff_pw_8));
+
+        for(i=0; i<h; i++) {
+            asm volatile(
+                /* mm0 = src[0..7], mm1 = src[1..8] */
+                "movq %0, %%mm0\n\t"
+                "movq %1, %%mm1\n\t"
+                : : "m" (src[0]), "m" (src[1]));
+
+            asm volatile(
+                /* [mm2,mm3] = A * src[0..7] */
+                "movq %%mm0, %%mm2\n\t"
+                "punpcklbw %%mm7, %%mm2\n\t"
+                "pmullw %%mm4, %%mm2\n\t"
+                "movq %%mm0, %%mm3\n\t"
+                "punpckhbw %%mm7, %%mm3\n\t"
+                "pmullw %%mm4, %%mm3\n\t"
+
+                /* [mm2,mm3] += B * src[1..8] */
+                "movq %%mm1, %%mm0\n\t"
+                "punpcklbw %%mm7, %%mm0\n\t"
+                "pmullw %%mm5, %%mm0\n\t"
+                "punpckhbw %%mm7, %%mm1\n\t"
+                "pmullw %%mm5, %%mm1\n\t"
+                "paddw %%mm0, %%mm2\n\t"
+                "paddw %%mm1, %%mm3\n\t"
+
+                /* dst[0..7] = pack(([mm2,mm3] + 4) >> 3) */
+                "paddw %1, %%mm2\n\t"
+                "paddw %1, %%mm3\n\t"
+                "psrlw $3, %%mm2\n\t"
+                "psrlw $3, %%mm3\n\t"
+                "packuswb %%mm3, %%mm2\n\t"
+                H264_CHROMA_OP(%0, %%mm2)
+                "movq %%mm2, %0\n\t"
+                : "=m" (dst[0]) : "m" (ff_pw_4));
+
+            src += stride;
+            dst += stride;
+        }
+        return;
+    }
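(The ff_pw_4 / psrlw $3 rounding above is exact rather than an approximation: with y == 0 the bilinear weights reduce to 8*(8-x) and 8*x, so

    ((64-8x)*a + 8x*b + 32) >> 6  ==  (8*((8-x)*a + x*b + 4)) >> 6
                                  ==  ((8-x)*a + x*b + 4) >> 3

and the one-dimensional filter can round with +4 and a shift of 3. The x == 0 branch below relies on the same identity with y in place of x.)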
+
+    if(x==0)
+    {
+        /* vertical filter only */
+        asm volatile("movd %0, %%mm6\n\t"
+                     "punpcklwd %%mm6, %%mm6\n\t"
+                     "punpckldq %%mm6, %%mm6\n\t" /* mm6 = C = y */
+                     "movq %1, %%mm4\n\t"
+                     "pxor %%mm7, %%mm7\n\t"
+                     "psubw %%mm6, %%mm4\n\t"     /* mm4 = A = 8-y */
+                     : : "rm" (y), "m" (ff_pw_8));
+
+        asm volatile(
+            /* mm0 = src[0..7] */
+            "movq %0, %%mm0\n\t"
+            : : "m" (src[0]));
+
+        for(i=0; i<h; i++) {
+            asm volatile(
+                /* [mm2,mm3] = A * src[0..7] */
+                "movq %mm0, %mm2\n\t"
+                "punpcklbw %mm7, %mm2\n\t"
+                "pmullw %mm4, %mm2\n\t"
+                "movq %mm0, %mm3\n\t"
+                "punpckhbw %mm7, %mm3\n\t"
+                "pmullw %mm4, %mm3\n\t");
+
+            src += stride;
+            asm volatile(
+                /* mm0 = src[0..7] */
+                "movq %0, %%mm0\n\t"
+                : : "m" (src[0]));
+
+            asm volatile(
+                /* [mm2,mm3] += C * src[0..7] */
+                "movq %mm0, %mm1\n\t"
+                "punpcklbw %mm7, %mm1\n\t"
+                "pmullw %mm6, %mm1\n\t"
+                "paddw %mm1, %mm2\n\t"
+                "movq %mm0, %mm5\n\t"
+                "punpckhbw %mm7, %mm5\n\t"
+                "pmullw %mm6, %mm5\n\t"
+                "paddw %mm5, %mm3\n\t");
+
+            asm volatile(
+                /* dst[0..7] = pack(([mm2,mm3] + 4) >> 3) */
+                "paddw %1, %%mm2\n\t"
+                "paddw %1, %%mm3\n\t"
+                "psrlw $3, %%mm2\n\t"
+                "psrlw $3, %%mm3\n\t"
+                "packuswb %%mm3, %%mm2\n\t"
+                H264_CHROMA_OP(%0, %%mm2)
+                "movq %%mm2, %0\n\t"
+                : "=m" (dst[0]) : "m" (ff_pw_4));
+
+            dst += stride;
+        }
+        return;
+    }
+
+    /* general case, bilinear */
+    asm volatile("movd %2, %%mm4\n\t"
+                 "movd %3, %%mm6\n\t"
                  "punpcklwd %%mm4, %%mm4\n\t"
                  "punpcklwd %%mm6, %%mm6\n\t"
                  "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
@@ -44,29 +161,20 @@
                  "psllw $3, %%mm6\n\t"
                  "movq %%mm5, %%mm7\n\t"
                  "paddw %%mm6, %%mm7\n\t"
-                 "movq %%mm4, %0\n\t"         /* DD = x * y */
+                 "movq %%mm4, %1\n\t"         /* DD = x * y */
                  "psubw %%mm4, %%mm5\n\t"     /* mm5 = B = 8x - xy */
                  "psubw %%mm4, %%mm6\n\t"     /* mm6 = C = 8y - xy */
-                 "paddw %3, %%mm4\n\t"
+                 "paddw %4, %%mm4\n\t"
                  "psubw %%mm7, %%mm4\n\t"     /* mm4 = A = xy - (8x+8y) + 64 */
                  "pxor %%mm7, %%mm7\n\t"
-                 : "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
+                 "movq %%mm4, %0\n\t"
+                 : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
 
-    asm volatile("movq %%mm4, %0" : "=m" (AA));
-
-    src -= srcos;
     asm volatile(
         /* mm0 = src[0..7], mm1 = src[1..8] */
-        "movq %0, %%mm1\n\t"
-        "movq %1, %%mm0\n\t"
-        "psrlq %2, %%mm1\n\t"
-        "psllq %3, %%mm0\n\t"
-        "movq %%mm0, %%mm4\n\t"
-        "psllq $8, %%mm0\n\t"
-        "por %%mm1, %%mm0\n\t"
-        "psrlq $8, %%mm1\n\t"
-        "por %%mm4, %%mm1\n\t"
-        : : "m" (src[0]), "m" (src[8]), "m" (sh1), "m" (sh2));
+        "movq %0, %%mm0\n\t"
+        "movq %1, %%mm1\n\t"
+        : : "m" (src[0]), "m" (src[1]));
 
     for(i=0; i<h; i++) {
         asm volatile(
@@ -91,16 +199,9 @@
         src += stride;
         asm volatile(
             /* mm0 = src[0..7], mm1 = src[1..8] */
-            "movq %0, %%mm1\n\t"
-            "movq %1, %%mm0\n\t"
-            "psrlq %2, %%mm1\n\t"
-            "psllq %3, %%mm0\n\t"
-            "movq %%mm0, %%mm4\n\t"
-            "psllq $8, %%mm0\n\t"
-            "por %%mm1, %%mm0\n\t"
-            "psrlq $8, %%mm1\n\t"
-            "por %%mm4, %%mm1\n\t"
-            : : "m" (src[0]), "m" (src[8]), "m" (sh1), "m" (sh2));
+            "movq %0, %%mm0\n\t"
+            "movq %1, %%mm1\n\t"
+            : : "m" (src[0]), "m" (src[1]));
 
         asm volatile(
             /* [mm2,mm3] += C *  src[0..7] */
@@ -138,3 +239,83 @@
         dst+= stride;
     }
 }
+
+static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+    uint64_t AA __align8;
+    uint64_t DD __align8;
+    int i;
+
+    /* no special case for mv=(0,0) in 4x*, since it's much less common than in 8x*.
+     * could still save a few cycles, but maybe not worth the complexity. */
+
+    assert(x<8 && y<8 && x>=0 && y>=0);
+
+    asm volatile("movd %2, %%mm4\n\t"
+                 "movd %3, %%mm6\n\t"
+                 "punpcklwd %%mm4, %%mm4\n\t"
+                 "punpcklwd %%mm6, %%mm6\n\t"
+                 "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
+                 "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
+                 "movq %%mm4, %%mm5\n\t"
+                 "pmullw %%mm6, %%mm4\n\t"    /* mm4 = x * y */
+                 "psllw $3, %%mm5\n\t"
+                 "psllw $3, %%mm6\n\t"
+                 "movq %%mm5, %%mm7\n\t"
+                 "paddw %%mm6, %%mm7\n\t"
+                 "movq %%mm4, %1\n\t"         /* DD = x * y */
+                 "psubw %%mm4, %%mm5\n\t"     /* mm5 = B = 8x - xy */
+                 "psubw %%mm4, %%mm6\n\t"     /* mm6 = C = 8y - xy */
+                 "paddw %4, %%mm4\n\t"
+                 "psubw %%mm7, %%mm4\n\t"     /* mm4 = A = xy - (8x+8y) + 64 */
+                 "pxor %%mm7, %%mm7\n\t"
+                 "movq %%mm4, %0\n\t"
+                 : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));
+
+    asm volatile(
+        /* mm0 = src[0..3], mm1 = src[1..4] */
+        "movd %0, %%mm0\n\t"
+        "movd %1, %%mm1\n\t"
+        "punpcklbw %%mm7, %%mm0\n\t"
+        "punpcklbw %%mm7, %%mm1\n\t"
+        : : "m" (src[0]), "m" (src[1]));
+
+    for(i=0; i<h; i++) {
+        asm volatile(
+            /* mm2 = A * src[0..3] + B * src[1..4] */
+            "movq %%mm0, %%mm2\n\t"
+            "pmullw %0, %%mm2\n\t"
+            "pmullw %%mm5, %%mm1\n\t"
+            "paddw %%mm1, %%mm2\n\t"
+            : : "m" (AA));
+
+        src += stride;
+        asm volatile(
+            /* mm0 = src[0..3], mm1 = src[1..4] */
+            "movd %0, %%mm0\n\t"
+            "movd %1, %%mm1\n\t"
+            "punpcklbw %%mm7, %%mm0\n\t"
+            "punpcklbw %%mm7, %%mm1\n\t"
+            : : "m" (src[0]), "m" (src[1]));
+
+        asm volatile(
+            /* mm2 += C * src[0..3] + D * src[1..4] */
+            "movq %%mm0, %%mm3\n\t"
+            "movq %%mm1, %%mm4\n\t"
+            "pmullw %%mm6, %%mm3\n\t"
+            "pmullw %0, %%mm4\n\t"
+            "paddw %%mm3, %%mm2\n\t"
+            "paddw %%mm4, %%mm2\n\t"
+            : : "m" (DD));
+
+        asm volatile(
+            /* dst[0..3] = pack((mm2 + 32) >> 6) */
+            "paddw %1, %%mm2\n\t"
+            "psrlw $6, %%mm2\n\t"
+            "packuswb %%mm7, %%mm2\n\t"
+            H264_CHROMA_OP4(%0, %%mm2, %%mm3)
+            "movd %%mm2, %0\n\t"
+            : "=m" (dst[0]) : "m" (ff_pw_32));
+        dst += stride;
+    }
+}
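Both inner loops are software-pipelined: each source row is loaded (and, in the 4-wide case, unpacked to words) exactly once, serving first as the bottom row (C/D terms) of one output line and then as the top row (A/B terms) of the next. A scalar model of that rotation, one pixel per row for brevity (sketch only):

    #include <stdint.h>

    static void mc_pipelined(uint8_t *dst, const uint8_t *src, int stride,
                             int h, int A, int B, int C, int D)
    {
        int t0 = src[0], t1 = src[1];       /* prologue: load the first row */
        int i;
        for (i = 0; i < h; i++) {
            int acc = A*t0 + B*t1;          /* reuse the previous row's load */
            src += stride;
            t0 = src[0]; t1 = src[1];       /* next row, loaded once... */
            acc += C*t0 + D*t1;             /* ...used as the bottom row now */
            dst[0] = (acc + 32) >> 6;
            dst += stride;
        }
    }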
--- a/i386/dsputil_mmx.c	Wed Oct 26 12:51:10 2005 +0000
+++ b/i386/dsputil_mmx.c	Thu Oct 27 06:45:29 2005 +0000
@@ -43,6 +43,7 @@
 static const uint64_t ff_pw_3  attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
 static const uint64_t ff_pw_4  attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
 static const uint64_t ff_pw_5  attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
+static const uint64_t ff_pw_8  attribute_used __attribute__ ((aligned(8))) = 0x0008000800080008ULL;
 static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
 static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
 static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
@@ -2726,6 +2727,7 @@
         c->h263_v_loop_filter= h263_v_loop_filter_mmx;
         c->h263_h_loop_filter= h263_h_loop_filter_mmx;        
 	c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;
+        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
         
         if (mm_flags & MM_MMXEXT) {
             c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
@@ -2825,6 +2827,7 @@
 #undef dspfunc
 
 	    c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
+            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
             c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
             c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
             c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
@@ -2936,6 +2939,7 @@
             dspfunc(avg_h264_qpel, 2, 4);
 
 	    c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
+            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
         }
     }
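The put kernels need only plain MMX, while the avg kernels use pavgb (MMX2) or pavgusb (3DNow!) and are therefore only registered under the matching mm_flags, as above. Index 0 of the chroma tables serves 8-wide blocks and the new index 1 serves 4-wide ones; a hypothetical caller would reach the kernels roughly like this (sketch only, not part of the changeset):

    /* mvx/mvy are the chroma MV in eighth-pel units (4:2:0) */
    static void mc_chroma(DSPContext *c, uint8_t *dst, uint8_t *src,
                          int stride, int width, int h, int mvx, int mvy)
    {
        int idx = (width == 8) ? 0 : 1;          /* 8-wide vs 4-wide kernel */
        src += (mvy >> 3) * stride + (mvx >> 3); /* integer part of the MV */
        c->put_h264_chroma_pixels_tab[idx](dst, src, stride, h,
                                           mvx & 7, mvy & 7);
    }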
         
--- a/i386/h264dsp_mmx.c	Wed Oct 26 12:51:10 2005 +0000
+++ b/i386/h264dsp_mmx.c	Thu Oct 27 06:45:29 2005 +0000
@@ -892,22 +892,42 @@
 
 
 #define H264_CHROMA_OP(S,D)
+#define H264_CHROMA_OP4(S,D,T)
 #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
+#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
+#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
 #include "dsputil_h264_template_mmx.c"
 #undef H264_CHROMA_OP
+#undef H264_CHROMA_OP4
 #undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC8_MV0
 
 #define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
+#define H264_CHROMA_OP4(S,D,T) "movd  " #S ", " #T " \n\t"\
+                               "pavgb " #T ", " #D " \n\t"
 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
+#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
+#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
 #include "dsputil_h264_template_mmx.c"
 #undef H264_CHROMA_OP
+#undef H264_CHROMA_OP4
 #undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC8_MV0
 
 #define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
+#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
+                               "pavgusb " #T ", " #D " \n\t"
 #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
+#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
+#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
 #include "dsputil_h264_template_mmx.c"
 #undef H264_CHROMA_OP
+#undef H264_CHROMA_OP4
 #undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC8_MV0
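For illustration, in the mmx2 instantiation above, H264_CHROMA_OP4(%0, %%mm2, %%mm3) in the mc4 epilogue expands to

    movd  %0, %%mm3      /* T: fetch the existing dst pixels */
    pavgb %%mm3, %%mm2   /* average them into the computed result */

while in the put instantiation both OP macros expand to nothing and the result is stored as-is. The extra scratch operand T exists because pavgb needs the 4 destination bytes in a register, whereas the 8-wide H264_CHROMA_OP can average straight from memory.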
 
 /***********************************/
 /* weighted prediction */