changeset 6557:e1208c4f8898 libavcodec

h264 chroma mc ssse3 width8: 180->92, width4: 78->63 cycles (core2)
author lorenm
date Tue, 01 Apr 2008 04:51:28 +0000
parents 8300baeb2b5f
children 06b1e0371e90
files i386/dsputil_h264_template_mmx.c i386/dsputil_h264_template_ssse3.c i386/dsputil_mmx.c i386/dsputil_mmx.h i386/h264dsp_mmx.c
diffstat 5 files changed, 251 insertions(+), 2 deletions(-)
--- a/i386/dsputil_h264_template_mmx.c	Tue Apr 01 01:28:26 2008 +0000
+++ b/i386/dsputil_h264_template_mmx.c	Tue Apr 01 04:51:28 2008 +0000
@@ -27,7 +27,6 @@
  */
 static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, int rnd)
 {
-    DECLARE_ALIGNED_8(static const uint64_t, ff_pw_28) = 0x001C001C001C001CULL;
     const uint64_t *rnd_reg;
     DECLARE_ALIGNED_8(uint64_t, AA);
     DECLARE_ALIGNED_8(uint64_t, DD);
@@ -98,7 +97,7 @@
     }
 
     /* general case, bilinear */
-    rnd_reg = rnd ? &ff_pw_32.a : &ff_pw_28;
+    rnd_reg = rnd ? &ff_pw_32.a : &ff_pw_28.a;
     asm volatile("movd %2, %%mm4\n\t"
                  "movd %3, %%mm6\n\t"
                  "punpcklwd %%mm4, %%mm4\n\t"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/i386/dsputil_h264_template_ssse3.c	Tue Apr 01 04:51:28 2008 +0000
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2008 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * SSSE3 optimized version of (put|avg)_h264_chroma_mc8.
+ * H264_CHROMA_MC8_TMPL must be defined to the desired function name
+ * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
+ * AVG_OP must be defined as empty for put and as the identity for avg
+ */
+static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y, int rnd)
+{
+    if(y==0 && x==0) {
+        /* no filter needed */
+        H264_CHROMA_MC8_MV0(dst, src, stride, h);
+        return;
+    }
+
+    assert(x<8 && y<8 && x>=0 && y>=0);
+
+    if(y==0 || x==0)
+    {
+        /* 1 dimensional filter only */
+        asm volatile(
+            "movd %0, %%xmm7 \n\t"
+            "movq %1, %%xmm6 \n\t"
+            "pshuflw $0, %%xmm7, %%xmm7 \n\t"
+            "movlhps %%xmm6, %%xmm6 \n\t"
+            "movlhps %%xmm7, %%xmm7 \n\t"
+            :: "r"(255*(x+y)+8), "m"(rnd?ff_pw_4:ff_pw_3));
+
+        if(x) {
+            asm volatile(
+                "1: \n\t"
+                "movq (%1), %%xmm0 \n\t"
+                "movq 1(%1), %%xmm1 \n\t"
+                "movq (%1,%3), %%xmm2 \n\t"
+                "movq 1(%1,%3), %%xmm3 \n\t"
+                "punpcklbw %%xmm1, %%xmm0 \n\t"
+                "punpcklbw %%xmm3, %%xmm2 \n\t"
+                "pmaddubsw %%xmm7, %%xmm0 \n\t"
+                "pmaddubsw %%xmm7, %%xmm2 \n\t"
+         AVG_OP("movq (%0), %%xmm4 \n\t")
+         AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
+                "paddw %%xmm6, %%xmm0 \n\t"
+                "paddw %%xmm6, %%xmm2 \n\t"
+                "psrlw $3, %%xmm0 \n\t"
+                "psrlw $3, %%xmm2 \n\t"
+                "packuswb %%xmm2, %%xmm0 \n\t"
+         AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
+                "movq %%xmm0, (%0) \n\t"
+                "movhps %%xmm0, (%0,%3) \n\t"
+                "sub $2, %2 \n\t"
+                "lea (%1,%3,2), %1 \n\t"
+                "lea (%0,%3,2), %0 \n\t"
+                "jg 1b \n\t"
+                :"+r"(dst), "+r"(src), "+r"(h)
+                :"r"((long)stride)
+            );
+        } else {
+            asm volatile(
+                "1: \n\t"
+                "movq (%1), %%xmm0 \n\t"
+                "movq (%1,%3), %%xmm1 \n\t"
+                "movdqa %%xmm1, %%xmm2 \n\t"
+                "movq (%1,%3,2), %%xmm3 \n\t"
+                "punpcklbw %%xmm1, %%xmm0 \n\t"
+                "punpcklbw %%xmm3, %%xmm2 \n\t"
+                "pmaddubsw %%xmm7, %%xmm0 \n\t"
+                "pmaddubsw %%xmm7, %%xmm2 \n\t"
+         AVG_OP("movq (%0), %%xmm4 \n\t")
+         AVG_OP("movhps (%0,%3), %%xmm4 \n\t")
+                "paddw %%xmm6, %%xmm0 \n\t"
+                "paddw %%xmm6, %%xmm2 \n\t"
+                "psrlw $3, %%xmm0 \n\t"
+                "psrlw $3, %%xmm2 \n\t"
+                "packuswb %%xmm2, %%xmm0 \n\t"
+         AVG_OP("pavgb %%xmm4, %%xmm0 \n\t")
+                "movq %%xmm0, (%0) \n\t"
+                "movhps %%xmm0, (%0,%3) \n\t"
+                "sub $2, %2 \n\t"
+                "lea (%1,%3,2), %1 \n\t"
+                "lea (%0,%3,2), %0 \n\t"
+                "jg 1b \n\t"
+                :"+r"(dst), "+r"(src), "+r"(h)
+                :"r"((long)stride)
+            );
+        }
+        return;
+    }
+
+    /* general case, bilinear */
+    asm volatile(
+        "movd %0, %%xmm7 \n\t"
+        "movd %1, %%xmm6 \n\t"
+        "movdqa %2, %%xmm5 \n\t"
+        "pshuflw $0, %%xmm7, %%xmm7 \n\t"
+        "pshuflw $0, %%xmm6, %%xmm6 \n\t"
+        "movlhps %%xmm7, %%xmm7 \n\t"
+        "movlhps %%xmm6, %%xmm6 \n\t"
+        :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(rnd?ff_pw_32:ff_pw_28)
+    );
+
+    asm volatile(
+        "movq (%1), %%xmm0 \n\t"
+        "movq 1(%1), %%xmm1 \n\t"
+        "punpcklbw %%xmm1, %%xmm0 \n\t"
+        "add %3, %1 \n\t"
+        "1: \n\t"
+        "movq (%1), %%xmm1 \n\t"
+        "movq 1(%1), %%xmm2 \n\t"
+        "movq (%1,%3), %%xmm3 \n\t"
+        "movq 1(%1,%3), %%xmm4 \n\t"
+        "lea (%1,%3,2), %1 \n\t"
+        "punpcklbw %%xmm2, %%xmm1 \n\t"
+        "punpcklbw %%xmm4, %%xmm3 \n\t"
+        "movdqa %%xmm1, %%xmm2 \n\t"
+        "movdqa %%xmm3, %%xmm4 \n\t"
+        "pmaddubsw %%xmm7, %%xmm0 \n\t"
+        "pmaddubsw %%xmm6, %%xmm1 \n\t"
+        "pmaddubsw %%xmm7, %%xmm2 \n\t"
+        "pmaddubsw %%xmm6, %%xmm3 \n\t"
+        "paddw %%xmm5, %%xmm0 \n\t"
+        "paddw %%xmm5, %%xmm2 \n\t"
+        "paddw %%xmm0, %%xmm1 \n\t"
+        "paddw %%xmm2, %%xmm3 \n\t"
+        "movdqa %%xmm4, %%xmm0 \n\t"
+        "psrlw $6, %%xmm1 \n\t"
+        "psrlw $6, %%xmm3 \n\t"
+ AVG_OP("movq (%0), %%xmm2 \n\t")
+ AVG_OP("movhps (%0,%3), %%xmm2 \n\t")
+        "packuswb %%xmm3, %%xmm1 \n\t"
+ AVG_OP("pavgb %%xmm2, %%xmm1 \n\t")
+        "movq %%xmm1, (%0)\n\t"
+        "movhps %%xmm1, (%0,%3)\n\t"
+        "sub $2, %2 \n\t"
+        "lea (%0,%3,2), %0 \n\t"
+        "jg 1b \n\t"
+        :"+r"(dst), "+r"(src), "+r"(h)
+        :"r"((long)stride)
+    );
+}
+
+static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+    asm volatile(
+        "movd %0, %%mm7 \n\t"
+        "movd %1, %%mm6 \n\t"
+        "movq %2, %%mm5 \n\t"
+        "pshufw $0, %%mm7, %%mm7 \n\t"
+        "pshufw $0, %%mm6, %%mm6 \n\t"
+        :: "r"((x*255+8)*(8-y)), "r"((x*255+8)*y), "m"(ff_pw_32)
+    );
+
+    asm volatile(
+        "movd (%1), %%mm0 \n\t"
+        "punpcklbw 1(%1), %%mm0 \n\t"
+        "add %3, %1 \n\t"
+        "1: \n\t"
+        "movd (%1), %%mm1 \n\t"
+        "movd (%1,%3), %%mm3 \n\t"
+        "punpcklbw 1(%1), %%mm1 \n\t"
+        "punpcklbw 1(%1,%3), %%mm3 \n\t"
+        "lea (%1,%3,2), %1 \n\t"
+        "movq %%mm1, %%mm2 \n\t"
+        "movq %%mm3, %%mm4 \n\t"
+        "pmaddubsw %%mm7, %%mm0 \n\t"
+        "pmaddubsw %%mm6, %%mm1 \n\t"
+        "pmaddubsw %%mm7, %%mm2 \n\t"
+        "pmaddubsw %%mm6, %%mm3 \n\t"
+        "paddw %%mm5, %%mm0 \n\t"
+        "paddw %%mm5, %%mm2 \n\t"
+        "paddw %%mm0, %%mm1 \n\t"
+        "paddw %%mm2, %%mm3 \n\t"
+        "movq %%mm4, %%mm0 \n\t"
+        "psrlw $6, %%mm1 \n\t"
+        "psrlw $6, %%mm3 \n\t"
+        "packuswb %%mm1, %%mm1 \n\t"
+        "packuswb %%mm3, %%mm3 \n\t"
+ AVG_OP("pavgb (%0), %%mm1 \n\t")
+ AVG_OP("pavgb (%0,%3), %%mm3 \n\t")
+        "movd %%mm1, (%0)\n\t"
+        "movd %%mm3, (%0,%3)\n\t"
+        "sub $2, %2 \n\t"
+        "lea (%0,%3,2), %0 \n\t"
+        "jg 1b \n\t"
+        :"+r"(dst), "+r"(src), "+r"(h)
+        :"r"((long)stride)
+    );
+}
+
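The asm above leans on two tricks worth spelling out. First, pmaddubsw multiplies interleaved unsigned bytes by signed bytes and adds adjacent products, so a broadcast byte pair (8-x, x) applied to interleaved pixels (a, b) yields (8-x)*a + x*b in a single instruction; in the bilinear case the pair is pre-scaled by the vertical weights (8-y) and y, and the two pmaddubsw results are summed. Second, the odd-looking immediates such as 255*(x+y)+8 are just that byte pair pre-packed: since 0 <= x <= 7, 255*x + 8 == 256*x + (8-x), i.e. low byte 8-x, high byte x, which movd plus pshuflw then broadcasts. A small self-check of the identity (illustrative, not part of the changeset):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int x;
        for (x = 0; x < 8; x++) {
            uint16_t packed = 255 * x + 8;     /* value handed to movd */
            assert((packed & 0xFF) == 8 - x);  /* low byte:  8-x       */
            assert((packed >> 8)   == x);      /* high byte: x         */
        }
        return 0;
    }

The rnd argument selects the rounding constant: 32 before the >>6 (or 4 before the one-dimensional >>3) rounds to nearest, while the variant installed as put_no_rnd_h264_chroma_pixels_tab[0] uses 28 (or 3), biasing results downward by half a step.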
--- a/i386/dsputil_mmx.c	Tue Apr 01 01:28:26 2008 +0000
+++ b/i386/dsputil_mmx.c	Tue Apr 01 04:51:28 2008 +0000
@@ -54,6 +54,7 @@
 DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
 DECLARE_ALIGNED_16(const xmm_t,    ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
 DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
+DECLARE_ALIGNED_16(const xmm_t,    ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
 DECLARE_ALIGNED_16(const xmm_t,    ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
 DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
 DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
@@ -2376,6 +2377,11 @@
             H264_QPEL_FUNCS(3, 1, ssse3);
             H264_QPEL_FUNCS(3, 2, ssse3);
             H264_QPEL_FUNCS(3, 3, ssse3);
+            c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd;
+            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
+            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
+            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
+            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
             c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
         }
 #endif
--- a/i386/dsputil_mmx.h	Tue Apr 01 01:28:26 2008 +0000
+++ b/i386/dsputil_mmx.h	Tue Apr 01 04:51:28 2008 +0000
@@ -39,6 +39,7 @@
 extern const uint64_t ff_pw_15;
 extern const xmm_t    ff_pw_16;
 extern const uint64_t ff_pw_20;
+extern const xmm_t    ff_pw_28;
 extern const xmm_t    ff_pw_32;
 extern const uint64_t ff_pw_42;
 extern const uint64_t ff_pw_64;
--- a/i386/h264dsp_mmx.c	Tue Apr 01 01:28:26 2008 +0000
+++ b/i386/h264dsp_mmx.c	Tue Apr 01 04:51:28 2008 +0000
@@ -1968,6 +1968,42 @@
 #undef H264_CHROMA_MC4_TMPL
 #undef H264_CHROMA_MC8_MV0
 
+#ifdef HAVE_SSSE3
+#define AVG_OP(X)
+#undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_ssse3
+#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_ssse3
+#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
+#include "dsputil_h264_template_ssse3.c"
+static void put_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
+}
+static void put_h264_chroma_mc8_ssse3_nornd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+    put_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 0);
+}
+
+#undef AVG_OP
+#undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC8_MV0
+#define AVG_OP(X) X
+#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_ssse3
+#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_ssse3
+#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
+#include "dsputil_h264_template_ssse3.c"
+static void avg_h264_chroma_mc8_ssse3_rnd(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
+{
+    avg_h264_chroma_mc8_ssse3(dst, src, stride, h, x, y, 1);
+}
+#undef AVG_OP
+#undef H264_CHROMA_MC8_TMPL
+#undef H264_CHROMA_MC4_TMPL
+#undef H264_CHROMA_MC8_MV0
+#endif
+
 /***********************************/
 /* weighted prediction */