changeset 995:edc10966b081 libavcodec

altivec jumbo patch by (Romain Dolbeau <dolbeaur at club-internet dot fr>)
author michaelni
date Sat, 11 Jan 2003 20:51:03 +0000
parents 7701ff462e3a
children ad44196ea483
files Makefile dsputil.h fft.c ppc/dsputil_altivec.c ppc/dsputil_altivec.h ppc/dsputil_ppc.c ppc/fft_altivec.c ppc/gmc_altivec.c ppc/mpegvideo_altivec.c
diffstat 9 files changed, 262 insertions(+), 26 deletions(-)
--- a/Makefile	Sat Jan 11 20:34:38 2003 +0000
+++ b/Makefile	Sat Jan 11 20:51:03 2003 +0000
@@ -85,7 +85,7 @@
 ifeq ($(TARGET_ALTIVEC),yes)
 CFLAGS += -faltivec
 OBJS += ppc/dsputil_altivec.o ppc/mpegvideo_altivec.o ppc/idct_altivec.o \
-        ppc/fft_altivec.o
+        ppc/fft_altivec.o ppc/gmc_altivec.o
 endif
 
 SRCS := $(OBJS:.o=.c) $(ASM_OBJS:.o=.S)
--- a/dsputil.h	Sat Jan 11 20:34:38 2003 +0000
+++ b/dsputil.h	Sat Jan 11 20:51:03 2003 +0000
@@ -158,6 +158,10 @@
 
 #define emms_c()
 
+/* should be defined by architectures supporting
+   one or more multimedia extensions */
+int mm_support(void);
+
 #if defined(HAVE_MMX)
 
 #undef emms_c
@@ -170,7 +174,6 @@
 
 extern int mm_flags;
 
-int mm_support(void);
 void add_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line_size);
 void put_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line_size);
 
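Note that mm_support() is now declared unconditionally, so every architecture reports its multimedia extensions through the same entry point, and the MMX-only section below keeps just its own symbols. A minimal sketch of the resulting caller-side check (MM_ALTIVEC is one of the flag bits used elsewhere in this changeset):

    /* sketch: runtime capability check through the common mm_support() */
    if (mm_support() & MM_ALTIVEC) {
        /* safe to install AltiVec-accelerated function pointers */
    }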
--- a/fft.c	Sat Jan 11 20:34:38 2003 +0000
+++ b/fft.c	Sat Jan 11 20:51:03 2003 +0000
@@ -57,9 +57,9 @@
 
 #if defined(HAVE_MMX)
         has_vectors = mm_support() & MM_SSE;
-#else
-        /* XXX: should also use mm_support() ? */
-        has_vectors = has_altivec() & MM_ALTIVEC;
+#endif
+#if defined(HAVE_ALTIVEC)
+        has_vectors = mm_support() & MM_ALTIVEC;
 #endif
         if (has_vectors) {
             int np, nblocks, np2, l;
--- a/ppc/dsputil_altivec.c	Sat Jan 11 20:34:38 2003 +0000
+++ b/ppc/dsputil_altivec.c	Sat Jan 11 20:51:03 2003 +0000
@@ -585,6 +585,62 @@
     }
 }
 
+int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
+  return pix_abs16x16_altivec(a,b,stride);
+}
+
+int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
+  return pix_abs8x8_altivec(a,b,stride);
+}
+
+void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
+#if 0
+    int i;
+    for(i=0; i+7<w; i++){
+        dst[i+0] += src[i+0];
+        dst[i+1] += src[i+1];
+        dst[i+2] += src[i+2];
+        dst[i+3] += src[i+3];
+        dst[i+4] += src[i+4];
+        dst[i+5] += src[i+5];
+        dst[i+6] += src[i+6];
+        dst[i+7] += src[i+7];
+    }
+    for(; i<w; i++)
+        dst[i+0] += src[i+0];
+#else
+    register int i;
+    register uint8_t *temp_src = src, *temp_dst = dst;
+    register vector unsigned char vdst, vsrc, temp1, temp2;
+    register vector unsigned char perm;
+    register int count = 0;
+
+    for (i = 0; (i < w) && ((unsigned long)temp_dst & 0x0000000F) ; i++)
+    {
+      dst[i] += src[i];
+      temp_src ++;
+      temp_dst ++;
+    }
+    /* temp_dst is a properly aligned pointer */
+    /* we still need to deal with ill-aligned src */
+    perm = vec_lvsl(0, temp_src);
+    temp1 = vec_ld(0, temp_src);
+    for (; (i + 15) < w; i += 16)
+    {
+      temp2 = vec_ld(count + 16, temp_src);
+      vdst = vec_ld(count, temp_dst);
+      vsrc = vec_perm(temp1, temp2, perm);
+      temp1 = temp2;
+      vdst = vec_add(vsrc, vdst);
+      vec_st(vdst, count, temp_dst);
+      count += 16;
+    }
+    for (; (i < w) ; i++)
+    {
+      dst[i] += src[i];
+    }
+#endif
+}
 
 int has_altivec(void)
 {
@@ -600,4 +656,3 @@
 #endif
     return 0;
 }
-
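add_bytes_altivec() above first advances byte by byte until dst is 16-byte aligned, then handles the still possibly misaligned src with the standard AltiVec unaligned-read idiom: vec_lvsl() builds a permute map from the low four bits of the pointer, and vec_perm() splices two aligned loads into the 16 bytes actually wanted. A self-contained sketch of the idiom, with a hypothetical helper name:

    /* sketch: read 16 bytes starting at a possibly unaligned pointer.
       caveat of the idiom: the second vec_ld can touch up to 15 bytes
       beyond p+15, so callers must ensure that read stays in bounds. */
    static inline vector unsigned char unaligned_load(const uint8_t *p)
    {
        vector unsigned char msq  = vec_ld(0, p);    /* aligned quadword covering p */
        vector unsigned char lsq  = vec_ld(16, p);   /* next aligned quadword       */
        vector unsigned char perm = vec_lvsl(0, p);  /* permute map from p & 0xF    */
        return vec_perm(msq, lsq, perm);             /* splice out the wanted bytes */
    }

In the loop above, the high quadword is carried over in temp1 from one iteration to the next, so the steady state costs one vec_ld per 16 output bytes instead of two.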
--- a/ppc/dsputil_altivec.h	Sat Jan 11 20:34:38 2003 +0000
+++ b/ppc/dsputil_altivec.h	Sat Jan 11 20:51:03 2003 +0000
@@ -22,6 +22,8 @@
 extern int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
 extern int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
 extern int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
+extern int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride);
+extern int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride);
 extern int pix_norm1_altivec(uint8_t *pix, int line_size);
 extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
 extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
@@ -29,6 +31,10 @@
 extern void diff_pixels_altivec(DCTELEM* block, const UINT8* s1, const UINT8* s2, int stride);
 extern void get_pixels_altivec(DCTELEM* block, const UINT8 * pixels, int line_size);
 
+extern void gmc1_altivec(UINT8 *dst, UINT8 *src, int stride, int h, int x16, int y16, int rounder);
+
+extern void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w);
+
 extern int has_altivec(void);
 
 
--- a/ppc/dsputil_ppc.c	Sat Jan 11 20:34:38 2003 +0000
+++ b/ppc/dsputil_ppc.c	Sat Jan 11 20:51:03 2003 +0000
@@ -25,6 +25,17 @@
 
 int mm_flags = 0;
 
+int mm_support(void)
+{
+    int result = 0;
+#ifdef HAVE_ALTIVEC
+    if (has_altivec()) {
+        result |= MM_ALTIVEC;
+    }
+#endif /* HAVE_ALTIVEC */
+    return result;
+}
+
 void dsputil_init_ppc(DSPContext* c, unsigned mask)
 {
     // Common optimisations whether Altivec or not
@@ -41,13 +52,19 @@
         c->pix_abs16x16_xy2 = pix_abs16x16_xy2_altivec;
         c->pix_abs16x16 = pix_abs16x16_altivec;
         c->pix_abs8x8 = pix_abs8x8_altivec;
+        c->sad[0]= sad16x16_altivec;
+        c->sad[1]= sad8x8_altivec;
         c->pix_norm1 = pix_norm1_altivec;
         c->sse[1]= sse8_altivec;
         c->sse[0]= sse16_altivec;
         c->pix_sum = pix_sum_altivec;
         c->diff_pixels = diff_pixels_altivec;
         c->get_pixels = get_pixels_altivec;
-
+// next one disabled as it is untested.
+#if 0
+        c->add_bytes= add_bytes_altivec;
+#endif
+        c->gmc1 = gmc1_altivec;
     } else
 #endif
     {
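The sad[] slots of DSPContext take an extra opaque context argument, which is why this changeset adds the thin sad16x16_altivec()/sad8x8_altivec() wrappers around the pix_abs routines instead of assigning those directly. The init code itself is plain function-pointer dispatch; a reduced sketch with hypothetical struct and function names (the real ones live in dsputil.h):

    /* sketch of the dispatch pattern used by dsputil_init_ppc() */
    typedef struct {
        int (*sad[2])(void *ctx, uint8_t *a, uint8_t *b, int stride);
    } MiniDSPContext;

    static void mini_dsputil_init(MiniDSPContext *c)
    {
        if (mm_support() & MM_ALTIVEC) {
            c->sad[0] = sad16x16_altivec;  /* 16x16 blocks */
            c->sad[1] = sad8x8_altivec;    /* 8x8 blocks   */
        }
    }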
--- a/ppc/fft_altivec.c	Sat Jan 11 20:34:38 2003 +0000
+++ b/ppc/fft_altivec.c	Sat Jan 11 20:51:03 2003 +0000
@@ -35,7 +35,7 @@
  */
 void fft_calc_altivec(FFTContext *s, FFTComplex *z)
 {
-    register const vector float vczero = (vector float)( 0., 0., 0., 0.);
+    register const vector float vczero = (const vector float)(0.);
     
     int ln = s->nbits;
     int	j, np, np2;
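This change, like the matching ones in mpegvideo_altivec.c below, relies on the Motorola-style AltiVec literal syntax accepted under -faltivec: a parenthesized literal containing a single scalar is replicated across every lane, so spelling out all four elements is redundant. For illustration:

    /* sketch: single-scalar AltiVec literals splat across all lanes */
    vector float          vzero = (vector float)(0.0f);       /* {0, 0, 0, 0}     */
    vector unsigned short vcsr8 = (vector unsigned short)(8); /* eight lanes of 8 */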
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ppc/gmc_altivec.c	Sat Jan 11 20:51:03 2003 +0000
@@ -0,0 +1,159 @@
+/*
+ * GMC (Global Motion Compensation)
+ * AltiVec-enabled
+ * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "../dsputil.h"
+
+#include "dsputil_altivec.h"
+
+/*
+  AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8,
+  to preserve proper dst alignment.
+*/
+void gmc1_altivec(UINT8 *dst /* align 8 */, UINT8 *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
+{
+#if 0
+    const int A=(16-x16)*(16-y16);
+    const int B=(   x16)*(16-y16);
+    const int C=(16-x16)*(   y16);
+    const int D=(   x16)*(   y16);
+    
+    int i;
+    
+    for(i=0; i<h; i++)
+    {
+        dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
+        dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
+        dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
+        dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
+        dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
+        dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
+        dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
+        dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
+        dst+= stride;
+        src+= stride;
+    }
+#else
+    const unsigned short __attribute__ ((aligned(16))) rounder_a[8] =
+      {rounder, rounder, rounder, rounder,
+       rounder, rounder, rounder, rounder};
+    const unsigned short __attribute__ ((aligned(16))) ABCD[8] =
+      {
+        (16-x16)*(16-y16), /* A */
+        (   x16)*(16-y16), /* B */
+        (16-x16)*(   y16), /* C */
+        (   x16)*(   y16), /* D */
+        0, 0, 0, 0         /* padding */
+      };
+    
+    register const vector unsigned char vczero = (const vector unsigned char)(0);
+    register const vector unsigned short vcsr8 = (const vector unsigned short)(8);
+    register vector unsigned char dstv, dstv2, src_0, src_1, srcvA, srcvB, srcvC, srcvD;
+    register vector unsigned short Av, Bv, Cv, Dv, rounderV, tempA, tempB, tempC, tempD;
+    int i;
+    unsigned long dst_odd = (unsigned long)dst & 0x0000000F;
+    unsigned long src_really_odd = (unsigned long)src & 0x0000000F;
+
+    tempA = vec_ld(0, (unsigned short*)ABCD);
+    Av = vec_splat(tempA, 0);
+    Bv = vec_splat(tempA, 1);
+    Cv = vec_splat(tempA, 2);
+    Dv = vec_splat(tempA, 3);
+
+    rounderV = vec_ld(0, (unsigned short*)rounder_a);
+    
+    // we'll be able to pick up our 9 char elements
+    // at src from those 32 bytes.
+    // we load the first batch here, as inside the loop
+    // we can reuse 'src + stride' from one iteration
+    // as the 'src' of the next.
+    src_0 = vec_ld(0, src);
+    src_1 = vec_ld(16, src);
+    srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
+    
+    if (src_really_odd != 0x0000000F)
+    { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
+      srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
+    }
+    else
+    {
+      srcvB = src_1;
+    }
+    srcvA = vec_mergeh(vczero, srcvA);
+    srcvB = vec_mergeh(vczero, srcvB);
+    
+    for(i=0; i<h; i++)
+    {
+      dst_odd = (unsigned long)dst & 0x0000000F;
+      src_really_odd = (((unsigned long)src) + stride) & 0x0000000F;
+      
+      dstv = vec_ld(0, dst);
+      
+      // we'll be able to pick up our 9 char elements
+      // at src + stride from those 32 bytes.
+      // we then reuse the resulting two vectors srcvC and srcvD
+      // as the next srcvA and srcvB.
+      src_0 = vec_ld(stride + 0, src);
+      src_1 = vec_ld(stride + 16, src);
+      srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
+      
+      if (src_really_odd != 0x0000000F)
+      { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector.
+        srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
+      }
+      else
+      {
+        srcvD = src_1;
+      }
+      
+      srcvC = vec_mergeh(vczero, srcvC);
+      srcvD = vec_mergeh(vczero, srcvD);
+      
+
+      // OK, now we (finally) do the math :-)
+      // those four instructions replace 32 int muls & 32 int adds.
+      // isn't AltiVec nice?
+      tempA = vec_mladd((vector unsigned short)srcvA, Av, rounderV);
+      tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA);
+      tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB);
+      tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC);
+      
+      srcvA = srcvC;
+      srcvB = srcvD;
+      
+      tempD = vec_sr(tempD, vcsr8);
+      
+      dstv2 = vec_pack(tempD, (vector unsigned short)vczero);
+      
+      if (dst_odd)
+      {
+        dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1));
+      }
+      else
+      {
+        dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3));
+      }
+      
+      vec_st(dstv2, 0, dst);
+      
+      dst += stride;
+      src += stride;
+    }
+#endif
+}
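As a sanity check on the arithmetic: the four bilinear weights always satisfy A + B + C + D = (16-y16)*16 + y16*16 = 256, so the final shift right by 8 (the vec_sr by vcsr8 above) renormalizes exactly, with rounder biasing the truncation toward round-to-nearest. A tiny standalone verification:

    /* sketch: the gmc1 weights sum to 256 for any x16, y16 in [0,16] */
    #include <assert.h>
    int main(void)
    {
        for (int x16 = 0; x16 <= 16; x16++)
            for (int y16 = 0; y16 <= 16; y16++) {
                int A = (16 - x16) * (16 - y16);
                int B = (     x16) * (16 - y16);
                int C = (16 - x16) * (     y16);
                int D = (     x16) * (     y16);
                assert(A + B + C + D == 256);
            }
        return 0;
    }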
--- a/ppc/mpegvideo_altivec.c	Sat Jan 11 20:34:38 2003 +0000
+++ b/ppc/mpegvideo_altivec.c	Sat Jan 11 20:51:03 2003 +0000
@@ -21,10 +21,6 @@
 #include "../dsputil.h"
 #include "../mpegvideo.h"
 
-
-// Used when initializing constant vectors
-#define FOUR_INSTANCES(x) x,x,x,x
-
 // Swaps two variables (used for altivec registers)
 #define SWAP(a,b) \
 do { \
@@ -100,7 +96,7 @@
     int lastNonZero;
     vector float row0, row1, row2, row3, row4, row5, row6, row7;
     vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
-    const vector float zero = (vector float)(FOUR_INSTANCES(0.0f));
+    const vector float zero = (const vector float)(0.0f);
 
     // Load the data into the row/alt vectors
     {
@@ -144,18 +140,18 @@
 		// in the vector local variables, as floats, which we'll use during the
 		// quantize step...
     {
-        const vector float vec_0_298631336 = (vector float)(FOUR_INSTANCES(0.298631336f));
-        const vector float vec_0_390180644 = (vector float)(FOUR_INSTANCES(-0.390180644f));
-        const vector float vec_0_541196100 = (vector float)(FOUR_INSTANCES(0.541196100f));
-        const vector float vec_0_765366865 = (vector float)(FOUR_INSTANCES(0.765366865f));
-        const vector float vec_0_899976223 = (vector float)(FOUR_INSTANCES(-0.899976223f));
-        const vector float vec_1_175875602 = (vector float)(FOUR_INSTANCES(1.175875602f));
-        const vector float vec_1_501321110 = (vector float)(FOUR_INSTANCES(1.501321110f));
-        const vector float vec_1_847759065 = (vector float)(FOUR_INSTANCES(-1.847759065f));
-        const vector float vec_1_961570560 = (vector float)(FOUR_INSTANCES(-1.961570560f));
-        const vector float vec_2_053119869 = (vector float)(FOUR_INSTANCES(2.053119869f));
-        const vector float vec_2_562915447 = (vector float)(FOUR_INSTANCES(-2.562915447f));
-        const vector float vec_3_072711026 = (vector float)(FOUR_INSTANCES(3.072711026f));
+        const vector float vec_0_298631336 = (vector float)(0.298631336f);
+        const vector float vec_0_390180644 = (vector float)(-0.390180644f);
+        const vector float vec_0_541196100 = (vector float)(0.541196100f);
+        const vector float vec_0_765366865 = (vector float)(0.765366865f);
+        const vector float vec_0_899976223 = (vector float)(-0.899976223f);
+        const vector float vec_1_175875602 = (vector float)(1.175875602f);
+        const vector float vec_1_501321110 = (vector float)(1.501321110f);
+        const vector float vec_1_847759065 = (vector float)(-1.847759065f);
+        const vector float vec_1_961570560 = (vector float)(-1.961570560f);
+        const vector float vec_2_053119869 = (vector float)(2.053119869f);
+        const vector float vec_2_562915447 = (vector float)(-2.562915447f);
+        const vector float vec_3_072711026 = (vector float)(3.072711026f);
 
 
         int whichPass, whichHalf;
@@ -309,7 +305,7 @@
 				// rounding when we convert to int, instead of flooring.)
         {
             vector signed int biasInt;
-            const vector float negOneFloat = (vector float)(FOUR_INSTANCES(-1.0f));
+            const vector float negOneFloat = (vector float)(-1.0f);
             LOAD4(biasInt, biasAddr);
             bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT);
             negBias = vec_madd(bias, negOneFloat, zero);
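The negBias computation above works because vec_madd(a, b, c) evaluates a*b + c per lane; multiplying by a -1.0 splat is the usual way to negate a float vector, AltiVec having no dedicated float negate. A minimal equivalent, as a standalone helper (name hypothetical):

    /* sketch: negate a float vector with a fused multiply-add */
    static inline vector float vneg(vector float v)
    {
        const vector float zero   = (vector float)(0.0f);
        const vector float negone = (vector float)(-1.0f);
        return vec_madd(v, negone, zero);  /* v * -1.0 + 0.0 */
    }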