diff ppc/int_altivec.c @ 10644:5da7180afadf libavcodec

refactor and optimize scalarproduct: 29-105% faster apply_filter and 6-90% faster APE decoding on Core 2 (any x86 other than Core 2 probably gains much less, since the win is mostly due to SSSE3 cachesplit avoidance and I haven't written the full gamut of other cachesplit modes), and 9-123% faster APE decoding on G4.
author lorenm
date Sat, 05 Dec 2009 15:09:10 +0000
parents 9f4b529bd5c0
children 7dd2a45249a9
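
For context: this changeset drops the separate add_int16/sub_int16 DSP hooks and replaces them with a single fused hook that computes a dot product while applying a multiply-accumulate update in the same pass. Below is a minimal scalar sketch of that contract, reconstructed from the function's signature and the AltiVec body in the diff rather than copied from this changeset (the _ref name is illustrative):

    #include <stdint.h>

    /* Fused operation: return the dot product of v1 and v2 over `order`
     * elements while simultaneously updating v1[i] += mul * v3[i]. */
    static int32_t scalarproduct_and_madd_int16_ref(int16_t *v1, const int16_t *v2,
                                                    const int16_t *v3, int order, int mul)
    {
        int32_t res = 0;
        while (order--) {
            res   += *v1 * *v2++;   /* accumulate before v1 is updated */
            *v1++ += mul * *v3++;   /* multiply-accumulate filter tap update */
        }
        return res;
    }

Fusing the update into the dot-product pass saves a full read-modify-write sweep over v1, which is presumably part of the speedup beyond the cachesplit avoidance mentioned above.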
--- a/ppc/int_altivec.c	Sat Dec 05 09:41:23 2009 +0000
+++ b/ppc/int_altivec.c	Sat Dec 05 15:09:10 2009 +0000
@@ -79,34 +79,6 @@
     return u.score[3];
 }
 
-static void add_int16_altivec(int16_t * v1, int16_t * v2, int order)
-{
-    int i;
-    register vec_s16 vec, *pv;
-
-    for(i = 0; i < order; i += 8){
-        pv = (vec_s16*)v2;
-        vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
-        vec_st(vec_add(vec_ld(0, v1), vec), 0, v1);
-        v1 += 8;
-        v2 += 8;
-    }
-}
-
-static void sub_int16_altivec(int16_t * v1, int16_t * v2, int order)
-{
-    int i;
-    register vec_s16 vec, *pv;
-
-    for(i = 0; i < order; i += 8){
-        pv = (vec_s16*)v2;
-        vec = vec_perm(pv[0], pv[1], vec_lvsl(0, v2));
-        vec_st(vec_sub(vec_ld(0, v1), vec), 0, v1);
-        v1 += 8;
-        v2 += 8;
-    }
-}
-
 static int32_t scalarproduct_int16_altivec(int16_t * v1, int16_t * v2, int order, const int shift)
 {
     int i;
@@ -137,10 +109,44 @@
     return ires;
 }
 
+static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
+{
+    LOAD_ZERO;
+    vec_s16 *pv1 = (vec_s16*)v1;
+    vec_s16 *pv2 = (vec_s16*)v2;
+    vec_s16 *pv3 = (vec_s16*)v3;
+    register vec_s16 muls = {mul,mul,mul,mul,mul,mul,mul,mul};
+    register vec_s16 t0, t1, i0, i1;
+    register vec_s16 i2 = pv2[0], i3 = pv3[0];
+    register vec_s32 res = zero_s32v;
+    register vec_u8 align = vec_lvsl(0, v2);
+    int32_t ires;
+    order >>= 4;
+    do {
+        t0 = vec_perm(i2, pv2[1], align);
+        i2 = pv2[2];
+        t1 = vec_perm(pv2[1], i2, align);
+        i0 = pv1[0];
+        i1 = pv1[1];
+        res = vec_msum(t0, i0, res);
+        res = vec_msum(t1, i1, res);
+        t0 = vec_perm(i3, pv3[1], align);
+        i3 = pv3[2];
+        t1 = vec_perm(pv3[1], i3, align);
+        pv1[0] = vec_mladd(t0, muls, i0);
+        pv1[1] = vec_mladd(t1, muls, i1);
+        pv1 += 2;
+        pv2 += 2;
+        pv3 += 2;
+    } while(--order);
+    res = vec_splat(vec_sums(res, zero_s32v), 3);
+    vec_ste(res, 0, &ires);
+    return ires;
+}
+
 void int_init_altivec(DSPContext* c, AVCodecContext *avctx)
 {
     c->ssd_int8_vs_int16 = ssd_int8_vs_int16_altivec;
-    c->add_int16 = add_int16_altivec;
-    c->sub_int16 = sub_int16_altivec;
     c->scalarproduct_int16 = scalarproduct_int16_altivec;
+    c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec;
 }
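
A note on the AltiVec body above: v1 is dereferenced directly (pv1[0], pv1[1]) and must therefore be 16-byte aligned, while v2 and v3 may be unaligned; a single permute mask from vec_lvsl(0, v2) serves both, so the two inputs are assumed to share the same alignment offset. The i2/i3 temporaries carry the trailing aligned quadword across iterations so each quadword is loaded only once, and because the loop prefetches pv2[2]/pv3[2] it reads up to one quadword past the arrays on the last iteration, which relies on the caller's buffers being padded (FFmpeg's are). order >>= 4 plus the do-while also means order must be a nonzero multiple of 16. Here is the realignment idiom in isolation, as a self-contained sketch (the helper name is mine):

    #include <altivec.h>
    #include <stdint.h>

    /* Classic AltiVec unaligned load: two aligned loads bracketing the
     * address, stitched together by a permute mask derived from the
     * address's low four bits. Offset 15 (not 16) for the second load
     * keeps it within the same quadword when src is already aligned. */
    static inline vector signed short load_unaligned_s16(const int16_t *src)
    {
        vector unsigned char perm = vec_lvsl(0, src);  /* mask from address  */
        vector signed short  lo   = vec_ld(0,  src);   /* quadword at/below  */
        vector signed short  hi   = vec_ld(15, src);   /* quadword at/above  */
        return vec_perm(lo, hi, perm);                 /* reassembled vector */
    }

The loop in the patch amortizes this further: the mask is computed once outside the loop, and the high quadword of one iteration's load becomes the low quadword of the next.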