changeset 3577:5be5a936c8a9 libavcodec

Clean up: make dsputil subfile names consistent
author lu_zero
date Sun, 13 Aug 2006 08:26:43 +0000
parents f7125bf10892
children e0fb0ff7c6a8
files Makefile ppc/dsputil_h264_altivec.c ppc/dsputil_h264_template_altivec.c ppc/dsputil_snow_altivec.c ppc/h264_altivec.c ppc/h264_template_altivec.c ppc/snow_altivec.c
diffstat 7 files changed, 1811 insertions(+), 1811 deletions(-) [+]
--- a/Makefile	Sat Aug 12 16:37:31 2006 +0000
+++ b/Makefile	Sun Aug 13 08:26:43 2006 +0000
@@ -349,8 +349,8 @@
                                           ppc/fft_altivec.o          \
                                           ppc/gmc_altivec.o          \
                                           ppc/fdct_altivec.o         \
-                                          ppc/dsputil_h264_altivec.o \
-                                          ppc/dsputil_snow_altivec.o \
+                                          ppc/h264_altivec.o 	     \
+                                          ppc/snow_altivec.o 	     \
                                           ppc/vc1dsp_altivec.o
 
 CFLAGS += $(CFLAGS-yes)
--- a/ppc/dsputil_h264_altivec.c	Sat Aug 12 16:37:31 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,306 +0,0 @@
-/*
- * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "../dsputil.h"
-
-#include "gcc_fixes.h"
-
-#include "dsputil_altivec.h"
-
-#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
-#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
-
-#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
-#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
-#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
-#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
-#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
-#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
-#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
-#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
-#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
-#include "dsputil_h264_template_altivec.c"
-#undef OP_U8_ALTIVEC
-#undef PREFIX_h264_chroma_mc8_altivec
-#undef PREFIX_h264_chroma_mc8_num
-#undef PREFIX_h264_qpel16_h_lowpass_altivec
-#undef PREFIX_h264_qpel16_h_lowpass_num
-#undef PREFIX_h264_qpel16_v_lowpass_altivec
-#undef PREFIX_h264_qpel16_v_lowpass_num
-#undef PREFIX_h264_qpel16_hv_lowpass_altivec
-#undef PREFIX_h264_qpel16_hv_lowpass_num
-
-#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
-#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
-#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
-#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
-#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
-#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
-#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
-#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
-#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
-#include "dsputil_h264_template_altivec.c"
-#undef OP_U8_ALTIVEC
-#undef PREFIX_h264_chroma_mc8_altivec
-#undef PREFIX_h264_chroma_mc8_num
-#undef PREFIX_h264_qpel16_h_lowpass_altivec
-#undef PREFIX_h264_qpel16_h_lowpass_num
-#undef PREFIX_h264_qpel16_v_lowpass_altivec
-#undef PREFIX_h264_qpel16_v_lowpass_num
-#undef PREFIX_h264_qpel16_hv_lowpass_altivec
-#undef PREFIX_h264_qpel16_hv_lowpass_num
-
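The paired blocks above are a poor man's template: dsputil_h264_template_altivec.c is compiled twice, once with OP_U8_ALTIVEC defined as a plain store (put) and once as a vec_avg against the destination (avg), while the PREFIX_* defines rename every emitted function. A self-contained sketch of the same generate-both-variants idea, using a macro instead of the include-twice trick (all names here are hypothetical):

#include <stdint.h>

/* the "template" body, inlined as a macro for this sketch */
#define DEFINE_ROW_OP(NAME, OP)                           \
static void NAME(uint8_t *dst, const uint8_t *src, int n) \
{                                                         \
    for (int i = 0; i < n; i++) {                         \
        uint8_t d;                                        \
        OP(d, src[i], dst[i]);                            \
        dst[i] = d;                                       \
    }                                                     \
}

#define PUT_OP(d, s, dst) d = (s)                    /* overwrite        */
#define AVG_OP(d, s, dst) d = ((dst) + (s) + 1) >> 1 /* vec_avg rounding */

DEFINE_ROW_OP(put_row, PUT_OP) /* emits put_row() */
DEFINE_ROW_OP(avg_row, AVG_OP) /* emits avg_row() */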
-#define H264_MC(OPNAME, SIZE, CODETYPE) \
-static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
-    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
-    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
-    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
-    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
-    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
-    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
-    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
-    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
-    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
-}\
-
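Each H264_MC(OPNAME, SIZE, CODETYPE) expansion emits the 16 quarter-pel motion compensation entry points mcXY, where X and Y are the quarter-pel offsets in x and y: even offsets come straight from the 6-tap lowpass filters, and odd offsets average a neighbouring integer- or half-pel plane with a filter output. Hand-expanding one case makes that concrete; this is what the macro produces for put/mc30 (shown here only for illustration, and valid only after the pixels16_l2 helpers below are defined):

static void put_h264_qpel16_mc30_altivec(uint8_t *dst, uint8_t *src, int stride)
{
    DECLARE_ALIGNED_16(uint8_t, half[16*16]);
    /* half-pel horizontal plane into a 16x16 scratch buffer */
    put_h264_qpel16_h_lowpass_altivec(half, src, 16, stride);
    /* averaged with the pixel one step right = the 3/4-pel x position */
    put_pixels16_l2_altivec(dst, src + 1, half, stride, stride, 16);
}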
-static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
-                                    const uint8_t * src2, int dst_stride,
-                                    int src_stride1, int h)
-{
-    int i;
-    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;
-
-    mask_ = vec_lvsl(0, src2);
-
-    for (i = 0; i < h; i++) {
-
-        tmp1 = vec_ld(i * src_stride1, src1);
-        mask = vec_lvsl(i * src_stride1, src1);
-        tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
-        a = vec_perm(tmp1, tmp2, mask);
-
-        tmp1 = vec_ld(i * 16, src2);
-        tmp2 = vec_ld(i * 16 + 15, src2);
-
-        b = vec_perm(tmp1, tmp2, mask_);
-
-        tmp1 = vec_ld(0, dst);
-        mask = vec_lvsl(0, dst);
-        tmp2 = vec_ld(15, dst);
-
-        d = vec_avg(a, b);
-
-        edges = vec_perm(tmp2, tmp1, mask);
-
-        align = vec_lvsr(0, dst);
-
-        tmp1 = vec_perm(edges, d, align);
-        tmp2 = vec_perm(d, edges, align);
-
-        vec_st(tmp2, 15, dst);
-        vec_st(tmp1, 0 , dst);
-
-        dst += dst_stride;
-    }
-}
-
-static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
-                                    const uint8_t * src2, int dst_stride,
-                                    int src_stride1, int h)
-{
-    int i;
-    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;
-
-    mask_ = vec_lvsl(0, src2);
-
-    for (i = 0; i < h; i++) {
-
-        tmp1 = vec_ld(i * src_stride1, src1);
-        mask = vec_lvsl(i * src_stride1, src1);
-        tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
-        a = vec_perm(tmp1, tmp2, mask);
-
-        tmp1 = vec_ld(i * 16, src2);
-        tmp2 = vec_ld(i * 16 + 15, src2);
-
-        b = vec_perm(tmp1, tmp2, mask_);
-
-        tmp1 = vec_ld(0, dst);
-        mask = vec_lvsl(0, dst);
-        tmp2 = vec_ld(15, dst);
-
-        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
-
-        edges = vec_perm(tmp2, tmp1, mask);
-
-        align = vec_lvsr(0, dst);
-
-        tmp1 = vec_perm(edges, d, align);
-        tmp2 = vec_perm(d, edges, align);
-
-        vec_st(tmp2, 15, dst);
-        vec_st(tmp1, 0 , dst);
-
-        dst += dst_stride;
-    }
-}
-
-/* Implemented but could be faster
-#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
-#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
- */
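For reference, a scalar sketch of what the two _l2 routines compute (not part of the changeset; types as in the file): each output byte is the rounded average of the two sources, with src2 rows packed 16 bytes apart to match the i * 16 offsets in the vector loads. The avg variant averages that result once more with the existing destination, which is what its nested vec_avg does.

static void put_pixels16_l2_c(uint8_t *dst, const uint8_t *src1,
                              const uint8_t *src2, int dst_stride,
                              int src_stride1, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            dst[j] = (src1[j] + src2[j] + 1) >> 1; /* vec_avg rounding */
        src1 += src_stride1;
        src2 += 16;
        dst  += dst_stride;
    }
}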
-
-  H264_MC(put_, 16, altivec)
-  H264_MC(avg_, 16, altivec)
-
-void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
-
-#ifdef HAVE_ALTIVEC
-  if (has_altivec()) {
-    c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
-    c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
-
-#define dspfunc(PFX, IDX, NUM) \
-    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
-    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
-    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
-    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
-    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
-    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
-    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
-    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
-    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
-    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
-    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
-    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
-    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
-    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
-    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
-    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
-
-    dspfunc(put_h264_qpel, 0, 16);
-    dspfunc(avg_h264_qpel, 0, 16);
-#undef dspfunc
-
-  } else
-#endif /* HAVE_ALTIVEC */
-  {
-    // Non-AltiVec PPC optimisations
-
-    // ... pending ...
-  }
-}
--- a/ppc/dsputil_h264_template_altivec.c	Sat Aug 12 16:37:31 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,717 +0,0 @@
-/*
- * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/* this code assumes that stride % 16 == 0 */
-void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
-  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
-    signed int ABCD[4] __attribute__((aligned(16))) =
-                        {((8 - x) * (8 - y)),
-                          ((x) * (8 - y)),
-                          ((8 - x) * (y)),
-                          ((x) * (y))};
-    register int i;
-    vector unsigned char fperm;
-    const vector signed int vABCD = vec_ld(0, ABCD);
-    const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
-    const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
-    const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
-    const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
-    const vector signed int vzero = vec_splat_s32(0);
-    const vector signed short v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
-    const vector unsigned short v6us = vec_splat_u16(6);
-    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
-    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
-    vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
-    vector unsigned char vsrc0uc, vsrc1uc;
-    vector signed short vsrc0ssH, vsrc1ssH;
-    vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc;
-    vector signed short vsrc2ssH, vsrc3ssH, psum;
-    vector unsigned char vdst, ppsum, vfdst, fsum;
-
-  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
-
-    if (((unsigned long)dst) % 16 == 0) {
-      fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
-                                        0x14, 0x15, 0x16, 0x17,
-                                        0x08, 0x09, 0x0A, 0x0B,
-                                        0x0C, 0x0D, 0x0E, 0x0F);
-    } else {
-      fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
-                                        0x04, 0x05, 0x06, 0x07,
-                                        0x18, 0x19, 0x1A, 0x1B,
-                                        0x1C, 0x1D, 0x1E, 0x1F);
-    }
-
-    vsrcAuc = vec_ld(0, src);
-
-    if (loadSecond)
-      vsrcBuc = vec_ld(16, src);
-    vsrcperm0 = vec_lvsl(0, src);
-    vsrcperm1 = vec_lvsl(1, src);
-
-    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
-    if (reallyBadAlign)
-      vsrc1uc = vsrcBuc;
-    else
-      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
-    vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
-                                               (vector unsigned char)vsrc0uc);
-    vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
-                                               (vector unsigned char)vsrc1uc);
-
-    if (!loadSecond) {// -> !reallyBadAlign
-      for (i = 0 ; i < h ; i++) {
-
-
-        vsrcCuc = vec_ld(stride + 0, src);
-
-        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
-        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
-        vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
-                                                (vector unsigned char)vsrc2uc);
-        vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
-                                                (vector unsigned char)vsrc3uc);
-
-        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
-        psum = vec_mladd(vB, vsrc1ssH, psum);
-        psum = vec_mladd(vC, vsrc2ssH, psum);
-        psum = vec_mladd(vD, vsrc3ssH, psum);
-        psum = vec_add(v32ss, psum);
-        psum = vec_sra(psum, v6us);
-
-        vdst = vec_ld(0, dst);
-        ppsum = (vector unsigned char)vec_packsu(psum, psum);
-        vfdst = vec_perm(vdst, ppsum, fperm);
-
-        OP_U8_ALTIVEC(fsum, vfdst, vdst);
-
-        vec_st(fsum, 0, dst);
-
-        vsrc0ssH = vsrc2ssH;
-        vsrc1ssH = vsrc3ssH;
-
-        dst += stride;
-        src += stride;
-      }
-    } else {
-        vector unsigned char vsrcDuc;
-      for (i = 0 ; i < h ; i++) {
-        vsrcCuc = vec_ld(stride + 0, src);
-        vsrcDuc = vec_ld(stride + 16, src);
-
-        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
-        if (reallyBadAlign)
-          vsrc3uc = vsrcDuc;
-        else
-          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
-        vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
-                                                (vector unsigned char)vsrc2uc);
-        vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
-                                                (vector unsigned char)vsrc3uc);
-
-        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
-        psum = vec_mladd(vB, vsrc1ssH, psum);
-        psum = vec_mladd(vC, vsrc2ssH, psum);
-        psum = vec_mladd(vD, vsrc3ssH, psum);
-        psum = vec_add(v32ss, psum);
-        psum = vec_sr(psum, v6us);
-
-        vdst = vec_ld(0, dst);
-        ppsum = (vector unsigned char)vec_pack(psum, psum);
-        vfdst = vec_perm(vdst, ppsum, fperm);
-
-        OP_U8_ALTIVEC(fsum, vfdst, vdst);
-
-        vec_st(fsum, 0, dst);
-
-        vsrc0ssH = vsrc2ssH;
-        vsrc1ssH = vsrc3ssH;
-
-        dst += stride;
-        src += stride;
-      }
-    }
-    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
-}
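A scalar sketch of the bilinear chroma filter above (not part of the changeset): the four ABCD weights always sum to 64, which is why the vector code rounds with v32ss (32) and shifts by v6us (6).

static void h264_chroma_mc8_c(uint8_t *dst, uint8_t *src, int stride,
                              int h, int x, int y)
{
    const int A = (8 - x) * (8 - y), B = x * (8 - y),
              C = (8 - x) * y,       D = x * y;
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 32) >> 6;
        dst += stride;
        src += stride;
    }
}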
-
-/* this code assumes stride % 16 == 0 */
-static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
-  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
-  register int i;
-
-  const vector signed int vzero = vec_splat_s32(0);
-  const vector unsigned char permM2 = vec_lvsl(-2, src);
-  const vector unsigned char permM1 = vec_lvsl(-1, src);
-  const vector unsigned char permP0 = vec_lvsl(+0, src);
-  const vector unsigned char permP1 = vec_lvsl(+1, src);
-  const vector unsigned char permP2 = vec_lvsl(+2, src);
-  const vector unsigned char permP3 = vec_lvsl(+3, src);
-  const vector signed short v5ss = vec_splat_s16(5);
-  const vector unsigned short v5us = vec_splat_u16(5);
-  const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
-  const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
-  const vector unsigned char dstperm = vec_lvsr(0, dst);
-  const vector unsigned char neg1 =
-                                (const vector unsigned char) vec_splat_s8(-1);
-
-  const vector unsigned char dstmask =
-                                vec_perm((const vector unsigned char)vzero,
-                                                               neg1, dstperm);
-
-  vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
-
-  register int align = ((((unsigned long)src) - 2) % 16);
-
-  vector signed short srcP0A, srcP0B, srcP1A, srcP1B,
-                      srcP2A, srcP2B, srcP3A, srcP3B,
-                      srcM1A, srcM1B, srcM2A, srcM2B,
-                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
-                      pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
-                      psumA, psumB, sumA, sumB;
-
-  vector unsigned char sum, dst1, dst2, vdst, fsum,
-                       rsum, fdst1, fdst2;
-
-  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
-
-  for (i = 0 ; i < 16 ; i ++) {
-    vector unsigned char srcR1 = vec_ld(-2, src);
-    vector unsigned char srcR2 = vec_ld(14, src);
-
-    switch (align) {
-    default: {
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = vec_perm(srcR1, srcR2, permP0);
-      srcP1 = vec_perm(srcR1, srcR2, permP1);
-      srcP2 = vec_perm(srcR1, srcR2, permP2);
-      srcP3 = vec_perm(srcR1, srcR2, permP3);
-    } break;
-    case 11: {
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = vec_perm(srcR1, srcR2, permP0);
-      srcP1 = vec_perm(srcR1, srcR2, permP1);
-      srcP2 = vec_perm(srcR1, srcR2, permP2);
-      srcP3 = srcR2;
-    } break;
-    case 12: {
-      vector unsigned char srcR3 = vec_ld(30, src);
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = vec_perm(srcR1, srcR2, permP0);
-      srcP1 = vec_perm(srcR1, srcR2, permP1);
-      srcP2 = srcR2;
-      srcP3 = vec_perm(srcR2, srcR3, permP3);
-    } break;
-    case 13: {
-      vector unsigned char srcR3 = vec_ld(30, src);
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = vec_perm(srcR1, srcR2, permP0);
-      srcP1 = srcR2;
-      srcP2 = vec_perm(srcR2, srcR3, permP2);
-      srcP3 = vec_perm(srcR2, srcR3, permP3);
-    } break;
-    case 14: {
-      vector unsigned char srcR3 = vec_ld(30, src);
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = srcR2;
-      srcP1 = vec_perm(srcR2, srcR3, permP1);
-      srcP2 = vec_perm(srcR2, srcR3, permP2);
-      srcP3 = vec_perm(srcR2, srcR3, permP3);
-    } break;
-    case 15: {
-      vector unsigned char srcR3 = vec_ld(30, src);
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = srcR2;
-      srcP0 = vec_perm(srcR2, srcR3, permP0);
-      srcP1 = vec_perm(srcR2, srcR3, permP1);
-      srcP2 = vec_perm(srcR2, srcR3, permP2);
-      srcP3 = vec_perm(srcR2, srcR3, permP3);
-    } break;
-    }
-
-    srcP0A = (vector signed short)
-                vec_mergeh((vector unsigned char)vzero, srcP0);
-    srcP0B = (vector signed short)
-                vec_mergel((vector unsigned char)vzero, srcP0);
-    srcP1A = (vector signed short)
-                vec_mergeh((vector unsigned char)vzero, srcP1);
-    srcP1B = (vector signed short)
-                vec_mergel((vector unsigned char)vzero, srcP1);
-
-    srcP2A = (vector signed short)
-                vec_mergeh((vector unsigned char)vzero, srcP2);
-    srcP2B = (vector signed short)
-                vec_mergel((vector unsigned char)vzero, srcP2);
-    srcP3A = (vector signed short)
-                vec_mergeh((vector unsigned char)vzero, srcP3);
-    srcP3B = (vector signed short)
-                vec_mergel((vector unsigned char)vzero, srcP3);
-
-    srcM1A = (vector signed short)
-                vec_mergeh((vector unsigned char)vzero, srcM1);
-    srcM1B = (vector signed short)
-                vec_mergel((vector unsigned char)vzero, srcM1);
-    srcM2A = (vector signed short)
-                vec_mergeh((vector unsigned char)vzero, srcM2);
-    srcM2B = (vector signed short)
-                vec_mergel((vector unsigned char)vzero, srcM2);
-
-    sum1A = vec_adds(srcP0A, srcP1A);
-    sum1B = vec_adds(srcP0B, srcP1B);
-    sum2A = vec_adds(srcM1A, srcP2A);
-    sum2B = vec_adds(srcM1B, srcP2B);
-    sum3A = vec_adds(srcM2A, srcP3A);
-    sum3B = vec_adds(srcM2B, srcP3B);
-
-    pp1A = vec_mladd(sum1A, v20ss, v16ss);
-    pp1B = vec_mladd(sum1B, v20ss, v16ss);
-
-    pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
-    pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
-
-    pp3A = vec_add(sum3A, pp1A);
-    pp3B = vec_add(sum3B, pp1B);
-
-    psumA = vec_sub(pp3A, pp2A);
-    psumB = vec_sub(pp3B, pp2B);
-
-    sumA = vec_sra(psumA, v5us);
-    sumB = vec_sra(psumB, v5us);
-
-    sum = vec_packsu(sumA, sumB);
-
-    dst1 = vec_ld(0, dst);
-    dst2 = vec_ld(16, dst);
-    vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
-
-    OP_U8_ALTIVEC(fsum, sum, vdst);
-
-    rsum = vec_perm(fsum, fsum, dstperm);
-    fdst1 = vec_sel(dst1, rsum, dstmask);
-    fdst2 = vec_sel(rsum, dst2, dstmask);
-
-    vec_st(fdst1, 0, dst);
-    vec_st(fdst2, 16, dst);
-
-    src += srcStride;
-    dst += dstStride;
-  }
-POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
-}
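The loop above vectorizes the H.264 6-tap half-pel kernel (1, -5, 20, 20, -5, 1): sum1 = P0+P1 is scaled by 20 (v20ss), sum2 = M1+P2 by 5 (v5ss), sum3 = M2+P3 passes through, then v16ss and the shift by 5 round and normalize before vec_packsu clamps to 0..255. A scalar sketch of the put variant (the avg variant additionally averages with dst through OP_U8_ALTIVEC):

static void qpel16_h_lowpass_c(uint8_t *dst, uint8_t *src,
                               int dstStride, int srcStride)
{
    int i, j;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++) {
            int v = src[j-2] - 5*src[j-1] + 20*src[j] + 20*src[j+1]
                  - 5*src[j+2] + src[j+3];
            v = (v + 16) >> 5;
            dst[j] = v < 0 ? 0 : v > 255 ? 255 : v; /* the packsu clamp */
        }
        src += srcStride;
        dst += dstStride;
    }
}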
-
-/* this code assumes stride % 16 == 0 */
-static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
-  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
-
-  register int i;
-
-  const vector signed int vzero = vec_splat_s32(0);
-  const vector unsigned char perm = vec_lvsl(0, src);
-  const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
-  const vector unsigned short v5us = vec_splat_u16(5);
-  const vector signed short v5ss = vec_splat_s16(5);
-  const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
-  const vector unsigned char dstperm = vec_lvsr(0, dst);
-  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
-  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
-
-  uint8_t *srcbis = src - (srcStride * 2);
-
-  const vector unsigned char srcM2a = vec_ld(0, srcbis);
-  const vector unsigned char srcM2b = vec_ld(16, srcbis);
-  const vector unsigned char srcM2 = vec_perm(srcM2a, srcM2b, perm);
-//  srcbis += srcStride;
-  const vector unsigned char srcM1a = vec_ld(0, srcbis += srcStride);
-  const vector unsigned char srcM1b = vec_ld(16, srcbis);
-  const vector unsigned char srcM1 = vec_perm(srcM1a, srcM1b, perm);
-//  srcbis += srcStride;
-  const vector unsigned char srcP0a = vec_ld(0, srcbis += srcStride);
-  const vector unsigned char srcP0b = vec_ld(16, srcbis);
-  const vector unsigned char srcP0 = vec_perm(srcP0a, srcP0b, perm);
-//  srcbis += srcStride;
-  const vector unsigned char srcP1a = vec_ld(0, srcbis += srcStride);
-  const vector unsigned char srcP1b = vec_ld(16, srcbis);
-  const vector unsigned char srcP1 = vec_perm(srcP1a, srcP1b, perm);
-//  srcbis += srcStride;
-  const vector unsigned char srcP2a = vec_ld(0, srcbis += srcStride);
-  const vector unsigned char srcP2b = vec_ld(16, srcbis);
-  const vector unsigned char srcP2 = vec_perm(srcP2a, srcP2b, perm);
-//  srcbis += srcStride;
-
-  vector signed short srcM2ssA = (vector signed short)
-                                vec_mergeh((vector unsigned char)vzero, srcM2);
-  vector signed short srcM2ssB = (vector signed short)
-                                vec_mergel((vector unsigned char)vzero, srcM2);
-  vector signed short srcM1ssA = (vector signed short)
-                                vec_mergeh((vector unsigned char)vzero, srcM1);
-  vector signed short srcM1ssB = (vector signed short)
-                                vec_mergel((vector unsigned char)vzero, srcM1);
-  vector signed short srcP0ssA = (vector signed short)
-                                vec_mergeh((vector unsigned char)vzero, srcP0);
-  vector signed short srcP0ssB = (vector signed short)
-                                vec_mergel((vector unsigned char)vzero, srcP0);
-  vector signed short srcP1ssA = (vector signed short)
-                                vec_mergeh((vector unsigned char)vzero, srcP1);
-  vector signed short srcP1ssB = (vector signed short)
-                                vec_mergel((vector unsigned char)vzero, srcP1);
-  vector signed short srcP2ssA = (vector signed short)
-                                vec_mergeh((vector unsigned char)vzero, srcP2);
-  vector signed short srcP2ssB = (vector signed short)
-                                vec_mergel((vector unsigned char)vzero, srcP2);
-
-  vector signed short pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
-                      psumA, psumB, sumA, sumB,
-                      srcP3ssA, srcP3ssB,
-                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;
-
-  vector unsigned char sum, dst1, dst2, vdst, fsum, rsum, fdst1, fdst2,
-                       srcP3a, srcP3b, srcP3;
-
-  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
-
-  for (i = 0 ; i < 16 ; i++) {
-    srcP3a = vec_ld(0, srcbis += srcStride);
-    srcP3b = vec_ld(16, srcbis);
-    srcP3 = vec_perm(srcP3a, srcP3b, perm);
-    srcP3ssA = (vector signed short)
-                                vec_mergeh((vector unsigned char)vzero, srcP3);
-    srcP3ssB = (vector signed short)
-                                vec_mergel((vector unsigned char)vzero, srcP3);
-//    srcbis += srcStride;
-
-    sum1A = vec_adds(srcP0ssA, srcP1ssA);
-    sum1B = vec_adds(srcP0ssB, srcP1ssB);
-    sum2A = vec_adds(srcM1ssA, srcP2ssA);
-    sum2B = vec_adds(srcM1ssB, srcP2ssB);
-    sum3A = vec_adds(srcM2ssA, srcP3ssA);
-    sum3B = vec_adds(srcM2ssB, srcP3ssB);
-
-    srcM2ssA = srcM1ssA;
-    srcM2ssB = srcM1ssB;
-    srcM1ssA = srcP0ssA;
-    srcM1ssB = srcP0ssB;
-    srcP0ssA = srcP1ssA;
-    srcP0ssB = srcP1ssB;
-    srcP1ssA = srcP2ssA;
-    srcP1ssB = srcP2ssB;
-    srcP2ssA = srcP3ssA;
-    srcP2ssB = srcP3ssB;
-
-    pp1A = vec_mladd(sum1A, v20ss, v16ss);
-    pp1B = vec_mladd(sum1B, v20ss, v16ss);
-
-    pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
-    pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
-
-    pp3A = vec_add(sum3A, pp1A);
-    pp3B = vec_add(sum3B, pp1B);
-
-    psumA = vec_sub(pp3A, pp2A);
-    psumB = vec_sub(pp3B, pp2B);
-
-    sumA = vec_sra(psumA, v5us);
-    sumB = vec_sra(psumB, v5us);
-
-    sum = vec_packsu(sumA, sumB);
-
-    dst1 = vec_ld(0, dst);
-    dst2 = vec_ld(16, dst);
-    vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
-
-    OP_U8_ALTIVEC(fsum, sum, vdst);
-
-    rsum = vec_perm(fsum, fsum, dstperm);
-    fdst1 = vec_sel(dst1, rsum, dstmask);
-    fdst2 = vec_sel(rsum, dst2, dstmask);
-
-    vec_st(fdst1, 0, dst);
-    vec_st(fdst2, 16, dst);
-
-    dst += dstStride;
-  }
-  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
-}
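The vertical pass applies the same 6-tap kernel down each column, and the block of copies mid-loop is a sliding window: five of the six row registers are reused from the previous iteration and only srcP3 is freshly loaded, so every source row is read exactly once. A scalar sketch of the put variant:

static void qpel16_v_lowpass_c(uint8_t *dst, uint8_t *src,
                               int dstStride, int srcStride)
{
    int i, j;
    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++) {
            int v = src[j - 2*srcStride] - 5*src[j - srcStride]
                  + 20*src[j] + 20*src[j + srcStride]
                  - 5*src[j + 2*srcStride] + src[j + 3*srcStride];
            v = (v + 16) >> 5;
            dst[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        src += srcStride;
        dst += dstStride;
    }
}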
-
-/* this code assumes stride % 16 == 0 *and* tmp is properly aligned */
-static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
-  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
-  register int i;
-  const vector signed int vzero = vec_splat_s32(0);
-  const vector unsigned char permM2 = vec_lvsl(-2, src);
-  const vector unsigned char permM1 = vec_lvsl(-1, src);
-  const vector unsigned char permP0 = vec_lvsl(+0, src);
-  const vector unsigned char permP1 = vec_lvsl(+1, src);
-  const vector unsigned char permP2 = vec_lvsl(+2, src);
-  const vector unsigned char permP3 = vec_lvsl(+3, src);
-  const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
-  const vector unsigned int v10ui = vec_splat_u32(10);
-  const vector signed short v5ss = vec_splat_s16(5);
-  const vector signed short v1ss = vec_splat_s16(1);
-  const vector signed int v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
-  const vector unsigned int v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));
-
-  register int align = ((((unsigned long)src) - 2) % 16);
-
-  const vector unsigned char neg1 = (const vector unsigned char)
-                                                        vec_splat_s8(-1);
-
-  vector signed short srcP0A, srcP0B, srcP1A, srcP1B,
-                      srcP2A, srcP2B, srcP3A, srcP3B,
-                      srcM1A, srcM1B, srcM2A, srcM2B,
-                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
-                      pp1A, pp1B, pp2A, pp2B, psumA, psumB;
-
-  const vector unsigned char dstperm = vec_lvsr(0, dst);
-
-  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
-
-  const vector unsigned char mperm = (const vector unsigned char)
-    AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
-        0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
-  int16_t *tmpbis = tmp;
-
-  vector signed short tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
-                      tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
-                      tmpP2ssA, tmpP2ssB;
-
-  vector signed int pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
-                    pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
-                    pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
-                    ssumAe, ssumAo, ssumBe, ssumBo;
-  vector unsigned char fsum, sumv, sum, dst1, dst2, vdst,
-                       rsum, fdst1, fdst2;
-  vector signed short ssume, ssumo;
-
-  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
-  src -= (2 * srcStride);
-  for (i = 0 ; i < 21 ; i ++) {
-    vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
-    vector unsigned char srcR1 = vec_ld(-2, src);
-    vector unsigned char srcR2 = vec_ld(14, src);
-
-    switch (align) {
-    default: {
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = vec_perm(srcR1, srcR2, permP0);
-      srcP1 = vec_perm(srcR1, srcR2, permP1);
-      srcP2 = vec_perm(srcR1, srcR2, permP2);
-      srcP3 = vec_perm(srcR1, srcR2, permP3);
-    } break;
-    case 11: {
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = vec_perm(srcR1, srcR2, permP0);
-      srcP1 = vec_perm(srcR1, srcR2, permP1);
-      srcP2 = vec_perm(srcR1, srcR2, permP2);
-      srcP3 = srcR2;
-    } break;
-    case 12: {
-      vector unsigned char srcR3 = vec_ld(30, src);
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = vec_perm(srcR1, srcR2, permP0);
-      srcP1 = vec_perm(srcR1, srcR2, permP1);
-      srcP2 = srcR2;
-      srcP3 = vec_perm(srcR2, srcR3, permP3);
-    } break;
-    case 13: {
-      vector unsigned char srcR3 = vec_ld(30, src);
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = vec_perm(srcR1, srcR2, permP0);
-      srcP1 = srcR2;
-      srcP2 = vec_perm(srcR2, srcR3, permP2);
-      srcP3 = vec_perm(srcR2, srcR3, permP3);
-    } break;
-    case 14: {
-      vector unsigned char srcR3 = vec_ld(30, src);
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = vec_perm(srcR1, srcR2, permM1);
-      srcP0 = srcR2;
-      srcP1 = vec_perm(srcR2, srcR3, permP1);
-      srcP2 = vec_perm(srcR2, srcR3, permP2);
-      srcP3 = vec_perm(srcR2, srcR3, permP3);
-    } break;
-    case 15: {
-      vector unsigned char srcR3 = vec_ld(30, src);
-      srcM2 = vec_perm(srcR1, srcR2, permM2);
-      srcM1 = srcR2;
-      srcP0 = vec_perm(srcR2, srcR3, permP0);
-      srcP1 = vec_perm(srcR2, srcR3, permP1);
-      srcP2 = vec_perm(srcR2, srcR3, permP2);
-      srcP3 = vec_perm(srcR2, srcR3, permP3);
-    } break;
-    }
-
-    srcP0A = (vector signed short)
-                            vec_mergeh((vector unsigned char)vzero, srcP0);
-    srcP0B = (vector signed short)
-                            vec_mergel((vector unsigned char)vzero, srcP0);
-    srcP1A = (vector signed short)
-                            vec_mergeh((vector unsigned char)vzero, srcP1);
-    srcP1B = (vector signed short)
-                            vec_mergel((vector unsigned char)vzero, srcP1);
-
-    srcP2A = (vector signed short)
-                            vec_mergeh((vector unsigned char)vzero, srcP2);
-    srcP2B = (vector signed short)
-                            vec_mergel((vector unsigned char)vzero, srcP2);
-    srcP3A = (vector signed short)
-                            vec_mergeh((vector unsigned char)vzero, srcP3);
-    srcP3B = (vector signed short)
-                            vec_mergel((vector unsigned char)vzero, srcP3);
-
-    srcM1A = (vector signed short)
-                            vec_mergeh((vector unsigned char)vzero, srcM1);
-    srcM1B = (vector signed short)
-                            vec_mergel((vector unsigned char)vzero, srcM1);
-    srcM2A = (vector signed short)
-                            vec_mergeh((vector unsigned char)vzero, srcM2);
-    srcM2B = (vector signed short)
-                            vec_mergel((vector unsigned char)vzero, srcM2);
-
-    sum1A = vec_adds(srcP0A, srcP1A);
-    sum1B = vec_adds(srcP0B, srcP1B);
-    sum2A = vec_adds(srcM1A, srcP2A);
-    sum2B = vec_adds(srcM1B, srcP2B);
-    sum3A = vec_adds(srcM2A, srcP3A);
-    sum3B = vec_adds(srcM2B, srcP3B);
-
-    pp1A = vec_mladd(sum1A, v20ss, sum3A);
-    pp1B = vec_mladd(sum1B, v20ss, sum3B);
-
-    pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
-    pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
-
-    psumA = vec_sub(pp1A, pp2A);
-    psumB = vec_sub(pp1B, pp2B);
-
-    vec_st(psumA, 0, tmp);
-    vec_st(psumB, 16, tmp);
-
-    src += srcStride;
-    tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
-  }
-
-  tmpM2ssA = vec_ld(0, tmpbis);
-  tmpM2ssB = vec_ld(16, tmpbis);
-  tmpbis += tmpStride;
-  tmpM1ssA = vec_ld(0, tmpbis);
-  tmpM1ssB = vec_ld(16, tmpbis);
-  tmpbis += tmpStride;
-  tmpP0ssA = vec_ld(0, tmpbis);
-  tmpP0ssB = vec_ld(16, tmpbis);
-  tmpbis += tmpStride;
-  tmpP1ssA = vec_ld(0, tmpbis);
-  tmpP1ssB = vec_ld(16, tmpbis);
-  tmpbis += tmpStride;
-  tmpP2ssA = vec_ld(0, tmpbis);
-  tmpP2ssB = vec_ld(16, tmpbis);
-  tmpbis += tmpStride;
-
-  for (i = 0 ; i < 16 ; i++) {
-    const vector signed short tmpP3ssA = vec_ld(0, tmpbis);
-    const vector signed short tmpP3ssB = vec_ld(16, tmpbis);
-
-    const vector signed short sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
-    const vector signed short sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
-    const vector signed short sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
-    const vector signed short sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
-    const vector signed short sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
-    const vector signed short sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
-
-    tmpbis += tmpStride;
-
-    tmpM2ssA = tmpM1ssA;
-    tmpM2ssB = tmpM1ssB;
-    tmpM1ssA = tmpP0ssA;
-    tmpM1ssB = tmpP0ssB;
-    tmpP0ssA = tmpP1ssA;
-    tmpP0ssB = tmpP1ssB;
-    tmpP1ssA = tmpP2ssA;
-    tmpP1ssB = tmpP2ssB;
-    tmpP2ssA = tmpP3ssA;
-    tmpP2ssB = tmpP3ssB;
-
-    pp1Ae = vec_mule(sum1A, v20ss);
-    pp1Ao = vec_mulo(sum1A, v20ss);
-    pp1Be = vec_mule(sum1B, v20ss);
-    pp1Bo = vec_mulo(sum1B, v20ss);
-
-    pp2Ae = vec_mule(sum2A, v5ss);
-    pp2Ao = vec_mulo(sum2A, v5ss);
-    pp2Be = vec_mule(sum2B, v5ss);
-    pp2Bo = vec_mulo(sum2B, v5ss);
-
-    pp3Ae = vec_sra((vector signed int)sum3A, v16ui);
-    pp3Ao = vec_mulo(sum3A, v1ss);
-    pp3Be = vec_sra((vector signed int)sum3B, v16ui);
-    pp3Bo = vec_mulo(sum3B, v1ss);
-
-    pp1cAe = vec_add(pp1Ae, v512si);
-    pp1cAo = vec_add(pp1Ao, v512si);
-    pp1cBe = vec_add(pp1Be, v512si);
-    pp1cBo = vec_add(pp1Bo, v512si);
-
-    pp32Ae = vec_sub(pp3Ae, pp2Ae);
-    pp32Ao = vec_sub(pp3Ao, pp2Ao);
-    pp32Be = vec_sub(pp3Be, pp2Be);
-    pp32Bo = vec_sub(pp3Bo, pp2Bo);
-
-    sumAe = vec_add(pp1cAe, pp32Ae);
-    sumAo = vec_add(pp1cAo, pp32Ao);
-    sumBe = vec_add(pp1cBe, pp32Be);
-    sumBo = vec_add(pp1cBo, pp32Bo);
-
-    ssumAe = vec_sra(sumAe, v10ui);
-    ssumAo = vec_sra(sumAo, v10ui);
-    ssumBe = vec_sra(sumBe, v10ui);
-    ssumBo = vec_sra(sumBo, v10ui);
-
-    ssume = vec_packs(ssumAe, ssumBe);
-    ssumo = vec_packs(ssumAo, ssumBo);
-
-    sumv = vec_packsu(ssume, ssumo);
-    sum = vec_perm(sumv, sumv, mperm);
-
-    dst1 = vec_ld(0, dst);
-    dst2 = vec_ld(16, dst);
-    vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
-
-    OP_U8_ALTIVEC(fsum, sum, vdst);
-
-    rsum = vec_perm(fsum, fsum, dstperm);
-    fdst1 = vec_sel(dst1, rsum, dstmask);
-    fdst2 = vec_sel(rsum, dst2, dstmask);
-
-    vec_st(fdst1, 0, dst);
-    vec_st(fdst2, 16, dst);
-
-    dst += dstStride;
-  }
-  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
-}
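The hv filter is the same kernel applied twice. Pass 1 filters 21 rows (16 outputs plus 5 rows of vertical support) horizontally with no rounding or shift and stores 16-bit intermediates in tmp; pass 2 filters those vertically and normalizes once with +512 and >>10 (v512si / v10ui), using the vec_mule/vec_mulo even-odd split because the products no longer fit in 16 bits. A scalar sketch of the put variant:

static void qpel16_hv_lowpass_c(uint8_t *dst, int16_t *tmp, uint8_t *src,
                                int dstStride, int tmpStride, int srcStride)
{
    int i, j;
    src -= 2 * srcStride;
    for (i = 0; i < 21; i++) {          /* horizontal pass, unshifted */
        for (j = 0; j < 16; j++)
            tmp[j] = src[j-2] - 5*src[j-1] + 20*src[j] + 20*src[j+1]
                   - 5*src[j+2] + src[j+3];
        src += srcStride;
        tmp += tmpStride;
    }
    tmp -= 19 * tmpStride;              /* rewind to the P0 row */
    for (i = 0; i < 16; i++) {          /* vertical pass */
        for (j = 0; j < 16; j++) {
            int v = tmp[j - 2*tmpStride] - 5*tmp[j - tmpStride]
                  + 20*tmp[j] + 20*tmp[j + tmpStride]
                  - 5*tmp[j + 2*tmpStride] + tmp[j + 3*tmpStride];
            v = (v + 512) >> 10;
            dst[j] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        tmp += tmpStride;
        dst += dstStride;
    }
}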
--- a/ppc/dsputil_snow_altivec.c	Sat Aug 12 16:37:31 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,786 +0,0 @@
-/*
- * Altivec optimized snow DSP utils
- * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- *
- */
-
-#include "../dsputil.h"
-
-#include "gcc_fixes.h"
-#include "dsputil_altivec.h"
-#include "../snow.h"
-
-#undef NDEBUG
-#include <assert.h>
-
-
-
-//FIXME remove this replication
-#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num)))
-
-static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line)
-{
-    int offset;
-    DWTELEM * buffer;
-
-//  av_log(NULL, AV_LOG_DEBUG, "Cache hit: %d\n", line);
-
-    assert(buf->data_stack_top >= 0);
-//  assert(!buf->line[line]);
-    if (buf->line[line])
-        return buf->line[line];
-
-    offset = buf->line_width * line;
-    buffer = buf->data_stack[buf->data_stack_top];
-    buf->data_stack_top--;
-    buf->line[line] = buffer;
-
-//  av_log(NULL, AV_LOG_DEBUG, "slice_buffer_load_line: line: %d remaining: %d\n", line, buf->data_stack_top + 1);
-
-    return buffer;
-}
-
-
-//altivec code
-
-void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width)
-{
-    const int w2= (width+1)>>1;
-    DECLARE_ALIGNED_16(DWTELEM, temp[(width>>1)]);
-    const int w_l= (width>>1);
-    const int w_r= w2 - 1;
-    int i;
-    vector signed int t1, t2, x, y, tmp1, tmp2;
-    vector signed int *vbuf, *vtmp;
-    vector unsigned char align;
-
-
-
-    { // Lift 0
-        DWTELEM * const ref = b + w2 - 1;
-        DWTELEM b_0 = b[0];
-        vbuf = (vector signed int *)b;
-
-        tmp1 = vec_ld (0, ref);
-        align = vec_lvsl (0, ref);
-        tmp2 = vec_ld (15, ref);
-        t1= vec_perm(tmp1, tmp2, align);
-
-        i = 0;
-
-        for (i=0; i<w_l-15; i+=16) {
-#if 0
-        b[i+0] = b[i+0] - ((3 * (ref[i+0] + ref[i+1]) + 4) >> 3);
-        b[i+1] = b[i+1] - ((3 * (ref[i+1] + ref[i+2]) + 4) >> 3);
-        b[i+2] = b[i+2] - ((3 * (ref[i+2] + ref[i+3]) + 4) >> 3);
-        b[i+3] = b[i+3] - ((3 * (ref[i+3] + ref[i+4]) + 4) >> 3);
-#else
-
-        tmp1 = vec_ld (0, ref+4+i);
-        tmp2 = vec_ld (15, ref+4+i);
-
-        t2 = vec_perm(tmp1, tmp2, align);
-
-        y = vec_add(t1,vec_sld(t1,t2,4));
-        y = vec_add(vec_add(y,y),y);
-
-        tmp1 = vec_ld (0, ref+8+i);
-
-        y = vec_add(y, vec_splat_s32(4));
-        y = vec_sra(y, vec_splat_u32(3));
-
-        tmp2 = vec_ld (15, ref+8+i);
-
-        *vbuf = vec_sub(*vbuf, y);
-
-        t1=t2;
-
-        vbuf++;
-
-        t2 = vec_perm(tmp1, tmp2, align);
-
-        y = vec_add(t1,vec_sld(t1,t2,4));
-        y = vec_add(vec_add(y,y),y);
-
-        tmp1 = vec_ld (0, ref+12+i);
-
-        y = vec_add(y, vec_splat_s32(4));
-        y = vec_sra(y, vec_splat_u32(3));
-
-        tmp2 = vec_ld (15, ref+12+i);
-
-        *vbuf = vec_sub(*vbuf, y);
-
-        t1=t2;
-
-        vbuf++;
-
-        t2 = vec_perm(tmp1, tmp2, align);
-
-        y = vec_add(t1,vec_sld(t1,t2,4));
-        y = vec_add(vec_add(y,y),y);
-
-        tmp1 = vec_ld (0, ref+16+i);
-
-        y = vec_add(y, vec_splat_s32(4));
-        y = vec_sra(y, vec_splat_u32(3));
-
-        tmp2 = vec_ld (15, ref+16+i);
-
-        *vbuf = vec_sub(*vbuf, y);
-
-        t1=t2;
-
-        t2 = vec_perm(tmp1, tmp2, align);
-
-        y = vec_add(t1,vec_sld(t1,t2,4));
-        y = vec_add(vec_add(y,y),y);
-
-        vbuf++;
-
-        y = vec_add(y, vec_splat_s32(4));
-        y = vec_sra(y, vec_splat_u32(3));
-        *vbuf = vec_sub(*vbuf, y);
-
-        t1=t2;
-
-        vbuf++;
-
-#endif
-        }
-
-        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
-        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
-    }
-
-    { // Lift 1
-        DWTELEM * const dst = b+w2;
-
-        i = 0;
-        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
-            dst[i] = dst[i] - (b[i] + b[i + 1]);
-        }
-
-        align = vec_lvsl(0, b+i);
-        tmp1 = vec_ld(0, b+i);
-        vbuf = (vector signed int*) (dst + i);
-        tmp2 = vec_ld(15, b+i);
-
-        t1 = vec_perm(tmp1, tmp2, align);
-
-        for (; i<w_r-3; i+=4) {
-
-#if 0
-            dst[i]   = dst[i]   - (b[i]   + b[i + 1]);
-            dst[i+1] = dst[i+1] - (b[i+1] + b[i + 2]);
-            dst[i+2] = dst[i+2] - (b[i+2] + b[i + 3]);
-            dst[i+3] = dst[i+3] - (b[i+3] + b[i + 4]);
-#else
-
-        tmp1 = vec_ld(0, b+4+i);
-        tmp2 = vec_ld(15, b+4+i);
-
-        t2 = vec_perm(tmp1, tmp2, align);
-
-        y = vec_add(t1, vec_sld(t1,t2,4));
-        *vbuf = vec_sub (*vbuf, y);
-
-        vbuf++;
-
-        t1 = t2;
-
-#endif
-
-        }
-
-        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
-    }
-
-    { // Lift 2
-        DWTELEM * const ref = b+w2 - 1;
-        DWTELEM b_0 = b[0];
-        vbuf= (vector signed int *) b;
-
-        tmp1 = vec_ld (0, ref);
-        align = vec_lvsl (0, ref);
-        tmp2 = vec_ld (15, ref);
-        t1= vec_perm(tmp1, tmp2, align);
-
-        i = 0;
-        for (; i<w_l-15; i+=16) {
-#if 0
-            b[i]   = b[i]   - (((8 -(ref[i]   + ref[i+1])) - (b[i]  <<2)) >> 4);
-            b[i+1] = b[i+1] - (((8 -(ref[i+1] + ref[i+2])) - (b[i+1]<<2)) >> 4);
-            b[i+2] = b[i+2] - (((8 -(ref[i+2] + ref[i+3])) - (b[i+2]<<2)) >> 4);
-            b[i+3] = b[i+3] - (((8 -(ref[i+3] + ref[i+4])) - (b[i+3]<<2)) >> 4);
-#else
-            tmp1 = vec_ld (0, ref+4+i);
-            tmp2 = vec_ld (15, ref+4+i);
-
-            t2 = vec_perm(tmp1, tmp2, align);
-
-            y = vec_add(t1,vec_sld(t1,t2,4));
-            y = vec_sub(vec_splat_s32(8),y);
-
-            tmp1 = vec_ld (0, ref+8+i);
-
-            x = vec_sl(*vbuf,vec_splat_u32(2));
-            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
-
-            tmp2 = vec_ld (15, ref+8+i);
-
-            *vbuf = vec_sub( *vbuf, y);
-
-            t1 = t2;
-
-            vbuf++;
-
-            t2 = vec_perm(tmp1, tmp2, align);
-
-            y = vec_add(t1,vec_sld(t1,t2,4));
-            y = vec_sub(vec_splat_s32(8),y);
-
-            tmp1 = vec_ld (0, ref+12+i);
-
-            x = vec_sl(*vbuf,vec_splat_u32(2));
-            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
-
-            tmp2 = vec_ld (15, ref+12+i);
-
-            *vbuf = vec_sub( *vbuf, y);
-
-            t1 = t2;
-
-            vbuf++;
-
-            t2 = vec_perm(tmp1, tmp2, align);
-
-            y = vec_add(t1,vec_sld(t1,t2,4));
-            y = vec_sub(vec_splat_s32(8),y);
-
-            tmp1 = vec_ld (0, ref+16+i);
-
-            x = vec_sl(*vbuf,vec_splat_u32(2));
-            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
-
-            tmp2 = vec_ld (15, ref+16+i);
-
-            *vbuf = vec_sub( *vbuf, y);
-
-            t1 = t2;
-
-            vbuf++;
-
-            t2 = vec_perm(tmp1, tmp2, align);
-
-            y = vec_add(t1,vec_sld(t1,t2,4));
-            y = vec_sub(vec_splat_s32(8),y);
-
-            t1 = t2;
-
-            x = vec_sl(*vbuf,vec_splat_u32(2));
-            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
-            *vbuf = vec_sub( *vbuf, y);
-
-            vbuf++;
-
-#endif
-        }
-
-        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
-        b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
-    }
-
-    { // Lift 3
-        DWTELEM * const src = b+w2;
-
-        vbuf = (vector signed int *)b;
-        vtmp = (vector signed int *)temp;
-
-        i = 0;
-        align = vec_lvsl(0, src);
-
-        for (; i<w_r-3; i+=4) {
-#if 0
-            temp[i] = src[i] - ((-3*(b[i] + b[i+1]))>>1);
-            temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1);
-            temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1);
-            temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1);
-#else
-            tmp1 = vec_ld(0,src+i);
-            t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4));
-            tmp2 = vec_ld(15,src+i);
-            t1 = vec_sub(vec_splat_s32(0),t1); //bad!
-            t1 = vec_add(t1,vec_add(t1,t1));
-            t2 = vec_perm(tmp1 ,tmp2 ,align);
-            t1 = vec_sra(t1,vec_splat_u32(1));
-            vbuf++;
-            *vtmp = vec_sub(t2,t1);
-            vtmp++;
-
-#endif
-
-        }
-
-        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1);
-    }
-
-    {
-    //Interleave
-        int a;
-        vector signed int *t = (vector signed int *)temp,
-                          *v = (vector signed int *)b;
-
-        snow_interleave_line_header(&i, width, b, temp);
-
-        for (; (i & 0xE) != 0xE; i-=2){
-            b[i+1] = temp[i>>1];
-            b[i] = b[i>>1];
-        }
-        for (i-=14; i>=0; i-=16){
-           a=i/4;
-
-           v[a+3]=vec_mergel(v[(a>>1)+1],t[(a>>1)+1]);
-           v[a+2]=vec_mergeh(v[(a>>1)+1],t[(a>>1)+1]);
-           v[a+1]=vec_mergel(v[a>>1],t[a>>1]);
-           v[a]=vec_mergeh(v[a>>1],t[a>>1]);
-
-        }
-
-    }
-}
-
-void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width)
-{
-    int i, w4 = width/4;
-    vector signed int *v0, *v1,*v2,*v3,*v4,*v5;
-    vector signed int t1, t2;
-
-    v0=(vector signed int *)b0;
-    v1=(vector signed int *)b1;
-    v2=(vector signed int *)b2;
-    v3=(vector signed int *)b3;
-    v4=(vector signed int *)b4;
-    v5=(vector signed int *)b5;
-
-    for (i=0; i< w4;i++)
-    {
-
-    #if 0
-        b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
-        b3[i] -= ((b2[i] + b4[i]));
-        b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4;
-        b1[i] += (3*(b0[i] + b2[i]))>>1;
-    #else
-        t1 = vec_add(v3[i], v5[i]);
-        t2 = vec_add(t1, vec_add(t1,t1));
-        t1 = vec_add(t2, vec_splat_s32(4));
-        v4[i] = vec_sub(v4[i], vec_sra(t1,vec_splat_u32(3)));
-
-        v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i]));
-
-        t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i]));
-        t2 = vec_sl(v2[i], vec_splat_u32(2));
-        v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2),vec_splat_u32(4)));
-        t1 = vec_add(v0[i], v2[i]);
-        t2 = vec_add(t1, vec_add(t1,t1));
-        v1[i] = vec_add(v1[i], vec_sra(t2,vec_splat_u32(1)));
-
-    #endif
-    }
-
-    for(i*=4; i < width; i++)
-    {
-        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
-        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
-        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
-        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
-    }
-}
-
-#define LOAD_BLOCKS \
-            tmp1 = vec_ld(0, &block[3][y*src_stride]);\
-            align = vec_lvsl(0, &block[3][y*src_stride]);\
-            tmp2 = vec_ld(15, &block[3][y*src_stride]);\
-\
-            b3 = vec_perm(tmp1,tmp2,align);\
-\
-            tmp1 = vec_ld(0, &block[2][y*src_stride]);\
-            align = vec_lvsl(0, &block[2][y*src_stride]);\
-            tmp2 = vec_ld(15, &block[2][y*src_stride]);\
-\
-            b2 = vec_perm(tmp1,tmp2,align);\
-\
-            tmp1 = vec_ld(0, &block[1][y*src_stride]);\
-            align = vec_lvsl(0, &block[1][y*src_stride]);\
-            tmp2 = vec_ld(15, &block[1][y*src_stride]);\
-\
-            b1 = vec_perm(tmp1,tmp2,align);\
-\
-            tmp1 = vec_ld(0, &block[0][y*src_stride]);\
-            align = vec_lvsl(0, &block[0][y*src_stride]);\
-            tmp2 = vec_ld(15, &block[0][y*src_stride]);\
-\
-            b0 = vec_perm(tmp1,tmp2,align);
-
-#define LOAD_OBMCS \
-            tmp1 = vec_ld(0, obmc1);\
-            align = vec_lvsl(0, obmc1);\
-            tmp2 = vec_ld(15, obmc1);\
-\
-            ob1 = vec_perm(tmp1,tmp2,align);\
-\
-            tmp1 = vec_ld(0, obmc2);\
-            align = vec_lvsl(0, obmc2);\
-            tmp2 = vec_ld(15, obmc2);\
-\
-            ob2 = vec_perm(tmp1,tmp2,align);\
-\
-            tmp1 = vec_ld(0, obmc3);\
-            align = vec_lvsl(0, obmc3);\
-            tmp2 = vec_ld(15, obmc3);\
-\
-            ob3 = vec_perm(tmp1,tmp2,align);\
-\
-            tmp1 = vec_ld(0, obmc4);\
-            align = vec_lvsl(0, obmc4);\
-            tmp2 = vec_ld(15, obmc4);\
-\
-            ob4 = vec_perm(tmp1,tmp2,align);
-
-/* interleave logic
- * h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ]
- * h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ]
- * h  <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ]
- */
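The merges line the four OBMC weight vectors up against the four block-pixel vectors so that one vec_msum per lane performs the whole weighted sum. Scalar semantics of vec_msum(a, b, zero) on unsigned char inputs, for reference (a sketch, not part of the changeset):

#include <stdint.h>

static void msum_u8_c(const uint8_t a[16], const uint8_t b[16],
                      uint32_t acc[4])
{
    int k, n;
    for (k = 0; k < 4; k++) {
        acc[k] = 0;                            /* third operand, here zero */
        for (n = 0; n < 4; n++)                /* four byte products ...   */
            acc[k] += a[4*k + n] * b[4*k + n]; /* ... per 32-bit lane      */
    }
}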
-
-#define STEPS_0_1\
-            h1 = (vector unsigned short)\
-                 vec_mergeh(ob1, ob2);\
-\
-            h2 = (vector unsigned short)\
-                 vec_mergeh(ob3, ob4);\
-\
-            ih = (vector unsigned char)\
-                 vec_mergeh(h1,h2);\
-\
-            l1 = (vector unsigned short) vec_mergeh(b3, b2);\
-\
-            ih1 = (vector unsigned char) vec_mergel(h1, h2);\
-\
-            l2 = (vector unsigned short) vec_mergeh(b1, b0);\
-\
-            il = (vector unsigned char) vec_mergeh(l1, l2);\
-\
-            v[0] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
-\
-            il1 = (vector unsigned char) vec_mergel(l1, l2);\
-\
-            v[1] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
-
-#define FINAL_STEP_SCALAR\
-        for(x=0; x<b_w; x++)\
-            if(add){\
-                vbuf[x] += dst[x + src_x];\
-                vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;\
-                if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);\
-                dst8[x + y*src_stride] = vbuf[x];\
-            }else{\
-                dst[x + src_x] -= vbuf[x];\
-            }
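-
-/* FINAL_STEP_SCALAR rounds by FRAC_BITS and clips to 0..255:
- * ~(vbuf[x]>>31) is 0 when the value is negative and all-ones (255 in
- * the low byte) when it overflows, a branch-free clamp. */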
-
-static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
-                                             const int obmc_stride,
-                                             uint8_t * * block, int b_w,
-                                             int b_h, int src_x, int src_y,
-                                             int src_stride, slice_buffer * sb,
-                                             int add, uint8_t * dst8)
-{
-    int y, x;
-    DWTELEM * dst;
-    vector unsigned short h1, h2, l1, l2;
-    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
-    vector unsigned char b0,b1,b2,b3;
-    vector unsigned char ob1,ob2,ob3,ob4;
-
-    DECLARE_ALIGNED_16(int, vbuf[16]);
-    vector signed int *v = (vector signed int *)vbuf, *d;
-
-    for(y=0; y<b_h; y++){
-        //FIXME ugly misuse of obmc_stride
-
-        uint8_t *obmc1= obmc + y*obmc_stride;
-        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
-        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
-        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
-
-        dst = slice_buffer_get_line(sb, src_y + y);
-        d = (vector signed int *)(dst + src_x);
-
-//FIXME I could avoid some loads!
-
-        // load blocks
-        LOAD_BLOCKS
-
-        // load obmcs
-        LOAD_OBMCS
-
-        // steps 0 1
-        STEPS_0_1
-
-        FINAL_STEP_SCALAR
-
-    }
-
-}
-
-#define STEPS_2_3\
-            h1 = (vector unsigned short) vec_mergel(ob1, ob2);\
-\
-            h2 = (vector unsigned short) vec_mergel(ob3, ob4);\
-\
-            ih = (vector unsigned char) vec_mergeh(h1,h2);\
-\
-            l1 = (vector unsigned short) vec_mergel(b3, b2);\
-\
-            l2 = (vector unsigned short) vec_mergel(b1, b0);\
-\
-            ih1 = (vector unsigned char) vec_mergel(h1,h2);\
-\
-            il = (vector unsigned char) vec_mergeh(l1,l2);\
-\
-            v[2] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
-\
-            il1 = (vector unsigned char) vec_mergel(l1,l2);\
-\
-            v[3] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
-
-
-static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
-                                             const int obmc_stride,
-                                             uint8_t * * block, int b_w,
-                                             int b_h, int src_x, int src_y,
-                                             int src_stride, slice_buffer * sb,
-                                             int add, uint8_t * dst8)
-{
-    int y, x;
-    DWTELEM * dst;
-    vector unsigned short h1, h2, l1, l2;
-    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
-    vector unsigned char b0,b1,b2,b3;
-    vector unsigned char ob1,ob2,ob3,ob4;
-    DECLARE_ALIGNED_16(int, vbuf[b_w]);
-    vector signed int *v = (vector signed int *)vbuf, *d;
-
-    for(y=0; y<b_h; y++){
-        //FIXME ugly misuse of obmc_stride
-
-        uint8_t *obmc1= obmc + y*obmc_stride;
-        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
-        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
-        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
-
-        dst = slice_buffer_get_line(sb, src_y + y);
-        d = (vector signed int *)(dst + src_x);
-
-        // load blocks
-        LOAD_BLOCKS
-
-        // load obmcs
-        LOAD_OBMCS
-
-        // steps 0 1 2 3
-        STEPS_0_1
-
-        STEPS_2_3
-
-        FINAL_STEP_SCALAR
-
-    }
-}
-
-#define FINAL_STEP_VEC \
-\
-    if(add)\
-        {\
-            for(x=0; x<b_w/4; x++)\
-            {\
-                v[x] = vec_add(v[x], d[x]);\
-                v[x] = vec_sra(vec_add(v[x],\
-                                       vec_sl( vec_splat_s32(1),\
-                                               vec_splat_u32(7))),\
-                               vec_splat_u32(8));\
-\
-                mask = (vector bool int) vec_sl((vector signed int)\
-                        vec_cmpeq(v[x],v[x]),vec_splat_u32(8));\
-                mask = (vector bool int) vec_and(v[x],vec_nor(mask,mask));\
-\
-                mask = (vector bool int)\
-                        vec_cmpeq((vector signed int)mask,\
-                                  (vector signed int)vec_splat_u32(0));\
-\
-                vs = vec_sra(v[x],vec_splat_u32(15));\
-\
-                vs = vec_nor(vs,vs);\
-\
-                v[x]= vec_sel(v[x],vs,mask);\
-            }\
-\
-            for(x=0; x<b_w; x++)\
-                dst8[x + y*src_stride] = vbuf[x];\
-\
-        }\
-         else\
-            for(x=0; x<b_w/4; x++)\
-                d[x] = vec_sub(d[x], v[x]);
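-
-/* FINAL_STEP_VEC does the same rounding in vectors (this path assumes
- * FRAC_BITS == 8, matching the +(1<<7) bias and the shift by 8) and
- * clamps to 0..255 with the mask/select sequence before the bytes are
- * copied out of vbuf. */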
-
-static void inner_add_yblock_a_bw_8_obmc_16_altivec(uint8_t *obmc,
-                                             const int obmc_stride,
-                                             uint8_t * * block, int b_w,
-                                             int b_h, int src_x, int src_y,
-                                             int src_stride, slice_buffer * sb,
-                                             int add, uint8_t * dst8)
-{
-    int y, x;
-    DWTELEM * dst;
-    vector bool int mask;
-    vector signed int vs;
-    vector unsigned short h1, h2, l1, l2;
-    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
-    vector unsigned char b0,b1,b2,b3;
-    vector unsigned char ob1,ob2,ob3,ob4;
-
-    DECLARE_ALIGNED_16(int, vbuf[16]);
-    vector signed int *v = (vector signed int *)vbuf, *d;
-
-    for(y=0; y<b_h; y++){
-        //FIXME ugly misuse of obmc_stride
-
-        uint8_t *obmc1= obmc + y*obmc_stride;
-        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
-        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
-        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
-
-        dst = slice_buffer_get_line(sb, src_y + y);
-        d = (vector signed int *)(dst + src_x);
-
-//FIXME I could avoid some loads!
-
-        // load blocks
-        LOAD_BLOCKS
-
-        // load obmcs
-        LOAD_OBMCS
-
-        // steps 0 1
-        STEPS_0_1
-
-        FINAL_STEP_VEC
-
-    }
-
-}
-
-static void inner_add_yblock_a_bw_16_obmc_32_altivec(uint8_t *obmc,
-                                             const int obmc_stride,
-                                             uint8_t * * block, int b_w,
-                                             int b_h, int src_x, int src_y,
-                                             int src_stride, slice_buffer * sb,
-                                             int add, uint8_t * dst8)
-{
-    int y, x;
-    DWTELEM * dst;
-    vector bool int mask;
-    vector signed int vs;
-    vector unsigned short h1, h2, l1, l2;
-    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
-    vector unsigned char b0,b1,b2,b3;
-    vector unsigned char ob1,ob2,ob3,ob4;
-    DECLARE_ALIGNED_16(int, vbuf[b_w]);
-    vector signed int *v = (vector signed int *)vbuf, *d;
-
-    for(y=0; y<b_h; y++){
-        //FIXME ugly misuse of obmc_stride
-
-        uint8_t *obmc1= obmc + y*obmc_stride;
-        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
-        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
-        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
-
-        dst = slice_buffer_get_line(sb, src_y + y);
-        d = (vector signed int *)(dst + src_x);
-
-        // load blocks
-        LOAD_BLOCKS
-
-        // load obmcs
-        LOAD_OBMCS
-
-        // steps 0 1 2 3
-        STEPS_0_1
-
-        STEPS_2_3
-
-        FINAL_STEP_VEC
-
-    }
-}
-
-
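-/* Dispatch on alignment and width: an unaligned src_x takes the
- * variants with the scalar final step, an aligned one the _a_
- * variants with the vectorized final step; block widths other than
- * 8 and 16 fall back to the C version. */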
-void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
-                                      uint8_t * * block, int b_w, int b_h,
-                                      int src_x, int src_y, int src_stride,
-                                      slice_buffer * sb, int add,
-                                      uint8_t * dst8)
-{
-    if (src_x&15) {
-        if (b_w == 16)
-            inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
-                                                   b_w, b_h, src_x, src_y,
-                                                   src_stride, sb, add, dst8);
-        else if (b_w == 8)
-            inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
-                                                  b_w, b_h, src_x, src_y,
-                                                  src_stride, sb, add, dst8);
-        else
-            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
-                                     src_y, src_stride, sb, add, dst8);
-    } else {
-        if (b_w == 16)
-            inner_add_yblock_a_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
-                                                     b_w, b_h, src_x, src_y,
-                                                     src_stride, sb, add, dst8);
-        else if (b_w == 8)
-            inner_add_yblock_a_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
-                                                    b_w, b_h, src_x, src_y,
-                                                    src_stride, sb, add, dst8);
-        else
-            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
-                                     src_y, src_stride, sb, add, dst8);
-    }
-}
-
-
-void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
-{
-        c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
-        c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
-        c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ppc/h264_altivec.c	Sun Aug 13 08:26:43 2006 +0000
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+
+#include "dsputil_altivec.h"
+
+#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
+#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
+
+#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
+#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
+#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
+#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
+#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
+#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
+#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
+#include "h264_template_altivec.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+#undef PREFIX_h264_qpel16_h_lowpass_altivec
+#undef PREFIX_h264_qpel16_h_lowpass_num
+#undef PREFIX_h264_qpel16_v_lowpass_altivec
+#undef PREFIX_h264_qpel16_v_lowpass_num
+#undef PREFIX_h264_qpel16_hv_lowpass_altivec
+#undef PREFIX_h264_qpel16_hv_lowpass_num
+
+#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
+#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
+#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
+#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
+#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
+#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
+#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
+#include "h264_template_altivec.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+#undef PREFIX_h264_qpel16_h_lowpass_altivec
+#undef PREFIX_h264_qpel16_h_lowpass_num
+#undef PREFIX_h264_qpel16_v_lowpass_altivec
+#undef PREFIX_h264_qpel16_v_lowpass_num
+#undef PREFIX_h264_qpel16_hv_lowpass_altivec
+#undef PREFIX_h264_qpel16_hv_lowpass_num
+
+#define H264_MC(OPNAME, SIZE, CODETYPE) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
+    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
+    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
+}\
+
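+/* mc<dx><dy> encodes the quarter-pel position (dx, dy in 0..3):
+ * full- and half-pel taps come straight from the lowpass filters,
+ * while the remaining positions blend two intermediates through
+ * OPNAME ## pixels ## SIZE ## _l2. */
+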
+static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
+                                    const uint8_t * src2, int dst_stride,
+                                    int src_stride1, int h)
+{
+    int i;
+    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;
+
+    mask_ = vec_lvsl(0, src2);
+
+    for (i = 0; i < h; i++) {
+
+        tmp1 = vec_ld(i * src_stride1, src1);
+        mask = vec_lvsl(i * src_stride1, src1);
+        tmp2 = vec_ld(i * src_stride1 + 15, src1);
+
+        a = vec_perm(tmp1, tmp2, mask);
+
+        tmp1 = vec_ld(i * 16, src2);
+        tmp2 = vec_ld(i * 16 + 15, src2);
+
+        b = vec_perm(tmp1, tmp2, mask_);
+
+        tmp1 = vec_ld(0, dst);
+        mask = vec_lvsl(0, dst);
+        tmp2 = vec_ld(15, dst);
+
+        d = vec_avg(a, b);
+
+        edges = vec_perm(tmp2, tmp1, mask);
+
+        align = vec_lvsr(0, dst);
+
+        tmp1 = vec_perm(edges, d, align);
+        tmp2 = vec_perm(d, edges, align);
+
+        vec_st(tmp2, 15, dst);
+        vec_st(tmp1, 0 , dst);
+
+        dst += dst_stride;
+    }
+}
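+
+/* Both *_pixels16_l2_altivec store rows to a possibly unaligned dst:
+ * edges preserves the bytes outside the 16-byte window, vec_lvsr
+ * rotates the averaged result into place, and the two vec_st calls
+ * cover the pair of 16-byte blocks the row may span. */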
+
+static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
+                                    const uint8_t * src2, int dst_stride,
+                                    int src_stride1, int h)
+{
+    int i;
+    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;
+
+    mask_ = vec_lvsl(0, src2);
+
+    for (i = 0; i < h; i++) {
+
+        tmp1 = vec_ld(i * src_stride1, src1);
+        mask = vec_lvsl(i * src_stride1, src1);
+        tmp2 = vec_ld(i * src_stride1 + 15, src1);
+
+        a = vec_perm(tmp1, tmp2, mask);
+
+        tmp1 = vec_ld(i * 16, src2);
+        tmp2 = vec_ld(i * 16 + 15, src2);
+
+        b = vec_perm(tmp1, tmp2, mask_);
+
+        tmp1 = vec_ld(0, dst);
+        mask = vec_lvsl(0, dst);
+        tmp2 = vec_ld(15, dst);
+
+        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
+
+        edges = vec_perm(tmp2, tmp1, mask);
+
+        align = vec_lvsr(0, dst);
+
+        tmp1 = vec_perm(edges, d, align);
+        tmp2 = vec_perm(d, edges, align);
+
+        vec_st(tmp2, 15, dst);
+        vec_st(tmp1, 0 , dst);
+
+        dst += dst_stride;
+    }
+}
+
+/* Implemented but could be faster
+#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
+#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
+ */
+
+  H264_MC(put_, 16, altivec)
+  H264_MC(avg_, 16, altivec)
+
+void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
+
+#ifdef HAVE_ALTIVEC
+  if (has_altivec()) {
+    c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
+    c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
+
+#define dspfunc(PFX, IDX, NUM) \
+    c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
+    c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
+    c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
+    c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
+    c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
+    c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
+    c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
+    c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
+
+    dspfunc(put_h264_qpel, 0, 16);
+    dspfunc(avg_h264_qpel, 0, 16);
+#undef dspfunc
+
+  } else
+#endif /* HAVE_ALTIVEC */
+  {
+    // Non-AltiVec PPC optimisations
+
+    // ... pending ...
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ppc/h264_template_altivec.c	Sun Aug 13 08:26:43 2006 +0000
@@ -0,0 +1,717 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* this code assumes that stride % 16 == 0 */
+void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
+  POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
+    signed int ABCD[4] __attribute__((aligned(16))) =
+                        {((8 - x) * (8 - y)),
+                          ((x) * (8 - y)),
+                          ((8 - x) * (y)),
+                          ((x) * (y))};
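+    /* Bilinear chroma weights per H.264: out = (A*s00 + B*s01 +
+     * C*s10 + D*s11 + 32) >> 6, where A+B+C+D == 64. */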
+    register int i;
+    vector unsigned char fperm;
+    const vector signed int vABCD = vec_ld(0, ABCD);
+    const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
+    const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
+    const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
+    const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
+    const vector signed int vzero = vec_splat_s32(0);
+    const vector signed short v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
+    const vector unsigned short v6us = vec_splat_u16(6);
+    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+    vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
+    vector unsigned char vsrc0uc, vsrc1uc;
+    vector signed short vsrc0ssH, vsrc1ssH;
+    vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc;
+    vector signed short vsrc2ssH, vsrc3ssH, psum;
+    vector unsigned char vdst, ppsum, vfdst, fsum;
+
+  POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
+
+    if (((unsigned long)dst) % 16 == 0) {
+      fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
+                                        0x14, 0x15, 0x16, 0x17,
+                                        0x08, 0x09, 0x0A, 0x0B,
+                                        0x0C, 0x0D, 0x0E, 0x0F);
+    } else {
+      fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
+                                        0x04, 0x05, 0x06, 0x07,
+                                        0x18, 0x19, 0x1A, 0x1B,
+                                        0x1C, 0x1D, 0x1E, 0x1F);
+    }
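+
+    /* mc8 writes 8 bytes per row: fperm merges the packed result with
+     * the half of the 16-byte destination vector that must remain
+     * untouched, picked by the alignment of dst. */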
+
+    vsrcAuc = vec_ld(0, src);
+
+    if (loadSecond)
+      vsrcBuc = vec_ld(16, src);
+    vsrcperm0 = vec_lvsl(0, src);
+    vsrcperm1 = vec_lvsl(1, src);
+
+    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
+    if (reallyBadAlign)
+      vsrc1uc = vsrcBuc;
+    else
+      vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+
+    vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                               (vector unsigned char)vsrc0uc);
+    vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                               (vector unsigned char)vsrc1uc);
+
+    if (!loadSecond) { // implies !reallyBadAlign
+      for (i = 0 ; i < h ; i++) {
+
+        vsrcCuc = vec_ld(stride + 0, src);
+
+        vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+        vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+        vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                                (vector unsigned char)vsrc2uc);
+        vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                                (vector unsigned char)vsrc3uc);
+
+        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+        psum = vec_mladd(vB, vsrc1ssH, psum);
+        psum = vec_mladd(vC, vsrc2ssH, psum);
+        psum = vec_mladd(vD, vsrc3ssH, psum);
+        psum = vec_add(v32ss, psum);
+        psum = vec_sra(psum, v6us);
+
+        vdst = vec_ld(0, dst);
+        ppsum = (vector unsigned char)vec_packsu(psum, psum);
+        vfdst = vec_perm(vdst, ppsum, fperm);
+
+        OP_U8_ALTIVEC(fsum, vfdst, vdst);
+
+        vec_st(fsum, 0, dst);
+
+        vsrc0ssH = vsrc2ssH;
+        vsrc1ssH = vsrc3ssH;
+
+        dst += stride;
+        src += stride;
+      }
+    } else {
+        vector unsigned char vsrcDuc;
+      for (i = 0 ; i < h ; i++) {
+        vsrcCuc = vec_ld(stride + 0, src);
+        vsrcDuc = vec_ld(stride + 16, src);
+
+        vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+        if (reallyBadAlign)
+          vsrc3uc = vsrcDuc;
+        else
+          vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+        vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                                (vector unsigned char)vsrc2uc);
+        vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                                (vector unsigned char)vsrc3uc);
+
+        psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+        psum = vec_mladd(vB, vsrc1ssH, psum);
+        psum = vec_mladd(vC, vsrc2ssH, psum);
+        psum = vec_mladd(vD, vsrc3ssH, psum);
+        psum = vec_add(v32ss, psum);
+        psum = vec_sr(psum, v6us);
+
+        vdst = vec_ld(0, dst);
+        ppsum = (vector unsigned char)vec_pack(psum, psum);
+        vfdst = vec_perm(vdst, ppsum, fperm);
+
+        OP_U8_ALTIVEC(fsum, vfdst, vdst);
+
+        vec_st(fsum, 0, dst);
+
+        vsrc0ssH = vsrc2ssH;
+        vsrc1ssH = vsrc3ssH;
+
+        dst += stride;
+        src += stride;
+      }
+    }
+    POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
+}
+
+/* this code assumes stride % 16 == 0 */
+static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
+  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
+  register int i;
+
+  const vector signed int vzero = vec_splat_s32(0);
+  const vector unsigned char permM2 = vec_lvsl(-2, src);
+  const vector unsigned char permM1 = vec_lvsl(-1, src);
+  const vector unsigned char permP0 = vec_lvsl(+0, src);
+  const vector unsigned char permP1 = vec_lvsl(+1, src);
+  const vector unsigned char permP2 = vec_lvsl(+2, src);
+  const vector unsigned char permP3 = vec_lvsl(+3, src);
+  const vector signed short v5ss = vec_splat_s16(5);
+  const vector unsigned short v5us = vec_splat_u16(5);
+  const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+  const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
+  const vector unsigned char dstperm = vec_lvsr(0, dst);
+  const vector unsigned char neg1 =
+                                (const vector unsigned char) vec_splat_s8(-1);
+
+  const vector unsigned char dstmask =
+                                vec_perm((const vector unsigned char)vzero,
+                                                               neg1, dstperm);
+
+  vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
+
+  register int align = ((((unsigned long)src) - 2) % 16);
+
+  vector signed short srcP0A, srcP0B, srcP1A, srcP1B,
+                      srcP2A, srcP2B, srcP3A, srcP3B,
+                      srcM1A, srcM1B, srcM2A, srcM2B,
+                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
+                      pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
+                      psumA, psumB, sumA, sumB;
+
+  vector unsigned char sum, dst1, dst2, vdst, fsum,
+                       rsum, fdst1, fdst2;
+
+  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
+
+  for (i = 0 ; i < 16 ; i++) {
+    vector unsigned char srcR1 = vec_ld(-2, src);
+    vector unsigned char srcR2 = vec_ld(14, src);
+
+    switch (align) {
+    default: {
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = vec_perm(srcR1, srcR2, permP0);
+      srcP1 = vec_perm(srcR1, srcR2, permP1);
+      srcP2 = vec_perm(srcR1, srcR2, permP2);
+      srcP3 = vec_perm(srcR1, srcR2, permP3);
+    } break;
+    case 11: {
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = vec_perm(srcR1, srcR2, permP0);
+      srcP1 = vec_perm(srcR1, srcR2, permP1);
+      srcP2 = vec_perm(srcR1, srcR2, permP2);
+      srcP3 = srcR2;
+    } break;
+    case 12: {
+      vector unsigned char srcR3 = vec_ld(30, src);
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = vec_perm(srcR1, srcR2, permP0);
+      srcP1 = vec_perm(srcR1, srcR2, permP1);
+      srcP2 = srcR2;
+      srcP3 = vec_perm(srcR2, srcR3, permP3);
+    } break;
+    case 13: {
+      vector unsigned char srcR3 = vec_ld(30, src);
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = vec_perm(srcR1, srcR2, permP0);
+      srcP1 = srcR2;
+      srcP2 = vec_perm(srcR2, srcR3, permP2);
+      srcP3 = vec_perm(srcR2, srcR3, permP3);
+    } break;
+    case 14: {
+      vector unsigned char srcR3 = vec_ld(30, src);
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = srcR2;
+      srcP1 = vec_perm(srcR2, srcR3, permP1);
+      srcP2 = vec_perm(srcR2, srcR3, permP2);
+      srcP3 = vec_perm(srcR2, srcR3, permP3);
+    } break;
+    case 15: {
+      vector unsigned char srcR3 = vec_ld(30, src);
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = srcR2;
+      srcP0 = vec_perm(srcR2, srcR3, permP0);
+      srcP1 = vec_perm(srcR2, srcR3, permP1);
+      srcP2 = vec_perm(srcR2, srcR3, permP2);
+      srcP3 = vec_perm(srcR2, srcR3, permP3);
+    } break;
+    }
+
+    srcP0A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP0);
+    srcP0B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP0);
+    srcP1A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP1);
+    srcP1B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP1);
+
+    srcP2A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP2);
+    srcP2B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP2);
+    srcP3A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP3);
+    srcP3B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP3);
+
+    srcM1A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcM1);
+    srcM1B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcM1);
+    srcM2A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcM2);
+    srcM2B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcM2);
+
+    sum1A = vec_adds(srcP0A, srcP1A);
+    sum1B = vec_adds(srcP0B, srcP1B);
+    sum2A = vec_adds(srcM1A, srcP2A);
+    sum2B = vec_adds(srcM1B, srcP2B);
+    sum3A = vec_adds(srcM2A, srcP3A);
+    sum3B = vec_adds(srcM2B, srcP3B);
+
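+    /* 6-tap lowpass: 20*(P0+P1) - 5*(M1+P2) + (M2+P3), rounded with
+     * +16 and shifted right by 5. */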
+    pp1A = vec_mladd(sum1A, v20ss, v16ss);
+    pp1B = vec_mladd(sum1B, v20ss, v16ss);
+
+    pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+    pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+    pp3A = vec_add(sum3A, pp1A);
+    pp3B = vec_add(sum3B, pp1B);
+
+    psumA = vec_sub(pp3A, pp2A);
+    psumB = vec_sub(pp3B, pp2B);
+
+    sumA = vec_sra(psumA, v5us);
+    sumB = vec_sra(psumB, v5us);
+
+    sum = vec_packsu(sumA, sumB);
+
+    dst1 = vec_ld(0, dst);
+    dst2 = vec_ld(16, dst);
+    vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+
+    OP_U8_ALTIVEC(fsum, sum, vdst);
+
+    rsum = vec_perm(fsum, fsum, dstperm);
+    fdst1 = vec_sel(dst1, rsum, dstmask);
+    fdst2 = vec_sel(rsum, dst2, dstmask);
+
+    vec_st(fdst1, 0, dst);
+    vec_st(fdst2, 16, dst);
+
+    src += srcStride;
+    dst += dstStride;
+  }
+  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
+}
+
+/* this code assumes stride % 16 == 0 */
+static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
+  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
+
+  register int i;
+
+  const vector signed int vzero = vec_splat_s32(0);
+  const vector unsigned char perm = vec_lvsl(0, src);
+  const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+  const vector unsigned short v5us = vec_splat_u16(5);
+  const vector signed short v5ss = vec_splat_s16(5);
+  const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
+  const vector unsigned char dstperm = vec_lvsr(0, dst);
+  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
+  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
+
+  uint8_t *srcbis = src - (srcStride * 2);
+
+  const vector unsigned char srcM2a = vec_ld(0, srcbis);
+  const vector unsigned char srcM2b = vec_ld(16, srcbis);
+  const vector unsigned char srcM2 = vec_perm(srcM2a, srcM2b, perm);
+//  srcbis += srcStride;
+  const vector unsigned char srcM1a = vec_ld(0, srcbis += srcStride);
+  const vector unsigned char srcM1b = vec_ld(16, srcbis);
+  const vector unsigned char srcM1 = vec_perm(srcM1a, srcM1b, perm);
+//  srcbis += srcStride;
+  const vector unsigned char srcP0a = vec_ld(0, srcbis += srcStride);
+  const vector unsigned char srcP0b = vec_ld(16, srcbis);
+  const vector unsigned char srcP0 = vec_perm(srcP0a, srcP0b, perm);
+//  srcbis += srcStride;
+  const vector unsigned char srcP1a = vec_ld(0, srcbis += srcStride);
+  const vector unsigned char srcP1b = vec_ld(16, srcbis);
+  const vector unsigned char srcP1 = vec_perm(srcP1a, srcP1b, perm);
+//  srcbis += srcStride;
+  const vector unsigned char srcP2a = vec_ld(0, srcbis += srcStride);
+  const vector unsigned char srcP2b = vec_ld(16, srcbis);
+  const vector unsigned char srcP2 = vec_perm(srcP2a, srcP2b, perm);
+//  srcbis += srcStride;
+
+  vector signed short srcM2ssA = (vector signed short)
+                                vec_mergeh((vector unsigned char)vzero, srcM2);
+  vector signed short srcM2ssB = (vector signed short)
+                                vec_mergel((vector unsigned char)vzero, srcM2);
+  vector signed short srcM1ssA = (vector signed short)
+                                vec_mergeh((vector unsigned char)vzero, srcM1);
+  vector signed short srcM1ssB = (vector signed short)
+                                vec_mergel((vector unsigned char)vzero, srcM1);
+  vector signed short srcP0ssA = (vector signed short)
+                                vec_mergeh((vector unsigned char)vzero, srcP0);
+  vector signed short srcP0ssB = (vector signed short)
+                                vec_mergel((vector unsigned char)vzero, srcP0);
+  vector signed short srcP1ssA = (vector signed short)
+                                vec_mergeh((vector unsigned char)vzero, srcP1);
+  vector signed short srcP1ssB = (vector signed short)
+                                vec_mergel((vector unsigned char)vzero, srcP1);
+  vector signed short srcP2ssA = (vector signed short)
+                                vec_mergeh((vector unsigned char)vzero, srcP2);
+  vector signed short srcP2ssB = (vector signed short)
+                                vec_mergel((vector unsigned char)vzero, srcP2);
+
+  vector signed short pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
+                      psumA, psumB, sumA, sumB,
+                      srcP3ssA, srcP3ssB,
+                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;
+
+  vector unsigned char sum, dst1, dst2, vdst, fsum, rsum, fdst1, fdst2,
+                       srcP3a, srcP3b, srcP3;
+
+  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
+
+  for (i = 0 ; i < 16 ; i++) {
+    srcP3a = vec_ld(0, srcbis += srcStride);
+    srcP3b = vec_ld(16, srcbis);
+    srcP3 = vec_perm(srcP3a, srcP3b, perm);
+    srcP3ssA = (vector signed short)
+                                vec_mergeh((vector unsigned char)vzero, srcP3);
+    srcP3ssB = (vector signed short)
+                                vec_mergel((vector unsigned char)vzero, srcP3);
+//    srcbis += srcStride;
+
+    sum1A = vec_adds(srcP0ssA, srcP1ssA);
+    sum1B = vec_adds(srcP0ssB, srcP1ssB);
+    sum2A = vec_adds(srcM1ssA, srcP2ssA);
+    sum2B = vec_adds(srcM1ssB, srcP2ssB);
+    sum3A = vec_adds(srcM2ssA, srcP3ssA);
+    sum3B = vec_adds(srcM2ssB, srcP3ssB);
+
+    srcM2ssA = srcM1ssA;
+    srcM2ssB = srcM1ssB;
+    srcM1ssA = srcP0ssA;
+    srcM1ssB = srcP0ssB;
+    srcP0ssA = srcP1ssA;
+    srcP0ssB = srcP1ssB;
+    srcP1ssA = srcP2ssA;
+    srcP1ssB = srcP2ssB;
+    srcP2ssA = srcP3ssA;
+    srcP2ssB = srcP3ssB;
+
+    pp1A = vec_mladd(sum1A, v20ss, v16ss);
+    pp1B = vec_mladd(sum1B, v20ss, v16ss);
+
+    pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+    pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+    pp3A = vec_add(sum3A, pp1A);
+    pp3B = vec_add(sum3B, pp1B);
+
+    psumA = vec_sub(pp3A, pp2A);
+    psumB = vec_sub(pp3B, pp2B);
+
+    sumA = vec_sra(psumA, v5us);
+    sumB = vec_sra(psumB, v5us);
+
+    sum = vec_packsu(sumA, sumB);
+
+    dst1 = vec_ld(0, dst);
+    dst2 = vec_ld(16, dst);
+    vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+
+    OP_U8_ALTIVEC(fsum, sum, vdst);
+
+    rsum = vec_perm(fsum, fsum, dstperm);
+    fdst1 = vec_sel(dst1, rsum, dstmask);
+    fdst2 = vec_sel(rsum, dst2, dstmask);
+
+    vec_st(fdst1, 0, dst);
+    vec_st(fdst2, 16, dst);
+
+    dst += dstStride;
+  }
+  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
+}
+
+/* this code assumes stride % 16 == 0 *and* that tmp is properly aligned */
+static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
+  POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+  register int i;
+  const vector signed int vzero = vec_splat_s32(0);
+  const vector unsigned char permM2 = vec_lvsl(-2, src);
+  const vector unsigned char permM1 = vec_lvsl(-1, src);
+  const vector unsigned char permP0 = vec_lvsl(+0, src);
+  const vector unsigned char permP1 = vec_lvsl(+1, src);
+  const vector unsigned char permP2 = vec_lvsl(+2, src);
+  const vector unsigned char permP3 = vec_lvsl(+3, src);
+  const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+  const vector unsigned int v10ui = vec_splat_u32(10);
+  const vector signed short v5ss = vec_splat_s16(5);
+  const vector signed short v1ss = vec_splat_s16(1);
+  const vector signed int v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
+  const vector unsigned int v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));
+
+  register int align = ((((unsigned long)src) - 2) % 16);
+
+  const vector unsigned char neg1 = (const vector unsigned char)
+                                                        vec_splat_s8(-1);
+
+  vector signed short srcP0A, srcP0B, srcP1A, srcP1B,
+                      srcP2A, srcP2B, srcP3A, srcP3B,
+                      srcM1A, srcM1B, srcM2A, srcM2B,
+                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
+                      pp1A, pp1B, pp2A, pp2B, psumA, psumB;
+
+  const vector unsigned char dstperm = vec_lvsr(0, dst);
+
+  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
+
+  const vector unsigned char mperm = (const vector unsigned char)
+    AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
+        0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
+  int16_t *tmpbis = tmp;
+
+  vector signed short tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
+                      tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
+                      tmpP2ssA, tmpP2ssB;
+
+  vector signed int pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
+                    pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
+                    pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
+                    ssumAe, ssumAo, ssumBe, ssumBo;
+  vector unsigned char fsum, sumv, sum, dst1, dst2, vdst,
+                       rsum, fdst1, fdst2;
+  vector signed short ssume, ssumo;
+
+  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+  src -= (2 * srcStride);
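+  /* The first pass filters 21 rows: the 16 output rows plus the 5
+   * extra that the vertical 6-tap pass below needs (2 above the
+   * block, 3 below). */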
+  for (i = 0 ; i < 21 ; i++) {
+    vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
+    vector unsigned char srcR1 = vec_ld(-2, src);
+    vector unsigned char srcR2 = vec_ld(14, src);
+
+    switch (align) {
+    default: {
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = vec_perm(srcR1, srcR2, permP0);
+      srcP1 = vec_perm(srcR1, srcR2, permP1);
+      srcP2 = vec_perm(srcR1, srcR2, permP2);
+      srcP3 = vec_perm(srcR1, srcR2, permP3);
+    } break;
+    case 11: {
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = vec_perm(srcR1, srcR2, permP0);
+      srcP1 = vec_perm(srcR1, srcR2, permP1);
+      srcP2 = vec_perm(srcR1, srcR2, permP2);
+      srcP3 = srcR2;
+    } break;
+    case 12: {
+      vector unsigned char srcR3 = vec_ld(30, src);
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = vec_perm(srcR1, srcR2, permP0);
+      srcP1 = vec_perm(srcR1, srcR2, permP1);
+      srcP2 = srcR2;
+      srcP3 = vec_perm(srcR2, srcR3, permP3);
+    } break;
+    case 13: {
+      vector unsigned char srcR3 = vec_ld(30, src);
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = vec_perm(srcR1, srcR2, permP0);
+      srcP1 = srcR2;
+      srcP2 = vec_perm(srcR2, srcR3, permP2);
+      srcP3 = vec_perm(srcR2, srcR3, permP3);
+    } break;
+    case 14: {
+      vector unsigned char srcR3 = vec_ld(30, src);
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = vec_perm(srcR1, srcR2, permM1);
+      srcP0 = srcR2;
+      srcP1 = vec_perm(srcR2, srcR3, permP1);
+      srcP2 = vec_perm(srcR2, srcR3, permP2);
+      srcP3 = vec_perm(srcR2, srcR3, permP3);
+    } break;
+    case 15: {
+      vector unsigned char srcR3 = vec_ld(30, src);
+      srcM2 = vec_perm(srcR1, srcR2, permM2);
+      srcM1 = srcR2;
+      srcP0 = vec_perm(srcR2, srcR3, permP0);
+      srcP1 = vec_perm(srcR2, srcR3, permP1);
+      srcP2 = vec_perm(srcR2, srcR3, permP2);
+      srcP3 = vec_perm(srcR2, srcR3, permP3);
+    } break;
+    }
+
+    srcP0A = (vector signed short)
+                            vec_mergeh((vector unsigned char)vzero, srcP0);
+    srcP0B = (vector signed short)
+                            vec_mergel((vector unsigned char)vzero, srcP0);
+    srcP1A = (vector signed short)
+                            vec_mergeh((vector unsigned char)vzero, srcP1);
+    srcP1B = (vector signed short)
+                            vec_mergel((vector unsigned char)vzero, srcP1);
+
+    srcP2A = (vector signed short)
+                            vec_mergeh((vector unsigned char)vzero, srcP2);
+    srcP2B = (vector signed short)
+                            vec_mergel((vector unsigned char)vzero, srcP2);
+    srcP3A = (vector signed short)
+                            vec_mergeh((vector unsigned char)vzero, srcP3);
+    srcP3B = (vector signed short)
+                            vec_mergel((vector unsigned char)vzero, srcP3);
+
+    srcM1A = (vector signed short)
+                            vec_mergeh((vector unsigned char)vzero, srcM1);
+    srcM1B = (vector signed short)
+                            vec_mergel((vector unsigned char)vzero, srcM1);
+    srcM2A = (vector signed short)
+                            vec_mergeh((vector unsigned char)vzero, srcM2);
+    srcM2B = (vector signed short)
+                            vec_mergel((vector unsigned char)vzero, srcM2);
+
+    sum1A = vec_adds(srcP0A, srcP1A);
+    sum1B = vec_adds(srcP0B, srcP1B);
+    sum2A = vec_adds(srcM1A, srcP2A);
+    sum2B = vec_adds(srcM1B, srcP2B);
+    sum3A = vec_adds(srcM2A, srcP3A);
+    sum3B = vec_adds(srcM2B, srcP3B);
+
+    pp1A = vec_mladd(sum1A, v20ss, sum3A);
+    pp1B = vec_mladd(sum1B, v20ss, sum3B);
+
+    pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+    pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+    psumA = vec_sub(pp1A, pp2A);
+    psumB = vec_sub(pp1B, pp2B);
+
+    vec_st(psumA, 0, tmp);
+    vec_st(psumB, 16, tmp);
+
+    src += srcStride;
+    tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
+  }
+
+  tmpM2ssA = vec_ld(0, tmpbis);
+  tmpM2ssB = vec_ld(16, tmpbis);
+  tmpbis += tmpStride;
+  tmpM1ssA = vec_ld(0, tmpbis);
+  tmpM1ssB = vec_ld(16, tmpbis);
+  tmpbis += tmpStride;
+  tmpP0ssA = vec_ld(0, tmpbis);
+  tmpP0ssB = vec_ld(16, tmpbis);
+  tmpbis += tmpStride;
+  tmpP1ssA = vec_ld(0, tmpbis);
+  tmpP1ssB = vec_ld(16, tmpbis);
+  tmpbis += tmpStride;
+  tmpP2ssA = vec_ld(0, tmpbis);
+  tmpP2ssB = vec_ld(16, tmpbis);
+  tmpbis += tmpStride;
+
+  for (i = 0 ; i < 16 ; i++) {
+    const vector signed short tmpP3ssA = vec_ld(0, tmpbis);
+    const vector signed short tmpP3ssB = vec_ld(16, tmpbis);
+
+    const vector signed short sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
+    const vector signed short sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
+    const vector signed short sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
+    const vector signed short sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
+    const vector signed short sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
+    const vector signed short sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
+
+    tmpbis += tmpStride;
+
+    tmpM2ssA = tmpM1ssA;
+    tmpM2ssB = tmpM1ssB;
+    tmpM1ssA = tmpP0ssA;
+    tmpM1ssB = tmpP0ssB;
+    tmpP0ssA = tmpP1ssA;
+    tmpP0ssB = tmpP1ssB;
+    tmpP1ssA = tmpP2ssA;
+    tmpP1ssB = tmpP2ssB;
+    tmpP2ssA = tmpP3ssA;
+    tmpP2ssB = tmpP3ssB;
+
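+    /* Second, vertical pass of the 2D filter: the even/odd 32-bit
+     * products combine as 20*(P0+P1) - 5*(M1+P2) + (M2+P3) + 512,
+     * then >> 10. */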
+    pp1Ae = vec_mule(sum1A, v20ss);
+    pp1Ao = vec_mulo(sum1A, v20ss);
+    pp1Be = vec_mule(sum1B, v20ss);
+    pp1Bo = vec_mulo(sum1B, v20ss);
+
+    pp2Ae = vec_mule(sum2A, v5ss);
+    pp2Ao = vec_mulo(sum2A, v5ss);
+    pp2Be = vec_mule(sum2B, v5ss);
+    pp2Bo = vec_mulo(sum2B, v5ss);
+
+    pp3Ae = vec_sra((vector signed int)sum3A, v16ui);
+    pp3Ao = vec_mulo(sum3A, v1ss);
+    pp3Be = vec_sra((vector signed int)sum3B, v16ui);
+    pp3Bo = vec_mulo(sum3B, v1ss);
+
+    pp1cAe = vec_add(pp1Ae, v512si);
+    pp1cAo = vec_add(pp1Ao, v512si);
+    pp1cBe = vec_add(pp1Be, v512si);
+    pp1cBo = vec_add(pp1Bo, v512si);
+
+    pp32Ae = vec_sub(pp3Ae, pp2Ae);
+    pp32Ao = vec_sub(pp3Ao, pp2Ao);
+    pp32Be = vec_sub(pp3Be, pp2Be);
+    pp32Bo = vec_sub(pp3Bo, pp2Bo);
+
+    sumAe = vec_add(pp1cAe, pp32Ae);
+    sumAo = vec_add(pp1cAo, pp32Ao);
+    sumBe = vec_add(pp1cBe, pp32Be);
+    sumBo = vec_add(pp1cBo, pp32Bo);
+
+    ssumAe = vec_sra(sumAe, v10ui);
+    ssumAo = vec_sra(sumAo, v10ui);
+    ssumBe = vec_sra(sumBe, v10ui);
+    ssumBo = vec_sra(sumBo, v10ui);
+
+    ssume = vec_packs(ssumAe, ssumBe);
+    ssumo = vec_packs(ssumAo, ssumBo);
+
+    sumv = vec_packsu(ssume, ssumo);
+    sum = vec_perm(sumv, sumv, mperm);
+
+    dst1 = vec_ld(0, dst);
+    dst2 = vec_ld(16, dst);
+    vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+
+    OP_U8_ALTIVEC(fsum, sum, vdst);
+
+    rsum = vec_perm(fsum, fsum, dstperm);
+    fdst1 = vec_sel(dst1, rsum, dstmask);
+    fdst2 = vec_sel(rsum, dst2, dstmask);
+
+    vec_st(fdst1, 0, dst);
+    vec_st(fdst2, 16, dst);
+
+    dst += dstStride;
+  }
+  POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ppc/snow_altivec.c	Sun Aug 13 08:26:43 2006 +0000
@@ -0,0 +1,786 @@
+/*
+ * AltiVec-optimized Snow DSP utils
+ * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+#include "dsputil_altivec.h"
+#include "../snow.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+//FIXME remove this duplication
+#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num)))
+
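+/* Returns the cached line when present, otherwise pops a spare buffer
+ * off the data stack and installs it for that line. */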
+static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line)
+{
+    int offset;
+    DWTELEM * buffer;
+
+//  av_log(NULL, AV_LOG_DEBUG, "Cache hit: %d\n", line);
+
+    assert(buf->data_stack_top >= 0);
+//  assert(!buf->line[line]);
+    if (buf->line[line])
+        return buf->line[line];
+
+    offset = buf->line_width * line;
+    buffer = buf->data_stack[buf->data_stack_top];
+    buf->data_stack_top--;
+    buf->line[line] = buffer;
+
+//  av_log(NULL, AV_LOG_DEBUG, "slice_buffer_load_line: line: %d remaining: %d\n", line, buf->data_stack_top + 1);
+
+    return buffer;
+}
+
+
+//altivec code
+
+void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width)
+{
+    const int w2= (width+1)>>1;
+    DECLARE_ALIGNED_16(DWTELEM, temp[(width>>1)]);
+    const int w_l= (width>>1);
+    const int w_r= w2 - 1;
+    int i;
+    vector signed int t1, t2, x, y, tmp1, tmp2;
+    vector signed int *vbuf, *vtmp;
+    vector unsigned char align;
+
+    { // Lift 0
+        DWTELEM * const ref = b + w2 - 1;
+        DWTELEM b_0 = b[0];
+        vbuf = (vector signed int *)b;
+
+        tmp1 = vec_ld (0, ref);
+        align = vec_lvsl (0, ref);
+        tmp2 = vec_ld (15, ref);
+        t1= vec_perm(tmp1, tmp2, align);
+
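+        /* t1 holds ref[i..i+3]; in the loop, vec_sld(t1, t2, 4)
+         * shifts in the next element so each lane computes
+         * ref[i] + ref[i+1]. */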
+        i = 0;
+
+        for (; i<w_l-15; i+=16) {
+#if 0
+        b[i+0] = b[i+0] - ((3 * (ref[i+0] + ref[i+1]) + 4) >> 3);
+        b[i+1] = b[i+1] - ((3 * (ref[i+1] + ref[i+2]) + 4) >> 3);
+        b[i+2] = b[i+2] - ((3 * (ref[i+2] + ref[i+3]) + 4) >> 3);
+        b[i+3] = b[i+3] - ((3 * (ref[i+3] + ref[i+4]) + 4) >> 3);
+#else
+
+        tmp1 = vec_ld (0, ref+4+i);
+        tmp2 = vec_ld (15, ref+4+i);
+
+        t2 = vec_perm(tmp1, tmp2, align);
+
+        y = vec_add(t1,vec_sld(t1,t2,4));
+        y = vec_add(vec_add(y,y),y);
+
+        tmp1 = vec_ld (0, ref+8+i);
+
+        y = vec_add(y, vec_splat_s32(4));
+        y = vec_sra(y, vec_splat_u32(3));
+
+        tmp2 = vec_ld (15, ref+8+i);
+
+        *vbuf = vec_sub(*vbuf, y);
+
+        t1=t2;
+
+        vbuf++;
+
+        t2 = vec_perm(tmp1, tmp2, align);
+
+        y = vec_add(t1,vec_sld(t1,t2,4));
+        y = vec_add(vec_add(y,y),y);
+
+        tmp1 = vec_ld (0, ref+12+i);
+
+        y = vec_add(y, vec_splat_s32(4));
+        y = vec_sra(y, vec_splat_u32(3));
+
+        tmp2 = vec_ld (15, ref+12+i);
+
+        *vbuf = vec_sub(*vbuf, y);
+
+        t1=t2;
+
+        vbuf++;
+
+        t2 = vec_perm(tmp1, tmp2, align);
+
+        y = vec_add(t1,vec_sld(t1,t2,4));
+        y = vec_add(vec_add(y,y),y);
+
+        tmp1 = vec_ld (0, ref+16+i);
+
+        y = vec_add(y, vec_splat_s32(4));
+        y = vec_sra(y, vec_splat_u32(3));
+
+        tmp2 = vec_ld (15, ref+16+i);
+
+        *vbuf = vec_sub(*vbuf, y);
+
+        t1=t2;
+
+        t2 = vec_perm(tmp1, tmp2, align);
+
+        y = vec_add(t1,vec_sld(t1,t2,4));
+        y = vec_add(vec_add(y,y),y);
+
+        vbuf++;
+
+        y = vec_add(y, vec_splat_s32(4));
+        y = vec_sra(y, vec_splat_u32(3));
+        *vbuf = vec_sub(*vbuf, y);
+
+        t1=t2;
+
+        vbuf++;
+
+#endif
+        }
+
+        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
+        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
+    }
+
+    { // Lift 1
+        DWTELEM * const dst = b+w2;
+
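+        /* Peel scalar iterations until dst is 16-byte aligned, then handle
+         * four coefficients per vector pass; b stays unaligned and goes
+         * through the vec_perm load idiom. */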
+        i = 0;
+        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
+            dst[i] = dst[i] - (b[i] + b[i + 1]);
+        }
+
+        align = vec_lvsl(0, b+i);
+        tmp1 = vec_ld(0, b+i);
+        vbuf = (vector signed int*) (dst + i);
+        tmp2 = vec_ld(15, b+i);
+
+        t1 = vec_perm(tmp1, tmp2, align);
+
+        for (; i<w_r-3; i+=4) {
+
+#if 0
+            dst[i]   = dst[i]   - (b[i]   + b[i + 1]);
+            dst[i+1] = dst[i+1] - (b[i+1] + b[i + 2]);
+            dst[i+2] = dst[i+2] - (b[i+2] + b[i + 3]);
+            dst[i+3] = dst[i+3] - (b[i+3] + b[i + 4]);
+#else
+
+        tmp1 = vec_ld(0, b+4+i);
+        tmp2 = vec_ld(15, b+4+i);
+
+        t2 = vec_perm(tmp1, tmp2, align);
+
+        y = vec_add(t1, vec_sld(t1,t2,4));
+        *vbuf = vec_sub (*vbuf, y);
+
+        vbuf++;
+
+        t1 = t2;
+
+#endif
+
+        }
+
+        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
+    }
+
+    { // Lift 2
+        DWTELEM * const ref = b+w2 - 1;
+        DWTELEM b_0 = b[0];
+        vbuf= (vector signed int *) b;
+
+        tmp1 = vec_ld (0, ref);
+        align = vec_lvsl (0, ref);
+        tmp2 = vec_ld (15, ref);
+        t1= vec_perm(tmp1, tmp2, align);
+
+        i = 0;
+        for (; i<w_l-15; i+=16) {
+#if 0
+            b[i]   = b[i]   - (((8 -(ref[i]   + ref[i+1])) - (b[i]  <<2)) >> 4);
+            b[i+1] = b[i+1] - (((8 -(ref[i+1] + ref[i+2])) - (b[i+1]<<2)) >> 4);
+            b[i+2] = b[i+2] - (((8 -(ref[i+2] + ref[i+3])) - (b[i+2]<<2)) >> 4);
+            b[i+3] = b[i+3] - (((8 -(ref[i+3] + ref[i+4])) - (b[i+3]<<2)) >> 4);
+#else
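+            /* Vector form of the liftS step above: y = 8 - (ref[i]+ref[i+1]),
+             * x = b[i]<<2, then b[i] -= (y - x) >> 4, unrolled 4x with the
+             * same sliding-window loads as Lift 0. */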
+            tmp1 = vec_ld (0, ref+4+i);
+            tmp2 = vec_ld (15, ref+4+i);
+
+            t2 = vec_perm(tmp1, tmp2, align);
+
+            y = vec_add(t1,vec_sld(t1,t2,4));
+            y = vec_sub(vec_splat_s32(8),y);
+
+            tmp1 = vec_ld (0, ref+8+i);
+
+            x = vec_sl(*vbuf,vec_splat_u32(2));
+            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+
+            tmp2 = vec_ld (15, ref+8+i);
+
+            *vbuf = vec_sub( *vbuf, y);
+
+            t1 = t2;
+
+            vbuf++;
+
+            t2 = vec_perm(tmp1, tmp2, align);
+
+            y = vec_add(t1,vec_sld(t1,t2,4));
+            y = vec_sub(vec_splat_s32(8),y);
+
+            tmp1 = vec_ld (0, ref+12+i);
+
+            x = vec_sl(*vbuf,vec_splat_u32(2));
+            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+
+            tmp2 = vec_ld (15, ref+12+i);
+
+            *vbuf = vec_sub( *vbuf, y);
+
+            t1 = t2;
+
+            vbuf++;
+
+            t2 = vec_perm(tmp1, tmp2, align);
+
+            y = vec_add(t1,vec_sld(t1,t2,4));
+            y = vec_sub(vec_splat_s32(8),y);
+
+            tmp1 = vec_ld (0, ref+16+i);
+
+            x = vec_sl(*vbuf,vec_splat_u32(2));
+            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+
+            tmp2 = vec_ld (15, ref+16+i);
+
+            *vbuf = vec_sub( *vbuf, y);
+
+            t1 = t2;
+
+            vbuf++;
+
+            t2 = vec_perm(tmp1, tmp2, align);
+
+            y = vec_add(t1,vec_sld(t1,t2,4));
+            y = vec_sub(vec_splat_s32(8),y);
+
+            t1 = t2;
+
+            x = vec_sl(*vbuf,vec_splat_u32(2));
+            y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+            *vbuf = vec_sub( *vbuf, y);
+
+            vbuf++;
+
+#endif
+        }
+
+        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
+        b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
+    }
+
+    { // Lift 3
+        DWTELEM * const src = b+w2;
+
+        vbuf = (vector signed int *)b;
+        vtmp = (vector signed int *)temp;
+
+        i = 0;
+        align = vec_lvsl(0, src);
+
+        for (; i<w_r-3; i+=4) {
+#if 0
+            temp[i] = src[i] - ((-3*(b[i] + b[i+1]))>>1);
+            temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1);
+            temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1);
+            temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1);
+#else
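+            /* temp[i] = src[i] - ((-3*(b[i]+b[i+1])) >> 1): the sum comes
+             * from vbuf[0]/vbuf[1] (b is aligned here), is negated and
+             * tripled, while src is loaded unaligned through vec_perm. */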
+            tmp1 = vec_ld(0,src+i);
+            t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4));
+            tmp2 = vec_ld(15,src+i);
+            t1 = vec_sub(vec_splat_s32(0),t1); //FIXME negation via vec_sub(0,x) wastes an op
+            t1 = vec_add(t1,vec_add(t1,t1));
+            t2 = vec_perm(tmp1 ,tmp2 ,align);
+            t1 = vec_sra(t1,vec_splat_u32(1));
+            vbuf++;
+            *vtmp = vec_sub(t2,t1);
+            vtmp++;
+
+#endif
+
+        }
+
+        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1);
+    }
+
+    {
+    //Interleave
+        int a;
+        vector signed int *t = (vector signed int *)temp,
+                          *v = (vector signed int *)b;
+
+        snow_interleave_line_header(&i, width, b, temp);
+
+        for (; (i & 0xE) != 0xE; i-=2){
+            b[i+1] = temp[i>>1];
+            b[i] = b[i>>1];
+        }
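+        /* Back-to-front vector interleave: each pass merges two low-band
+         * vectors from b with two high-band vectors from temp into four
+         * output vectors (16 coefficients). */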
+        for (i-=14; i>=0; i-=16){
+           a=i/4;
+
+           v[a+3]=vec_mergel(v[(a>>1)+1],t[(a>>1)+1]);
+           v[a+2]=vec_mergeh(v[(a>>1)+1],t[(a>>1)+1]);
+           v[a+1]=vec_mergel(v[a>>1],t[a>>1]);
+           v[a]=vec_mergeh(v[a>>1],t[a>>1]);
+
+        }
+
+    }
+}
+
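+/* Vertical counterpart: apply the four lifting updates across six adjacent
+ * rows (b0..b5), four coefficients per iteration; the scalar reference is
+ * again kept under #if 0. */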
+void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width)
+{
+    int i, w4 = width/4;
+    vector signed int *v0, *v1,*v2,*v3,*v4,*v5;
+    vector signed int t1, t2;
+
+    v0=(vector signed int *)b0;
+    v1=(vector signed int *)b1;
+    v2=(vector signed int *)b2;
+    v3=(vector signed int *)b3;
+    v4=(vector signed int *)b4;
+    v5=(vector signed int *)b5;
+
+    for (i=0; i<w4; i++)
+    {
+
+    #if 0
+        b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
+        b3[i] -= ((b2[i] + b4[i]));
+        b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4;
+        b1[i] += (3*(b0[i] + b2[i]))>>1;
+    #else
+        t1 = vec_add(v3[i], v5[i]);
+        t2 = vec_add(t1, vec_add(t1,t1));
+        t1 = vec_add(t2, vec_splat_s32(4));
+        v4[i] = vec_sub(v4[i], vec_sra(t1,vec_splat_u32(3)));
+
+        v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i]));
+
+        t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i]));
+        t2 = vec_sl(v2[i], vec_splat_u32(2));
+        v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2),vec_splat_u32(4)));
+        t1 = vec_add(v0[i], v2[i]);
+        t2 = vec_add(t1, vec_add(t1,t1));
+        v1[i] = vec_add(v1[i], vec_sra(t2,vec_splat_u32(1)));
+
+    #endif
+    }
+
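+    /* Scalar tail: i*=4 converts the vector count back to a coefficient
+     * index, then the remaining width%4 coefficients get the generic
+     * lifting formulas. */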
+    for(i*=4; i < width; i++)
+    {
+        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
+        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
+        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
+        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
+    }
+}
+
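+/* LOAD_BLOCKS / LOAD_OBMCS: misaligned loads of one row of the four
+ * prediction blocks and of the four OBMC weight windows, using the usual
+ * vec_ld pair + vec_lvsl + vec_perm idiom. */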
+#define LOAD_BLOCKS \
+            tmp1 = vec_ld(0, &block[3][y*src_stride]);\
+            align = vec_lvsl(0, &block[3][y*src_stride]);\
+            tmp2 = vec_ld(15, &block[3][y*src_stride]);\
+\
+            b3 = vec_perm(tmp1,tmp2,align);\
+\
+            tmp1 = vec_ld(0, &block[2][y*src_stride]);\
+            align = vec_lvsl(0, &block[2][y*src_stride]);\
+            tmp2 = vec_ld(15, &block[2][y*src_stride]);\
+\
+            b2 = vec_perm(tmp1,tmp2,align);\
+\
+            tmp1 = vec_ld(0, &block[1][y*src_stride]);\
+            align = vec_lvsl(0, &block[1][y*src_stride]);\
+            tmp2 = vec_ld(15, &block[1][y*src_stride]);\
+\
+            b1 = vec_perm(tmp1,tmp2,align);\
+\
+            tmp1 = vec_ld(0, &block[0][y*src_stride]);\
+            align = vec_lvsl(0, &block[0][y*src_stride]);\
+            tmp2 = vec_ld(15, &block[0][y*src_stride]);\
+\
+            b0 = vec_perm(tmp1,tmp2,align);
+
+#define LOAD_OBMCS \
+            tmp1 = vec_ld(0, obmc1);\
+            align = vec_lvsl(0, obmc1);\
+            tmp2 = vec_ld(15, obmc1);\
+\
+            ob1 = vec_perm(tmp1,tmp2,align);\
+\
+            tmp1 = vec_ld(0, obmc2);\
+            align = vec_lvsl(0, obmc2);\
+            tmp2 = vec_ld(15, obmc2);\
+\
+            ob2 = vec_perm(tmp1,tmp2,align);\
+\
+            tmp1 = vec_ld(0, obmc3);\
+            align = vec_lvsl(0, obmc3);\
+            tmp2 = vec_ld(15, obmc3);\
+\
+            ob3 = vec_perm(tmp1,tmp2,align);\
+\
+            tmp1 = vec_ld(0, obmc4);\
+            align = vec_lvsl(0, obmc4);\
+            tmp2 = vec_ld(15, obmc4);\
+\
+            ob4 = vec_perm(tmp1,tmp2,align);
+
+/* interleave logic
+ * h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ]
+ * h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ]
+ * h  <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ]
+ */
+
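+/* STEPS_0_1 performs the interleave sketched above on the low (mergeh)
+ * halves, pairing each pixel's four u8 OBMC weights (ih/ih1) with the four
+ * u8 block samples (il/il1); vec_msum then produces the 4-tap weighted sum
+ * in each 32-bit lane of v[0] and v[1]. */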
+#define STEPS_0_1\
+            h1 = (vector unsigned short)\
+                 vec_mergeh(ob1, ob2);\
+\
+            h2 = (vector unsigned short)\
+                 vec_mergeh(ob3, ob4);\
+\
+            ih = (vector unsigned char)\
+                 vec_mergeh(h1,h2);\
+\
+            l1 = (vector unsigned short) vec_mergeh(b3, b2);\
+\
+            ih1 = (vector unsigned char) vec_mergel(h1, h2);\
+\
+            l2 = (vector unsigned short) vec_mergeh(b1, b0);\
+\
+            il = (vector unsigned char) vec_mergeh(l1, l2);\
+\
+            v[0] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
+\
+            il1 = (vector unsigned char) vec_mergel(l1, l2);\
+\
+            v[1] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
+
+#define FINAL_STEP_SCALAR\
+        for(x=0; x<b_w; x++)\
+            if(add){\
+                vbuf[x] += dst[x + src_x];\
+                vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;\
+                if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);\
+                dst8[x + y*src_stride] = vbuf[x];\
+            }else{\
+                dst[x + src_x] -= vbuf[x];\
+            }
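+
+/* FINAL_STEP_SCALAR: with add set, fold dst into the fixed-point sum, round
+ * away the FRAC_BITS fraction, clamp to 0..255 (the (v & ~255) test) and
+ * store the pixel to dst8; otherwise subtract the weighted prediction from
+ * dst. */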
+
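+/* b_w == 8, obmc_stride == 16, src_x not a multiple of 16 (see the
+ * dispatcher at the bottom): vector weighted sum, scalar final step. */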
+static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
+                                             const int obmc_stride,
+                                             uint8_t * * block, int b_w,
+                                             int b_h, int src_x, int src_y,
+                                             int src_stride, slice_buffer * sb,
+                                             int add, uint8_t * dst8)
+{
+    int y, x;
+    DWTELEM * dst;
+    vector unsigned short h1, h2, l1, l2;
+    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+    vector unsigned char b0,b1,b2,b3;
+    vector unsigned char ob1,ob2,ob3,ob4;
+
+    DECLARE_ALIGNED_16(int, vbuf[16]);
+    vector signed int *v = (vector signed int *)vbuf, *d;
+
+    for(y=0; y<b_h; y++){
+        //FIXME ugly misuse of obmc_stride
+
+        uint8_t *obmc1= obmc + y*obmc_stride;
+        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+        dst = slice_buffer_get_line(sb, src_y + y);
+        d = (vector signed int *)(dst + src_x);
+
+//FIXME I could avoid some loads!
+
+        // load blocks
+        LOAD_BLOCKS
+
+        // load obmcs
+        LOAD_OBMCS
+
+        // steps 0 1
+        STEPS_0_1
+
+        FINAL_STEP_SCALAR
+
+    }
+}
+
+#define STEPS_2_3\
+            h1 = (vector unsigned short) vec_mergel(ob1, ob2);\
+\
+            h2 = (vector unsigned short) vec_mergel(ob3, ob4);\
+\
+            ih = (vector unsigned char) vec_mergeh(h1,h2);\
+\
+            l1 = (vector unsigned short) vec_mergel(b3, b2);\
+\
+            l2 = (vector unsigned short) vec_mergel(b1, b0);\
+\
+            ih1 = (vector unsigned char) vec_mergel(h1,h2);\
+\
+            il = (vector unsigned char) vec_mergeh(l1,l2);\
+\
+            v[2] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
+\
+            il1 = (vector unsigned char) vec_mergel(l1,l2);\
+\
+            v[3] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
+
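+/* STEPS_2_3: the same msum pattern on the high (mergel) halves, filling
+ * v[2] and v[3]; only needed when b_w == 16. */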
+
+static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
+                                             const int obmc_stride,
+                                             uint8_t * * block, int b_w,
+                                             int b_h, int src_x, int src_y,
+                                             int src_stride, slice_buffer * sb,
+                                             int add, uint8_t * dst8)
+{
+    int y, x;
+    DWTELEM * dst;
+    vector unsigned short h1, h2, l1, l2;
+    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+    vector unsigned char b0,b1,b2,b3;
+    vector unsigned char ob1,ob2,ob3,ob4;
+    DECLARE_ALIGNED_16(int, vbuf[b_w]);
+    vector signed int *v = (vector signed int *)vbuf, *d;
+
+    for(y=0; y<b_h; y++){
+        //FIXME ugly misuse of obmc_stride
+
+        uint8_t *obmc1= obmc + y*obmc_stride;
+        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+        dst = slice_buffer_get_line(sb, src_y + y);
+        d = (vector signed int *)(dst + src_x);
+
+        // load blocks
+        LOAD_BLOCKS
+
+        // load obmcs
+        LOAD_OBMCS
+
+        // steps 0 1 2 3
+        STEPS_0_1
+
+        STEPS_2_3
+
+        FINAL_STEP_SCALAR
+
+    }
+}
+
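+/* FINAL_STEP_VEC: all-vector epilogue for the aligned case.  Rounds the
+ * fixed-point sums ((v + 128) >> 8, i.e. FRAC_BITS hard-coded to 8 by the
+ * splat'd constants) and clamps each lane to 0..255; the byte store to dst8
+ * stays scalar.  Without add, the prediction vectors are subtracted in
+ * place. */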
+#define FINAL_STEP_VEC \
+\
+    if(add)\
+        {\
+            for(x=0; x<b_w/4; x++)\
+            {\
+                v[x] = vec_add(v[x], d[x]);\
+                v[x] = vec_sra(vec_add(v[x],\
+                                       vec_sl( vec_splat_s32(1),\
+                                               vec_splat_u32(7))),\
+                               vec_splat_u32(8));\
+\
+                /* mask <- true where v already fits in 8 bits, mirroring
+                 * the scalar (v & ~255) in-range test */\
+                mask = (vector bool int) vec_sl((vector signed int)\
+                        vec_cmpeq(v[x],v[x]),vec_splat_u32(8));\
+                mask = (vector bool int) vec_and(v[x],(vector signed int)mask);\
+\
+                mask = (vector bool int)\
+                        vec_cmpeq((vector signed int)mask,\
+                                  (vector signed int)vec_splat_u32(0));\
+\
+                /* vs <- v >> 31, accumulated over three shifts because
+                 * vec_splat_u32() cannot encode 31; vec_nor() then yields
+                 * ~(v >> 31): 0 for negative lanes, all-ones (255 once
+                 * truncated to u8) for positive overflow */\
+                vs = vec_sra(v[x],vec_splat_u32(8));\
+                vs = vec_sra(vs,  vec_splat_u32(8));\
+                vs = vec_sra(vs,  vec_splat_u32(15));\
+\
+                vs = vec_nor(vs,vs);\
+\
+                /* keep in-range lanes, clamp the rest */\
+                v[x]= vec_sel(vs, v[x], mask);\
+            }\
+\
+            for(x=0; x<b_w; x++)\
+                dst8[x + y*src_stride] = vbuf[x];\
+\
+        }\
+         else\
+            for(x=0; x<b_w/4; x++)\
+                d[x] = vec_sub(d[x], v[x]);
+
+static void inner_add_yblock_a_bw_8_obmc_16_altivec(uint8_t *obmc,
+                                             const int obmc_stride,
+                                             uint8_t * * block, int b_w,
+                                             int b_h, int src_x, int src_y,
+                                             int src_stride, slice_buffer * sb,
+                                             int add, uint8_t * dst8)
+{
+    int y, x;
+    DWTELEM * dst;
+    vector bool int mask;
+    vector signed int vs;
+    vector unsigned short h1, h2, l1, l2;
+    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+    vector unsigned char b0,b1,b2,b3;
+    vector unsigned char ob1,ob2,ob3,ob4;
+
+    DECLARE_ALIGNED_16(int, vbuf[16]);
+    vector signed int *v = (vector signed int *)vbuf, *d;
+
+    for(y=0; y<b_h; y++){
+        //FIXME ugly misuse of obmc_stride
+
+        uint8_t *obmc1= obmc + y*obmc_stride;
+        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+        dst = slice_buffer_get_line(sb, src_y + y);
+        d = (vector signed int *)(dst + src_x);
+
+//FIXME I could avoid some loads!
+
+        // load blocks
+        LOAD_BLOCKS
+
+        // load obmcs
+        LOAD_OBMCS
+
+        // steps 0 1
+        STEPS_0_1
+
+        FINAL_STEP_VEC
+
+    }
+}
+
+static void inner_add_yblock_a_bw_16_obmc_32_altivec(uint8_t *obmc,
+                                             const int obmc_stride,
+                                             uint8_t * * block, int b_w,
+                                             int b_h, int src_x, int src_y,
+                                             int src_stride, slice_buffer * sb,
+                                             int add, uint8_t * dst8)
+{
+    int y, x;
+    DWTELEM * dst;
+    vector bool int mask;
+    vector signed int vs;
+    vector unsigned short h1, h2, l1, l2;
+    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+    vector unsigned char b0,b1,b2,b3;
+    vector unsigned char ob1,ob2,ob3,ob4;
+    DECLARE_ALIGNED_16(int, vbuf[b_w]);
+    vector signed int *v = (vector signed int *)vbuf, *d;
+
+    for(y=0; y<b_h; y++){
+        //FIXME ugly misuse of obmc_stride
+
+        uint8_t *obmc1= obmc + y*obmc_stride;
+        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+        dst = slice_buffer_get_line(sb, src_y + y);
+        d = (vector signed int *)(dst + src_x);
+
+        // load blocks
+        LOAD_BLOCKS
+
+        // load obmcs
+        LOAD_OBMCS
+
+        // steps 0 1 2 3
+        STEPS_0_1
+
+        STEPS_2_3
+
+        FINAL_STEP_VEC
+
+    }
+}
+
+
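+/* Dispatch on block width and on src_x & 15: the aligned path ends with
+ * FINAL_STEP_VEC, the unaligned one with FINAL_STEP_SCALAR; block widths
+ * other than 8 and 16 fall back to the scalar ff_snow_inner_add_yblock(). */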
+void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
+                                      uint8_t * * block, int b_w, int b_h,
+                                      int src_x, int src_y, int src_stride,
+                                      slice_buffer * sb, int add,
+                                      uint8_t * dst8)
+{
+    if (src_x&15) {
+        if (b_w == 16)
+            inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
+                                                   b_w, b_h, src_x, src_y,
+                                                   src_stride, sb, add, dst8);
+        else if (b_w == 8)
+            inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
+                                                  b_w, b_h, src_x, src_y,
+                                                  src_stride, sb, add, dst8);
+        else
+            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
+                                     src_y, src_stride, sb, add, dst8);
+    } else {
+        if (b_w == 16)
+            inner_add_yblock_a_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
+                                                     b_w, b_h, src_x, src_y,
+                                                     src_stride, sb, add, dst8);
+        else if (b_w == 8)
+            inner_add_yblock_a_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
+                                                    b_w, b_h, src_x, src_y,
+                                                    src_stride, sb, add, dst8);
+        else
+            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
+                                     src_y, src_stride, sb, add, dst8);
+    }
+}
+
+
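+/* Hook the AltiVec implementations into the DSPContext. */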
+void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
+{
+    c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
+    c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
+    c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
+}