changeset 9857:89b48bc6c441

Importing libmpeg2 from mpeg2dec-0.3.1
author arpi
date Sun, 06 Apr 2003 16:41:49 +0000
parents 08496327b7ec
children 9af61fc7955c
files libmpeg2/alloc.c libmpeg2/alpha_asm.h libmpeg2/convert.h libmpeg2/cpu_accel.c libmpeg2/cpu_state.c libmpeg2/decode.c libmpeg2/idct_alpha.c libmpeg2/idct_altivec.c libmpeg2/motion_comp_alpha.c libmpeg2/motion_comp_altivec.c
diffstat 10 files changed, 4406 insertions(+), 0 deletions(-)
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/alloc.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,76 @@
+/*
+ * alloc.c
+ * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "config.h"
+
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+
+#if defined(HAVE_MEMALIGN) && !defined(__cplusplus)
+/* some systems have memalign() but no declaration for it */
+void * memalign (size_t align, size_t size);
+#endif
+
+void * (* mpeg2_malloc_hook) (int size, int reason) = NULL;
+int (* mpeg2_free_hook) (void * buf) = NULL;
+
+void * mpeg2_malloc (int size, int reason)
+{
+    char * buf;
+
+    if (mpeg2_malloc_hook) {
+	buf = (char *) mpeg2_malloc_hook (size, reason);
+	if (buf)
+	    return buf;
+    }
+
+#if defined(HAVE_MEMALIGN) && !defined(__cplusplus) && !defined(DEBUG)
+    return memalign (16, size);
+#else
+    buf = (char *) malloc (size + 15 + sizeof (void **));
+    if (buf) {
+	char * align_buf;
+
+	align_buf = buf + 15 + sizeof (void **);
+	align_buf -= (long)align_buf & 15;
+	*(((void **)align_buf) - 1) = buf;
+	return align_buf;
+    }
+    return NULL;
+#endif
+}
+
+void mpeg2_free (void * buf)
+{
+    if (mpeg2_free_hook && mpeg2_free_hook (buf))
+	return;
+
+#if defined(HAVE_MEMALIGN) && !defined(__cplusplus) && !defined(DEBUG)
+    free (buf);
+#else
+    free (*(((void **)buf) - 1));
+#endif
+}
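
The #else branch of mpeg2_malloc() above implements 16-byte alignment by hand: it over-allocates, advances past the slack, masks the low four address bits, and stashes the pointer malloc() actually returned just below the aligned block so that mpeg2_free() can recover it. A minimal standalone sketch of the same idiom, with hypothetical names (aligned_alloc16/aligned_free16) that are not part of libmpeg2:

#include <stdlib.h>

static void * aligned_alloc16 (size_t size)
{
    /* 15 bytes of slack for rounding plus room to stash the original pointer */
    char * raw = (char *) malloc (size + 15 + sizeof (void *));
    char * aligned;

    if (!raw)
        return NULL;
    aligned = raw + 15 + sizeof (void *);
    aligned -= (size_t) aligned & 15;   /* clear the low bits: 16-byte aligned */
    ((void **) aligned)[-1] = raw;      /* remember what malloc() returned */
    return aligned;
}

static void aligned_free16 (void * aligned)
{
    free (((void **) aligned)[-1]);     /* recover and free the original pointer */
}
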
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/alpha_asm.h	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,184 @@
+/*
+ * Alpha assembly macros
+ * Copyright (c) 2002 Falk Hueffner <falk@debian.org>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307	 USA
+ */
+
+#ifndef ALPHA_ASM_H
+#define ALPHA_ASM_H
+
+#include <inttypes.h>
+
+#if defined __GNUC__
+# define GNUC_PREREQ(maj, min) \
+        ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
+#else
+# define GNUC_PREREQ(maj, min) 0
+#endif
+
+#define AMASK_BWX (1 << 0)
+#define AMASK_FIX (1 << 1)
+#define AMASK_CIX (1 << 2)
+#define AMASK_MVI (1 << 8)
+
+#ifdef __alpha_bwx__
+# define HAVE_BWX() 1
+#else
+# define HAVE_BWX() (amask(AMASK_BWX) == 0)
+#endif
+#ifdef __alpha_fix__
+# define HAVE_FIX() 1
+#else
+# define HAVE_FIX() (amask(AMASK_FIX) == 0)
+#endif
+#ifdef __alpha_max__
+# define HAVE_MVI() 1
+#else
+# define HAVE_MVI() (amask(AMASK_MVI) == 0)
+#endif
+#ifdef __alpha_cix__
+# define HAVE_CIX() 1
+#else
+# define HAVE_CIX() (amask(AMASK_CIX) == 0)
+#endif
+
+inline static uint64_t BYTE_VEC(uint64_t x)
+{
+    x |= x <<  8;
+    x |= x << 16;
+    x |= x << 32;
+    return x;
+}
+inline static uint64_t WORD_VEC(uint64_t x)
+{
+    x |= x << 16;
+    x |= x << 32;
+    return x;
+}
+
+#define ldq(p) (*(const uint64_t *) (p))
+#define ldl(p) (*(const int32_t *) (p))
+#define stl(l, p) do { *(uint32_t *) (p) = (l); } while (0)
+#define stq(l, p) do { *(uint64_t *) (p) = (l); } while (0)
+#define sextw(x) ((int16_t) (x))
+
+#ifdef __GNUC__
+struct unaligned_long { uint64_t l; } __attribute__((packed));
+#define ldq_u(p)     (*(const uint64_t *) (((uint64_t) (p)) & ~7ul))
+#define uldq(a)	     (((const struct unaligned_long *) (a))->l)
+
+#if GNUC_PREREQ(3,0)
+/* Unfortunately, __builtin_prefetch is slightly buggy on Alpha. The
+   defines here are kludged so we still get the right
+   instruction. This needs to be adapted as soon as gcc is fixed.  */
+# define prefetch(p)     __builtin_prefetch((p), 0, 1)
+# define prefetch_en(p)  __builtin_prefetch((p), 1, 1)
+# define prefetch_m(p)   __builtin_prefetch((p), 0, 0)
+# define prefetch_men(p) __builtin_prefetch((p), 1, 0)
+#else
+# define prefetch(p)     asm volatile("ldl $31,%0"  : : "m"(*(const char *) (p)) : "memory")
+# define prefetch_en(p)  asm volatile("ldq $31,%0"  : : "m"(*(const char *) (p)) : "memory")
+# define prefetch_m(p)   asm volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
+# define prefetch_men(p) asm volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
+#endif
+
+#if GNUC_PREREQ(3,3)
+#define cmpbge	__builtin_alpha_cmpbge
+/* Avoid warnings.  */
+#define extql(a, b)	__builtin_alpha_extql(a, (uint64_t) (b))
+#define extwl(a, b)	__builtin_alpha_extwl(a, (uint64_t) (b))
+#define extqh(a, b)	__builtin_alpha_extqh(a, (uint64_t) (b))
+#define zap	__builtin_alpha_zap
+#define zapnot	__builtin_alpha_zapnot
+#define amask	__builtin_alpha_amask
+#define implver	__builtin_alpha_implver
+#define rpcc	__builtin_alpha_rpcc
+#define minub8	__builtin_alpha_minub8
+#define minsb8	__builtin_alpha_minsb8
+#define minuw4	__builtin_alpha_minuw4
+#define minsw4	__builtin_alpha_minsw4
+#define maxub8	__builtin_alpha_maxub8
+#define maxsb8	__builtin_alpha_maxsb8
+#define maxuw4	__builtin_alpha_maxuw4	
+#define maxsw4	__builtin_alpha_maxsw4
+#define perr	__builtin_alpha_perr
+#define pklb	__builtin_alpha_pklb
+#define pkwb	__builtin_alpha_pkwb
+#define unpkbl	__builtin_alpha_unpkbl
+#define unpkbw	__builtin_alpha_unpkbw
+#else
+#define cmpbge(a, b) ({ uint64_t __r; asm ("cmpbge  %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define extql(a, b)  ({ uint64_t __r; asm ("extql   %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define extwl(a, b)  ({ uint64_t __r; asm ("extwl   %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define extqh(a, b)  ({ uint64_t __r; asm ("extqh   %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define zap(a, b)    ({ uint64_t __r; asm ("zap     %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define zapnot(a, b) ({ uint64_t __r; asm ("zapnot  %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define amask(a)     ({ uint64_t __r; asm ("amask   %1,%0"      : "=r" (__r) : "rI"  (a));	     __r; })
+#define implver()    ({ uint64_t __r; asm ("implver %0"         : "=r" (__r));			     __r; })
+#define rpcc()	     ({ uint64_t __r; asm volatile ("rpcc %0"   : "=r" (__r));			     __r; })
+#define minub8(a, b) ({ uint64_t __r; asm ("minub8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minsb8(a, b) ({ uint64_t __r; asm ("minsb8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minuw4(a, b) ({ uint64_t __r; asm ("minuw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minsw4(a, b) ({ uint64_t __r; asm ("minsw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxub8(a, b) ({ uint64_t __r; asm ("maxub8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxsb8(a, b) ({ uint64_t __r; asm ("maxsb8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxuw4(a, b) ({ uint64_t __r; asm ("maxuw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxsw4(a, b) ({ uint64_t __r; asm ("maxsw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define perr(a, b)   ({ uint64_t __r; asm ("perr    %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
+#define pklb(a)      ({ uint64_t __r; asm ("pklb    %r1,%0"     : "=r" (__r) : "rJ"  (a));	     __r; })
+#define pkwb(a)      ({ uint64_t __r; asm ("pkwb    %r1,%0"     : "=r" (__r) : "rJ"  (a));	     __r; })
+#define unpkbl(a)    ({ uint64_t __r; asm ("unpkbl  %r1,%0"     : "=r" (__r) : "rJ"  (a));	     __r; })
+#define unpkbw(a)    ({ uint64_t __r; asm ("unpkbw  %r1,%0"     : "=r" (__r) : "rJ"  (a));	     __r; })
+#endif
+#define wh64(p) asm volatile("wh64 (%0)" : : "r"(p) : "memory")
+
+#elif defined(__DECC)		/* Digital/Compaq/hp "ccc" compiler */
+
+#include <c_asm.h>
+#define ldq_u(a)     asm ("ldq_u   %v0,0(%a0)", a)
+#define uldq(a)	     (*(const __unaligned uint64_t *) (a))
+#define cmpbge(a, b) asm ("cmpbge  %a0,%a1,%v0", a, b)
+#define extql(a, b)  asm ("extql   %a0,%a1,%v0", a, b)
+#define extwl(a, b)  asm ("extwl   %a0,%a1,%v0", a, b)
+#define extqh(a, b)  asm ("extqh   %a0,%a1,%v0", a, b)
+#define zap(a, b)    asm ("zap     %a0,%a1,%v0", a, b)
+#define zapnot(a, b) asm ("zapnot  %a0,%a1,%v0", a, b)
+#define amask(a)     asm ("amask   %a0,%v0", a)
+#define implver()    asm ("implver %v0")
+#define rpcc()	     asm ("rpcc	   %v0")
+#define minub8(a, b) asm ("minub8  %a0,%a1,%v0", a, b)
+#define minsb8(a, b) asm ("minsb8  %a0,%a1,%v0", a, b)
+#define minuw4(a, b) asm ("minuw4  %a0,%a1,%v0", a, b)
+#define minsw4(a, b) asm ("minsw4  %a0,%a1,%v0", a, b)
+#define maxub8(a, b) asm ("maxub8  %a0,%a1,%v0", a, b)
+#define maxsb8(a, b) asm ("maxsb8  %a0,%a1,%v0", a, b)
+#define maxuw4(a, b) asm ("maxuw4  %a0,%a1,%v0", a, b)
+#define maxsw4(a, b) asm ("maxsw4  %a0,%a1,%v0", a, b)
+#define perr(a, b)   asm ("perr    %a0,%a1,%v0", a, b)
+#define pklb(a)      asm ("pklb    %a0,%v0", a)
+#define pkwb(a)      asm ("pkwb    %a0,%v0", a)
+#define unpkbl(a)    asm ("unpkbl  %a0,%v0", a)
+#define unpkbw(a)    asm ("unpkbw  %a0,%v0", a)
+#define wh64(a)      asm ("wh64    %a0", a)
+
+#else
+#error "Unknown compiler!"
+#endif
+
+#endif /* ALPHA_ASM_H */
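
The HAVE_BWX/FIX/MVI/CIX macros above fold to a compile-time 1 when the matching -m option is in effect and otherwise query the CPU through the amask instruction, where a cleared bit means the extension is implemented. A minimal sketch of run-time dispatch built on HAVE_MVI(); pick_idct_copy() is a hypothetical helper, while the two mpeg2_idct_copy_* entry points are the ones defined in idct_alpha.c later in this changeset:

#include <inttypes.h>
#include "alpha_asm.h"

typedef void idct_copy_fn (int16_t * block, uint8_t * dest, int stride);

extern idct_copy_fn mpeg2_idct_copy_mvi;    /* MVI (MAX extension) version */
extern idct_copy_fn mpeg2_idct_copy_alpha;  /* plain Alpha version */

static idct_copy_fn * pick_idct_copy (void)
{
    /* constant when built with -mmax, a single amask otherwise */
    return HAVE_MVI () ? &mpeg2_idct_copy_mvi : &mpeg2_idct_copy_alpha;
}
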
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/convert.h	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,56 @@
+/*
+ * convert.h
+ * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef CONVERT_H
+#define CONVERT_H
+
+#define CONVERT_FRAME 0
+#define CONVERT_TOP_FIELD 1
+#define CONVERT_BOTTOM_FIELD 2
+#define CONVERT_BOTH_FIELDS 3
+
+typedef struct convert_init_s {
+    void * id;
+    int id_size;
+    int buf_size[3];
+    void (* start) (void * id, uint8_t * const * dest, int flags);
+    void (* copy) (void * id, uint8_t * const * src, unsigned int v_offset);
+} convert_init_t;
+
+typedef void convert_t (int width, int height, uint32_t accel, void * arg,
+			convert_init_t * result);
+
+convert_t convert_rgb32;
+convert_t convert_rgb24;
+convert_t convert_rgb16;
+convert_t convert_rgb15;
+convert_t convert_bgr32;
+convert_t convert_bgr24;
+convert_t convert_bgr16;
+convert_t convert_bgr15;
+
+#define CONVERT_RGB 0
+#define CONVERT_BGR 1
+convert_t * convert_rgb (int order, int bpp);
+
+#endif /* CONVERT_H */
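
mpeg2_convert() in decode.c below drives a convert_t callback in two phases: a first call with result->id == NULL so the callback can report its id_size and buf_size requirements, then, when id_size is non-zero, a second call once the private id buffer has been allocated. A skeleton of such a callback as a sketch; everything except the convert.h types is hypothetical and the buffer sizing is only an example:

#include <inttypes.h>
#include "convert.h"

typedef struct { int width, height; } my_convert_state_t;

static void my_start (void * id, uint8_t * const * dest, int flags)
{
    /* remember the destination planes for the picture about to be decoded */
}

static void my_copy (void * id, uint8_t * const * src, unsigned int v_offset)
{
    /* convert one band of decoded YUV starting at line v_offset */
}

void my_convert (int width, int height, uint32_t accel, void * arg,
                 convert_init_t * result)
{
    (void) accel; (void) arg;
    if (result->id == NULL) {
        /* phase 1: describe the resources this converter needs */
        result->id_size = sizeof (my_convert_state_t);
        result->buf_size[0] = width * height * 4;   /* e.g. one RGB32 buffer */
        result->buf_size[1] = result->buf_size[2] = 0;
        result->start = my_start;
        result->copy = my_copy;
    } else {
        /* phase 2: result->id now points to the allocated private state */
        my_convert_state_t * state = (my_convert_state_t *) result->id;
        state->width = width;
        state->height = height;
    }
}
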
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/cpu_accel.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,175 @@
+/*
+ * cpu_accel.c
+ * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "config.h"
+
+#include <inttypes.h>
+
+#include "mpeg2.h"
+
+#ifdef ACCEL_DETECT
+#ifdef ARCH_X86
+static inline uint32_t arch_accel (void)
+{
+    uint32_t eax, ebx, ecx, edx;
+    int AMD;
+    uint32_t caps;
+
+#ifndef PIC
+#define cpuid(op,eax,ebx,ecx,edx)	\
+    __asm__ ("cpuid"			\
+	     : "=a" (eax),		\
+	       "=b" (ebx),		\
+	       "=c" (ecx),		\
+	       "=d" (edx)		\
+	     : "a" (op)			\
+	     : "cc")
+#else	/* PIC version : save ebx */
+#define cpuid(op,eax,ebx,ecx,edx)	\
+    __asm__ ("push %%ebx\n\t"		\
+	     "cpuid\n\t"		\
+	     "movl %%ebx,%1\n\t"	\
+	     "pop %%ebx"		\
+	     : "=a" (eax),		\
+	       "=r" (ebx),		\
+	       "=c" (ecx),		\
+	       "=d" (edx)		\
+	     : "a" (op)			\
+	     : "cc")
+#endif
+
+    __asm__ ("pushf\n\t"
+	     "pushf\n\t"
+	     "pop %0\n\t"
+	     "movl %0,%1\n\t"
+	     "xorl $0x200000,%0\n\t"
+	     "push %0\n\t"
+	     "popf\n\t"
+	     "pushf\n\t"
+	     "pop %0\n\t"
+	     "popf"
+	     : "=r" (eax),
+	       "=r" (ebx)
+	     :
+	     : "cc");
+
+    if (eax == ebx)		/* no cpuid */
+	return 0;
+
+    cpuid (0x00000000, eax, ebx, ecx, edx);
+    if (!eax)			/* vendor string only */
+	return 0;
+
+    AMD = (ebx == 0x68747541) && (ecx == 0x444d4163) && (edx == 0x69746e65);
+
+    cpuid (0x00000001, eax, ebx, ecx, edx);
+    if (! (edx & 0x00800000))	/* no MMX */
+	return 0;
+
+    caps = MPEG2_ACCEL_X86_MMX;
+    if (edx & 0x02000000)	/* SSE - identical to AMD MMX extensions */
+	caps = MPEG2_ACCEL_X86_MMX | MPEG2_ACCEL_X86_MMXEXT;
+
+    cpuid (0x80000000, eax, ebx, ecx, edx);
+    if (eax < 0x80000001)	/* no extended capabilities */
+	return caps;
+
+    cpuid (0x80000001, eax, ebx, ecx, edx);
+
+    if (edx & 0x80000000)
+	caps |= MPEG2_ACCEL_X86_3DNOW;
+
+    if (AMD && (edx & 0x00400000))	/* AMD MMX extensions */
+	caps |= MPEG2_ACCEL_X86_MMXEXT;
+
+    return caps;
+}
+#endif /* ARCH_X86 */
+
+#ifdef ARCH_PPC
+#include <signal.h>
+#include <setjmp.h>
+
+static sigjmp_buf jmpbuf;
+static volatile sig_atomic_t canjump = 0;
+
+static RETSIGTYPE sigill_handler (int sig)
+{
+    if (!canjump) {
+	signal (sig, SIG_DFL);
+	raise (sig);
+    }
+
+    canjump = 0;
+    siglongjmp (jmpbuf, 1);
+}
+
+static inline uint32_t arch_accel (void)
+{
+    signal (SIGILL, sigill_handler);
+    if (sigsetjmp (jmpbuf, 1)) {
+	signal (SIGILL, SIG_DFL);
+	return 0;
+    }
+
+    canjump = 1;
+
+    asm volatile ("mtspr 256, %0\n\t"
+		  "vand %%v0, %%v0, %%v0"
+		  :
+		  : "r" (-1));
+
+    signal (SIGILL, SIG_DFL);
+    return MPEG2_ACCEL_PPC_ALTIVEC;
+}
+#endif /* ARCH_PPC */
+
+#ifdef ARCH_ALPHA
+static inline uint32_t arch_accel (void)
+{
+    uint64_t no_mvi;
+
+    asm volatile ("amask %1, %0"
+		  : "=r" (no_mvi)
+		  : "rI" (256));	/* AMASK_MVI */
+    return no_mvi ? MPEG2_ACCEL_ALPHA : (MPEG2_ACCEL_ALPHA |
+					 MPEG2_ACCEL_ALPHA_MVI);
+}
+#endif /* ARCH_ALPHA */
+#endif
+
+uint32_t mpeg2_detect_accel (void)
+{
+    uint32_t accel;
+
+    accel = 0;
+#ifdef ACCEL_DETECT
+#ifdef LIBMPEG2_MLIB
+    accel = MPEG2_ACCEL_MLIB;
+#endif
+#if defined (ARCH_X86) || defined (ARCH_PPC) || defined (ARCH_ALPHA)
+    accel |= arch_accel ();
+#endif
+#endif
+    return accel;
+}
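
The x86 branch above first toggles the ID bit in EFLAGS to check that the cpuid instruction exists at all, then decodes the MMX, SSE and AMD-extension feature bits by hand, which was necessary for the compilers of the time. For comparison only, a sketch of the same MMX/SSE test written against GCC's <cpuid.h> helper (which did not exist when this code was imported); the MPEG2_ACCEL_* flags come from mpeg2.h:

#include <cpuid.h>
#include <inttypes.h>
#include "mpeg2.h"

static uint32_t arch_accel_cpuid_h (void)
{
    unsigned int eax, ebx, ecx, edx;
    uint32_t caps = 0;

    if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
        return 0;                  /* no cpuid, or leaf 1 not supported */
    if (edx & bit_MMX)
        caps |= MPEG2_ACCEL_X86_MMX;
    if (edx & bit_SSE)             /* SSE implies the AMD MMX extensions */
        caps |= MPEG2_ACCEL_X86_MMXEXT;
    return caps;
}
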
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/cpu_state.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,119 @@
+/*
+ * cpu_state.c
+ * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "config.h"
+
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+#include "attributes.h"
+#ifdef ARCH_X86
+#include "mmx.h"
+#endif
+
+void (* mpeg2_cpu_state_save) (cpu_state_t * state) = NULL;
+void (* mpeg2_cpu_state_restore) (cpu_state_t * state) = NULL;
+
+#ifdef ARCH_X86
+static void state_restore_mmx (cpu_state_t * state)
+{
+    emms ();
+}
+#endif
+
+#ifdef ARCH_PPC
+static void state_save_altivec (cpu_state_t * state)
+{
+    asm ("						\n"
+	"	li		%r9,  16		\n"
+	"	stvx		%v20, 0,    %r3		\n"
+	"	li		%r11, 32		\n"
+	"	stvx		%v21, %r9,  %r3		\n"
+	"	li		%r9,  48		\n"
+	"	stvx		%v22, %r11, %r3		\n"
+	"	li		%r11, 64		\n"
+	"	stvx		%v23, %r9,  %r3		\n"
+	"	li		%r9,  80		\n"
+	"	stvx		%v24, %r11, %r3		\n"
+	"	li		%r11, 96		\n"
+	"	stvx		%v25, %r9,  %r3		\n"
+	"	li		%r9,  112		\n"
+	"	stvx		%v26, %r11, %r3		\n"
+	"	li		%r11, 128		\n"
+	"	stvx		%v27, %r9,  %r3		\n"
+	"	li		%r9,  144		\n"
+	"	stvx		%v28, %r11, %r3		\n"
+	"	li		%r11, 160		\n"
+	"	stvx		%v29, %r9,  %r3		\n"
+	"	li		%r9,  176		\n"
+	"	stvx		%v30, %r11, %r3		\n"
+	"	stvx		%v31, %r9,  %r3		\n"
+	 );
+}
+
+static void state_restore_altivec (cpu_state_t * state)
+{
+    asm ("						\n"
+	"	li		%r9,  16		\n"
+	"	lvx		%v20, 0,    %r3		\n"
+	"	li		%r11, 32		\n"
+	"	lvx		%v21, %r9,  %r3		\n"
+	"	li		%r9,  48		\n"
+	"	lvx		%v22, %r11, %r3		\n"
+	"	li		%r11, 64		\n"
+	"	lvx		%v23, %r9,  %r3		\n"
+	"	li		%r9,  80		\n"
+	"	lvx		%v24, %r11, %r3		\n"
+	"	li		%r11, 96		\n"
+	"	lvx		%v25, %r9,  %r3		\n"
+	"	li		%r9,  112		\n"
+	"	lvx		%v26, %r11, %r3		\n"
+	"	li		%r11, 128		\n"
+	"	lvx		%v27, %r9,  %r3		\n"
+	"	li		%r9,  144		\n"
+	"	lvx		%v28, %r11, %r3		\n"
+	"	li		%r11, 160		\n"
+	"	lvx		%v29, %r9,  %r3		\n"
+	"	li		%r9,  176		\n"
+	"	lvx		%v30, %r11, %r3		\n"
+	"	lvx		%v31, %r9,  %r3		\n"
+	 );
+}
+#endif
+
+void mpeg2_cpu_state_init (uint32_t accel)
+{
+#ifdef ARCH_X86
+    if (accel & MPEG2_ACCEL_X86_MMX) {
+	mpeg2_cpu_state_restore = state_restore_mmx;
+    }
+#endif
+#ifdef ARCH_PPC
+    if (accel & MPEG2_ACCEL_PPC_ALTIVEC) {
+	mpeg2_cpu_state_save = state_save_altivec;
+	mpeg2_cpu_state_restore = state_restore_altivec;
+    }
+#endif
+}
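
These hooks exist because the accelerated kernels clobber state the surrounding code expects to survive: MMX leaves the FPU in MMX mode until emms is executed, and the AltiVec slice code uses the callee-saved vector registers v20-v31, which state_save_altivec()/state_restore_altivec() spill to and reload from the caller-supplied cpu_state_t. A sketch of the intended calling pattern around an accelerated section (cpu_state_t comes from mpeg2_internal.h; the wrapper function itself is hypothetical):

#include <inttypes.h>
#include "mpeg2.h"
#include "mpeg2_internal.h"

static void run_accelerated_section (void)
{
    cpu_state_t state;

    if (mpeg2_cpu_state_save)
        mpeg2_cpu_state_save (&state);      /* AltiVec: spill v20-v31 */

    /* ... MMX or AltiVec kernels run here ... */

    if (mpeg2_cpu_state_restore)
        mpeg2_cpu_state_restore (&state);   /* x86: emms; AltiVec: reload v20-v31 */
}
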
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/decode.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,439 @@
+/*
+ * decode.c
+ * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "config.h"
+
+#include <string.h>	/* memcmp/memset, try to remove */
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+#include "convert.h"
+
+static int mpeg2_accels = 0;
+
+#define BUFFER_SIZE (1194 * 1024)
+
+const mpeg2_info_t * mpeg2_info (mpeg2dec_t * mpeg2dec)
+{
+    return &(mpeg2dec->info);
+}
+
+static inline int skip_chunk (mpeg2dec_t * mpeg2dec, int bytes)
+{
+    uint8_t * current;
+    uint32_t shift;
+    uint8_t * chunk_ptr;
+    uint8_t * limit;
+    uint8_t byte;
+
+    if (!bytes)
+	return 0;
+
+    current = mpeg2dec->buf_start;
+    shift = mpeg2dec->shift;
+    chunk_ptr = mpeg2dec->chunk_ptr;
+    limit = current + bytes;
+
+    do {
+	byte = *current++;
+	if (shift == 0x00000100) {
+	    int skipped;
+
+	    mpeg2dec->shift = 0xffffff00;
+	    skipped = current - mpeg2dec->buf_start;
+	    mpeg2dec->buf_start = current;
+	    return skipped;
+	}
+	shift = (shift | byte) << 8;
+    } while (current < limit);
+
+    mpeg2dec->shift = shift;
+    mpeg2dec->buf_start = current;
+    return 0;
+}
+
+static inline int copy_chunk (mpeg2dec_t * mpeg2dec, int bytes)
+{
+    uint8_t * current;
+    uint32_t shift;
+    uint8_t * chunk_ptr;
+    uint8_t * limit;
+    uint8_t byte;
+
+    if (!bytes)
+	return 0;
+
+    current = mpeg2dec->buf_start;
+    shift = mpeg2dec->shift;
+    chunk_ptr = mpeg2dec->chunk_ptr;
+    limit = current + bytes;
+
+    do {
+	byte = *current++;
+	if (shift == 0x00000100) {
+	    int copied;
+
+	    mpeg2dec->shift = 0xffffff00;
+	    mpeg2dec->chunk_ptr = chunk_ptr + 1;
+	    copied = current - mpeg2dec->buf_start;
+	    mpeg2dec->buf_start = current;
+	    return copied;
+	}
+	shift = (shift | byte) << 8;
+	*chunk_ptr++ = byte;
+    } while (current < limit);
+
+    mpeg2dec->shift = shift;
+    mpeg2dec->buf_start = current;
+    return 0;
+}
+
+void mpeg2_buffer (mpeg2dec_t * mpeg2dec, uint8_t * start, uint8_t * end)
+{
+    mpeg2dec->buf_start = start;
+    mpeg2dec->buf_end = end;
+}
+
+static inline int seek_chunk (mpeg2dec_t * mpeg2dec)
+{
+    int size, skipped;
+
+    size = mpeg2dec->buf_end - mpeg2dec->buf_start;
+    skipped = skip_chunk (mpeg2dec, size);
+    if (!skipped) {
+	mpeg2dec->bytes_since_pts += size;
+	return -1;
+    }
+    mpeg2dec->bytes_since_pts += skipped;
+    mpeg2dec->code = mpeg2dec->buf_start[-1];
+    return 0;
+}
+
+int mpeg2_seek_header (mpeg2dec_t * mpeg2dec)
+{
+    while (mpeg2dec->code != 0xb3 &&
+	   ((mpeg2dec->code != 0xb7 && mpeg2dec->code != 0xb8 &&
+	     mpeg2dec->code) || mpeg2dec->sequence.width == -1))
+	if (seek_chunk (mpeg2dec))
+	    return -1;
+    mpeg2dec->chunk_start = mpeg2dec->chunk_ptr = mpeg2dec->chunk_buffer;
+    return mpeg2_parse_header (mpeg2dec);
+}
+
+int mpeg2_seek_sequence (mpeg2dec_t * mpeg2dec)
+{
+    mpeg2dec->sequence.width = -1;
+    return mpeg2_seek_header (mpeg2dec);
+}
+
+#define RECEIVED(code,state) (((state) << 8) + (code))
+
+int mpeg2_parse (mpeg2dec_t * mpeg2dec)
+{
+    int size_buffer, size_chunk, copied;
+
+    if (mpeg2dec->action) {
+	int state;
+
+	state = mpeg2dec->action (mpeg2dec);
+	if (state)
+	    return state;
+    }
+
+    while (1) {
+	while ((unsigned) (mpeg2dec->code - mpeg2dec->first_decode_slice) <
+	       mpeg2dec->nb_decode_slices) {
+	    size_buffer = mpeg2dec->buf_end - mpeg2dec->buf_start;
+	    size_chunk = (mpeg2dec->chunk_buffer + BUFFER_SIZE -
+			  mpeg2dec->chunk_ptr);
+	    if (size_buffer <= size_chunk) {
+		copied = copy_chunk (mpeg2dec, size_buffer);
+		if (!copied) {
+		    mpeg2dec->bytes_since_pts += size_buffer;
+		    mpeg2dec->chunk_ptr += size_buffer;
+		    return -1;
+		}
+	    } else {
+		copied = copy_chunk (mpeg2dec, size_chunk);
+		if (!copied) {
+		    /* filled the chunk buffer without finding a start code */
+		    mpeg2dec->bytes_since_pts += size_chunk;
+		    mpeg2dec->action = seek_chunk;
+		    return STATE_INVALID;
+		}
+	    }
+	    mpeg2dec->bytes_since_pts += copied;
+
+	    mpeg2_slice (&(mpeg2dec->decoder), mpeg2dec->code,
+			 mpeg2dec->chunk_start);
+	    mpeg2dec->code = mpeg2dec->buf_start[-1];
+	    mpeg2dec->chunk_ptr = mpeg2dec->chunk_start;
+	}
+	if ((unsigned) (mpeg2dec->code - 1) >= 0xb0 - 1)
+	    break;
+	if (seek_chunk (mpeg2dec))
+	    return -1;
+    }
+
+    switch (RECEIVED (mpeg2dec->code, mpeg2dec->state)) {
+    case RECEIVED (0x00, STATE_SLICE_1ST):
+    case RECEIVED (0x00, STATE_SLICE):
+	mpeg2dec->action = mpeg2_header_picture_start;
+	break;
+    case RECEIVED (0xb7, STATE_SLICE):
+	mpeg2dec->action = mpeg2_header_end;
+	break;
+    case RECEIVED (0xb3, STATE_SLICE):
+    case RECEIVED (0xb8, STATE_SLICE):
+	mpeg2dec->action = mpeg2_parse_header;
+	break;
+    default:
+	mpeg2dec->action = mpeg2_seek_header;
+	return STATE_INVALID;
+    }
+    return mpeg2dec->state;
+}
+
+int mpeg2_parse_header (mpeg2dec_t * mpeg2dec)
+{
+    static int (* process_header[]) (mpeg2dec_t * mpeg2dec) = {
+	mpeg2_header_picture, mpeg2_header_extension, mpeg2_header_user_data,
+	mpeg2_header_sequence, NULL, NULL, NULL, NULL, mpeg2_header_gop
+    };
+    int size_buffer, size_chunk, copied;
+
+    mpeg2dec->action = mpeg2_parse_header;
+    while (1) {
+	size_buffer = mpeg2dec->buf_end - mpeg2dec->buf_start;
+	size_chunk = (mpeg2dec->chunk_buffer + BUFFER_SIZE -
+		      mpeg2dec->chunk_ptr);
+	if (size_buffer <= size_chunk) {
+	    copied = copy_chunk (mpeg2dec, size_buffer);
+	    if (!copied) {
+		mpeg2dec->bytes_since_pts += size_buffer;
+		mpeg2dec->chunk_ptr += size_buffer;
+		return -1;
+	    }
+	} else {
+	    copied = copy_chunk (mpeg2dec, size_chunk);
+	    if (!copied) {
+		/* filled the chunk buffer without finding a start code */
+		mpeg2dec->bytes_since_pts += size_chunk;
+		mpeg2dec->code = 0xb4;
+		mpeg2dec->action = mpeg2_seek_header;
+		return STATE_INVALID;
+	    }
+	}
+	mpeg2dec->bytes_since_pts += copied;
+
+	if (process_header[mpeg2dec->code & 0x0b] (mpeg2dec)) {
+	    mpeg2dec->code = mpeg2dec->buf_start[-1];
+	    mpeg2dec->action = mpeg2_seek_header;
+	    return STATE_INVALID;
+	}
+
+	mpeg2dec->code = mpeg2dec->buf_start[-1];
+	switch (RECEIVED (mpeg2dec->code, mpeg2dec->state)) {
+
+	/* state transition after a sequence header */
+	case RECEIVED (0x00, STATE_SEQUENCE):
+	    mpeg2dec->action = mpeg2_header_picture_start;
+	case RECEIVED (0xb8, STATE_SEQUENCE):
+	    mpeg2_header_sequence_finalize (mpeg2dec);
+	    break;
+
+	/* other legal state transitions */
+	case RECEIVED (0x00, STATE_GOP):
+	    mpeg2dec->action = mpeg2_header_picture_start;
+	    break;
+	case RECEIVED (0x01, STATE_PICTURE):
+	case RECEIVED (0x01, STATE_PICTURE_2ND):
+	    mpeg2dec->action = mpeg2_header_slice_start;
+	    break;
+
+	/* legal headers within a given state */
+	case RECEIVED (0xb2, STATE_SEQUENCE):
+	case RECEIVED (0xb2, STATE_GOP):
+	case RECEIVED (0xb2, STATE_PICTURE):
+	case RECEIVED (0xb2, STATE_PICTURE_2ND):
+	case RECEIVED (0xb5, STATE_SEQUENCE):
+	case RECEIVED (0xb5, STATE_PICTURE):
+	case RECEIVED (0xb5, STATE_PICTURE_2ND):
+	    mpeg2dec->chunk_ptr = mpeg2dec->chunk_start;
+	    continue;
+
+	default:
+	    mpeg2dec->action = mpeg2_seek_header;
+	    return STATE_INVALID;
+	}
+
+	mpeg2dec->chunk_start = mpeg2dec->chunk_ptr = mpeg2dec->chunk_buffer;
+	return mpeg2dec->state;
+    }
+}
+
+void mpeg2_convert (mpeg2dec_t * mpeg2dec,
+		    void (* convert) (int, int, uint32_t, void *,
+				      struct convert_init_s *), void * arg)
+{
+    convert_init_t convert_init;
+    int size;
+
+    convert_init.id = NULL;
+    convert (mpeg2dec->decoder.width, mpeg2dec->decoder.height,
+	     mpeg2_accels, arg, &convert_init);
+    if (convert_init.id_size) {
+	convert_init.id = mpeg2dec->convert_id =
+	    mpeg2_malloc (convert_init.id_size, ALLOC_CONVERT_ID);
+	convert (mpeg2dec->decoder.width, mpeg2dec->decoder.height,
+		 mpeg2_accels, arg, &convert_init);
+    }
+    mpeg2dec->convert_size[0] = size = convert_init.buf_size[0];
+    mpeg2dec->convert_size[1] = size += convert_init.buf_size[1];
+    mpeg2dec->convert_size[2] = size += convert_init.buf_size[2];
+    mpeg2dec->convert_start = convert_init.start;
+    mpeg2dec->convert_copy = convert_init.copy;
+
+    size = mpeg2dec->decoder.width * mpeg2dec->decoder.height >> 2;
+    mpeg2dec->yuv_buf[0][0] = (uint8_t *) mpeg2_malloc (6 * size, ALLOC_YUV);
+    mpeg2dec->yuv_buf[0][1] = mpeg2dec->yuv_buf[0][0] + 4 * size;
+    mpeg2dec->yuv_buf[0][2] = mpeg2dec->yuv_buf[0][0] + 5 * size;
+    mpeg2dec->yuv_buf[1][0] = (uint8_t *) mpeg2_malloc (6 * size, ALLOC_YUV);
+    mpeg2dec->yuv_buf[1][1] = mpeg2dec->yuv_buf[1][0] + 4 * size;
+    mpeg2dec->yuv_buf[1][2] = mpeg2dec->yuv_buf[1][0] + 5 * size;
+    size = mpeg2dec->decoder.width * 8;
+    mpeg2dec->yuv_buf[2][0] = (uint8_t *) mpeg2_malloc (6 * size, ALLOC_YUV);
+    mpeg2dec->yuv_buf[2][1] = mpeg2dec->yuv_buf[2][0] + 4 * size;
+    mpeg2dec->yuv_buf[2][2] = mpeg2dec->yuv_buf[2][0] + 5 * size;
+}
+
+void mpeg2_set_buf (mpeg2dec_t * mpeg2dec, uint8_t * buf[3], void * id)
+{
+    fbuf_t * fbuf;
+
+    if (mpeg2dec->custom_fbuf) {
+	mpeg2_set_fbuf (mpeg2dec, mpeg2dec->decoder.coding_type);
+	fbuf = mpeg2dec->fbuf[0];
+	if (mpeg2dec->state == STATE_SEQUENCE) {
+	    mpeg2dec->fbuf[2] = mpeg2dec->fbuf[1];
+	    mpeg2dec->fbuf[1] = mpeg2dec->fbuf[0];
+	}
+    } else {
+	fbuf = &(mpeg2dec->fbuf_alloc[mpeg2dec->alloc_index].fbuf);
+	mpeg2dec->alloc_index_user = ++mpeg2dec->alloc_index;
+    }
+    fbuf->buf[0] = buf[0];
+    fbuf->buf[1] = buf[1];
+    fbuf->buf[2] = buf[2];
+    fbuf->id = id;
+}
+
+void mpeg2_custom_fbuf (mpeg2dec_t * mpeg2dec, int custom_fbuf)
+{
+    mpeg2dec->custom_fbuf = custom_fbuf;
+}
+
+void mpeg2_skip (mpeg2dec_t * mpeg2dec, int skip)
+{
+    mpeg2dec->first_decode_slice = 1;
+    mpeg2dec->nb_decode_slices = skip ? 0 : (0xb0 - 1);
+}
+
+void mpeg2_slice_region (mpeg2dec_t * mpeg2dec, int start, int end)
+{
+    start = (start < 1) ? 1 : (start > 0xb0) ? 0xb0 : start;
+    end = (end < start) ? start : (end > 0xb0) ? 0xb0 : end;
+    mpeg2dec->first_decode_slice = start;
+    mpeg2dec->nb_decode_slices = end - start;
+}
+
+void mpeg2_pts (mpeg2dec_t * mpeg2dec, uint32_t pts)
+{
+    mpeg2dec->pts_previous = mpeg2dec->pts_current;
+    mpeg2dec->pts_current = pts;
+    mpeg2dec->num_pts++;
+    mpeg2dec->bytes_since_pts = 0;
+}
+
+uint32_t mpeg2_accel (uint32_t accel)
+{
+    if (!mpeg2_accels) {
+	if (accel & MPEG2_ACCEL_DETECT)
+	    accel |= mpeg2_detect_accel ();
+	mpeg2_accels = accel |= MPEG2_ACCEL_DETECT;
+	mpeg2_cpu_state_init (accel);
+	mpeg2_idct_init (accel);
+	mpeg2_mc_init (accel);
+    }
+    return mpeg2_accels & ~MPEG2_ACCEL_DETECT;
+}
+
+mpeg2dec_t * mpeg2_init (void)
+{
+    mpeg2dec_t * mpeg2dec;
+
+    mpeg2_accel (MPEG2_ACCEL_DETECT);
+
+    mpeg2dec = (mpeg2dec_t *) mpeg2_malloc (sizeof (mpeg2dec_t),
+					    ALLOC_MPEG2DEC);
+    if (mpeg2dec == NULL)
+	return NULL;
+
+    memset (mpeg2dec, 0, sizeof (mpeg2dec_t));
+
+    mpeg2dec->chunk_buffer = (uint8_t *) mpeg2_malloc (BUFFER_SIZE + 4,
+						       ALLOC_CHUNK);
+
+    mpeg2dec->shift = 0xffffff00;
+    mpeg2dec->action = mpeg2_seek_sequence;
+    mpeg2dec->code = 0xb4;
+    mpeg2dec->first_decode_slice = 1;
+    mpeg2dec->nb_decode_slices = 0xb0 - 1;
+    mpeg2dec->convert_id = NULL;
+
+    /* initialize substructures */
+    mpeg2_header_state_init (mpeg2dec);
+
+    return mpeg2dec;
+}
+
+void mpeg2_close (mpeg2dec_t * mpeg2dec)
+{
+    int i;
+
+    /* static uint8_t finalizer[] = {0,0,1,0xb4}; */
+    /* mpeg2_decode_data (mpeg2dec, finalizer, finalizer+4); */
+
+    mpeg2_free (mpeg2dec->chunk_buffer);
+    if (!mpeg2dec->custom_fbuf)
+	for (i = mpeg2dec->alloc_index_user; i < mpeg2dec->alloc_index; i++)
+	    mpeg2_free (mpeg2dec->fbuf_alloc[i].fbuf.buf[0]);
+    if (mpeg2dec->convert_start)
+	for (i = 0; i < 3; i++)
+	    mpeg2_free (mpeg2dec->yuv_buf[i][0]);
+    if (mpeg2dec->convert_id)
+	mpeg2_free (mpeg2dec->convert_id);
+    mpeg2_free (mpeg2dec);
+}
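
Taken together, the functions above form a push parser: the caller hands raw elementary-stream bytes to mpeg2_buffer() and keeps calling mpeg2_parse(), which returns -1 when it runs out of data and otherwise one of the STATE_* values as headers and pictures complete. A minimal sketch of that loop; get_compressed_chunk() is a hypothetical data source and the per-state handling is only hinted at:

#include <inttypes.h>
#include "mpeg2.h"

extern int get_compressed_chunk (uint8_t ** start, uint8_t ** end);  /* hypothetical */

static void decode_stream (void)
{
    mpeg2dec_t * decoder = mpeg2_init ();
    const mpeg2_info_t * info = mpeg2_info (decoder);
    uint8_t * start, * end;
    int state;

    while (get_compressed_chunk (&start, &end)) {
        mpeg2_buffer (decoder, start, end);
        while ((state = mpeg2_parse (decoder)) != -1) {  /* -1: feed more data */
            if (state == STATE_SEQUENCE)
                ;   /* stream parameters are now available through info */
            else if (state == STATE_SLICE)
                ;   /* a picture has been decoded; a real player fetches it via info */
            /* STATE_INVALID and the remaining states are ignored in this sketch */
        }
    }
    (void) info;
    mpeg2_close (decoder);
}
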
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/idct_alpha.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,380 @@
+/*
+ * idct_alpha.c
+ * Copyright (C) 2002 Falk Hueffner <falk@debian.org>
+ * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "config.h"
+
+#ifdef ARCH_ALPHA
+
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "alpha_asm.h"
+#include "attributes.h"
+
+#define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */
+#define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */
+#define W3 2408 /* 2048*sqrt (2)*cos (3*pi/16) */
+#define W5 1609 /* 2048*sqrt (2)*cos (5*pi/16) */
+#define W6 1108 /* 2048*sqrt (2)*cos (6*pi/16) */
+#define W7 565  /* 2048*sqrt (2)*cos (7*pi/16) */
+
+static uint8_t clip_lut[1024];
+#define CLIP(i) ((clip_lut+384)[(i)])
+
+#if 0
+#define BUTTERFLY(t0,t1,W0,W1,d0,d1)	\
+do {					\
+    t0 = W0*d0 + W1*d1;			\
+    t1 = W0*d1 - W1*d0;			\
+} while (0)
+#else
+#define BUTTERFLY(t0,t1,W0,W1,d0,d1)	\
+do {					\
+    int_fast32_t tmp = W0 * (d0 + d1);	\
+    t0 = tmp + (W1 - W0) * d1;		\
+    t1 = tmp - (W1 + W0) * d0;		\
+} while (0)
+#endif
+
+static inline void idct_row (int16_t * const block)
+{
+    uint64_t l, r;
+    int_fast32_t d0, d1, d2, d3;
+    int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3;
+    int_fast32_t t0, t1, t2, t3;
+
+    l = ldq (block);
+    r = ldq (block + 4);
+
+    /* shortcut */
+    if (likely (!((l & ~0xffffUL) | r))) {
+	uint64_t tmp = (uint16_t) (l << 3);
+	tmp |= tmp << 16;
+	tmp |= tmp << 32;
+	((int32_t *)block)[0] = tmp;
+	((int32_t *)block)[1] = tmp;
+	((int32_t *)block)[2] = tmp;
+	((int32_t *)block)[3] = tmp;
+	return;
+    }
+
+    d0 = (sextw (l) << 11) + 128;
+    d1 = sextw (extwl (l, 2));
+    d2 = sextw (extwl (l, 4)) << 11;
+    d3 = sextw (extwl (l, 6));
+    t0 = d0 + d2;
+    t1 = d0 - d2;
+    BUTTERFLY (t2, t3, W6, W2, d3, d1);
+    a0 = t0 + t2;
+    a1 = t1 + t3;
+    a2 = t1 - t3;
+    a3 = t0 - t2;
+
+    d0 = sextw (r);
+    d1 = sextw (extwl (r, 2));
+    d2 = sextw (extwl (r, 4));
+    d3 = sextw (extwl (r, 6));
+    BUTTERFLY (t0, t1, W7, W1, d3, d0);
+    BUTTERFLY (t2, t3, W3, W5, d1, d2);
+    b0 = t0 + t2;
+    b3 = t1 + t3;
+    t0 -= t2;
+    t1 -= t3;
+    b1 = ((t0 + t1) * 181) >> 8;
+    b2 = ((t0 - t1) * 181) >> 8;
+
+    block[0] = (a0 + b0) >> 8;
+    block[1] = (a1 + b1) >> 8;
+    block[2] = (a2 + b2) >> 8;
+    block[3] = (a3 + b3) >> 8;
+    block[4] = (a3 - b3) >> 8;
+    block[5] = (a2 - b2) >> 8;
+    block[6] = (a1 - b1) >> 8;
+    block[7] = (a0 - b0) >> 8;
+}
+
+static inline void idct_col (int16_t * const block)
+{
+    int_fast32_t d0, d1, d2, d3;
+    int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3;
+    int_fast32_t t0, t1, t2, t3;
+
+    d0 = (block[8*0] << 11) + 65536;
+    d1 = block[8*1];
+    d2 = block[8*2] << 11;
+    d3 = block[8*3];
+    t0 = d0 + d2;
+    t1 = d0 - d2;
+    BUTTERFLY (t2, t3, W6, W2, d3, d1);
+    a0 = t0 + t2;
+    a1 = t1 + t3;
+    a2 = t1 - t3;
+    a3 = t0 - t2;
+
+    d0 = block[8*4];
+    d1 = block[8*5];
+    d2 = block[8*6];
+    d3 = block[8*7];
+    BUTTERFLY (t0, t1, W7, W1, d3, d0);
+    BUTTERFLY (t2, t3, W3, W5, d1, d2);
+    b0 = t0 + t2;
+    b3 = t1 + t3;
+    t0 = (t0 - t2) >> 8;
+    t1 = (t1 - t3) >> 8;
+    b1 = (t0 + t1) * 181;
+    b2 = (t0 - t1) * 181;
+
+    block[8*0] = (a0 + b0) >> 17;
+    block[8*1] = (a1 + b1) >> 17;
+    block[8*2] = (a2 + b2) >> 17;
+    block[8*3] = (a3 + b3) >> 17;
+    block[8*4] = (a3 - b3) >> 17;
+    block[8*5] = (a2 - b2) >> 17;
+    block[8*6] = (a1 - b1) >> 17;
+    block[8*7] = (a0 - b0) >> 17;
+}
+
+void mpeg2_idct_copy_mvi (int16_t * block, uint8_t * dest, const int stride)
+{
+    uint64_t clampmask;
+    int i;
+
+    for (i = 0; i < 8; i++)
+	idct_row (block + 8 * i);
+
+    for (i = 0; i < 8; i++)
+	idct_col (block + i);
+
+    clampmask = zap (-1, 0xaa);	/* 0x00ff00ff00ff00ff */
+    do {
+	uint64_t shorts0, shorts1;
+
+	shorts0 = ldq (block);
+	shorts0 = maxsw4 (shorts0, 0);
+	shorts0 = minsw4 (shorts0, clampmask);
+	stl (pkwb (shorts0), dest);
+
+	shorts1 = ldq (block + 4);
+	shorts1 = maxsw4 (shorts1, 0);
+	shorts1 = minsw4 (shorts1, clampmask);
+	stl (pkwb (shorts1), dest + 4);
+
+	stq (0, block);
+	stq (0, block + 4);
+
+	dest += stride;
+	block += 8;
+    } while (--i);
+}
+
+void mpeg2_idct_add_mvi (const int last, int16_t * block,
+			 uint8_t * dest, const int stride)
+{
+    uint64_t clampmask;
+    uint64_t signmask;
+    int i;
+
+    if (last != 129 || (block[0] & 7) == 4) {
+	for (i = 0; i < 8; i++)
+	    idct_row (block + 8 * i);
+	for (i = 0; i < 8; i++)
+	    idct_col (block + i);
+	clampmask = zap (-1, 0xaa);	/* 0x00ff00ff00ff00ff */
+	signmask = zap (-1, 0x33);
+	signmask ^= signmask >> 1;	/* 0x8000800080008000 */
+
+	do {
+	    uint64_t shorts0, pix0, signs0;
+	    uint64_t shorts1, pix1, signs1;
+
+	    shorts0 = ldq (block);
+	    shorts1 = ldq (block + 4);
+
+	    pix0 = unpkbw (ldl (dest));
+	    /* signed subword add (MMX paddw).  */
+	    signs0 = shorts0 & signmask;
+	    shorts0 &= ~signmask;
+	    shorts0 += pix0;
+	    shorts0 ^= signs0;
+	    /* clamp. */
+	    shorts0 = maxsw4 (shorts0, 0);
+	    shorts0 = minsw4 (shorts0, clampmask);	
+
+	    /* next 4.  */
+	    pix1 = unpkbw (ldl (dest + 4));
+	    signs1 = shorts1 & signmask;
+	    shorts1 &= ~signmask;
+	    shorts1 += pix1;
+	    shorts1 ^= signs1;
+	    shorts1 = maxsw4 (shorts1, 0);
+	    shorts1 = minsw4 (shorts1, clampmask);
+
+	    stl (pkwb (shorts0), dest);
+	    stl (pkwb (shorts1), dest + 4);
+	    stq (0, block);
+	    stq (0, block + 4);
+
+	    dest += stride;
+	    block += 8;
+	} while (--i);
+    } else {
+	int DC;
+	uint64_t p0, p1, p2, p3, p4, p5, p6, p7;
+	uint64_t DCs;
+
+	DC = (block[0] + 4) >> 3;
+	block[0] = block[63] = 0;
+
+	p0 = ldq (dest + 0 * stride);
+	p1 = ldq (dest + 1 * stride);
+	p2 = ldq (dest + 2 * stride);
+	p3 = ldq (dest + 3 * stride);
+	p4 = ldq (dest + 4 * stride);
+	p5 = ldq (dest + 5 * stride);
+	p6 = ldq (dest + 6 * stride);
+	p7 = ldq (dest + 7 * stride);
+
+	if (DC > 0) {
+	    DCs = BYTE_VEC (likely (DC <= 255) ? DC : 255);
+	    p0 += minub8 (DCs, ~p0);
+	    p1 += minub8 (DCs, ~p1);
+	    p2 += minub8 (DCs, ~p2);
+	    p3 += minub8 (DCs, ~p3);
+	    p4 += minub8 (DCs, ~p4);
+	    p5 += minub8 (DCs, ~p5);
+	    p6 += minub8 (DCs, ~p6);
+	    p7 += minub8 (DCs, ~p7);
+	} else {
+	    DCs = BYTE_VEC (likely (-DC <= 255) ? -DC : 255);
+	    p0 -= minub8 (DCs, p0);
+	    p1 -= minub8 (DCs, p1);
+	    p2 -= minub8 (DCs, p2);
+	    p3 -= minub8 (DCs, p3);
+	    p4 -= minub8 (DCs, p4);
+	    p5 -= minub8 (DCs, p5);
+	    p6 -= minub8 (DCs, p6);
+	    p7 -= minub8 (DCs, p7);
+	}
+
+	stq (p0, dest + 0 * stride);
+	stq (p1, dest + 1 * stride);
+	stq (p2, dest + 2 * stride);
+	stq (p3, dest + 3 * stride);
+	stq (p4, dest + 4 * stride);
+	stq (p5, dest + 5 * stride);
+	stq (p6, dest + 6 * stride);
+	stq (p7, dest + 7 * stride);
+    }
+}
+
+void mpeg2_idct_copy_alpha (int16_t * block, uint8_t * dest, const int stride)
+{
+    int i;
+
+    for (i = 0; i < 8; i++)
+	idct_row (block + 8 * i);
+    for (i = 0; i < 8; i++)
+	idct_col (block + i);
+    do {
+	dest[0] = CLIP (block[0]);
+	dest[1] = CLIP (block[1]);
+	dest[2] = CLIP (block[2]);
+	dest[3] = CLIP (block[3]);
+	dest[4] = CLIP (block[4]);
+	dest[5] = CLIP (block[5]);
+	dest[6] = CLIP (block[6]);
+	dest[7] = CLIP (block[7]);
+
+	stq(0, block);
+	stq(0, block + 4);
+
+	dest += stride;
+	block += 8;
+    } while (--i);
+}
+
+void mpeg2_idct_add_alpha (const int last, int16_t * block,
+			   uint8_t * dest, const int stride)
+{
+    int i;
+
+    if (last != 129 || (block[0] & 7) == 4) {
+	for (i = 0; i < 8; i++)
+	    idct_row (block + 8 * i);
+	for (i = 0; i < 8; i++)
+	    idct_col (block + i);
+	do {
+	    dest[0] = CLIP (block[0] + dest[0]);
+	    dest[1] = CLIP (block[1] + dest[1]);
+	    dest[2] = CLIP (block[2] + dest[2]);
+	    dest[3] = CLIP (block[3] + dest[3]);
+	    dest[4] = CLIP (block[4] + dest[4]);
+	    dest[5] = CLIP (block[5] + dest[5]);
+	    dest[6] = CLIP (block[6] + dest[6]);
+	    dest[7] = CLIP (block[7] + dest[7]);
+
+	    stq(0, block);
+	    stq(0, block + 4);
+
+	    dest += stride;
+	    block += 8;
+	} while (--i);
+    } else {
+	int DC;
+
+	DC = (block[0] + 4) >> 3;
+	block[0] = block[63] = 0;
+	i = 8;
+	do {
+	    dest[0] = CLIP (DC + dest[0]);
+	    dest[1] = CLIP (DC + dest[1]);
+	    dest[2] = CLIP (DC + dest[2]);
+	    dest[3] = CLIP (DC + dest[3]);
+	    dest[4] = CLIP (DC + dest[4]);
+	    dest[5] = CLIP (DC + dest[5]);
+	    dest[6] = CLIP (DC + dest[6]);
+	    dest[7] = CLIP (DC + dest[7]);
+	    dest += stride;
+	} while (--i);
+    }
+}
+
+void mpeg2_idct_alpha_init(int no_mvi)
+{
+    extern uint8_t mpeg2_scan_norm[64];
+    extern uint8_t mpeg2_scan_alt[64];
+    int i, j;
+
+    if (no_mvi)
+	for (i = -384; i < 640; i++)
+	    clip_lut[i + 384] = (i < 0) ? 0 : ((i > 255) ? 255 : i);
+    for (i = 0; i < 64; i++) {
+	j = mpeg2_scan_norm[i];
+	mpeg2_scan_norm[i] = ((j & 0x36) >> 1) | ((j & 0x09) << 2);
+	j = mpeg2_scan_alt[i];
+	mpeg2_scan_alt[i] = ((j & 0x36) >> 1) | ((j & 0x09) << 2);
+    }
+}
+
+#endif /* ARCH_ALPHA */
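
The BUTTERFLY variant actually compiled in idct_alpha.c (the #else branch) performs the rotation with three multiplies instead of the four in the disabled reference version. With tmp = W0*(d0 + d1):

    t0 = tmp + (W1 - W0)*d1 = W0*d0 + W0*d1 + W1*d1 - W0*d1 = W0*d0 + W1*d1
    t1 = tmp - (W1 + W0)*d0 = W0*d0 + W0*d1 - W1*d0 - W0*d0 = W0*d1 - W1*d0

so it computes exactly the same t0/t1 as the straightforward form, trading one multiply for one extra addition; (W1 - W0) and (W1 + W0) are compile-time constants at every call site.
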
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/idct_altivec.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,705 @@
+/*
+ * idct_altivec.c
+ * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ALTIVEC__
+
+#include "config.h"
+
+#ifdef ARCH_PPC
+
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+#include "attributes.h"
+
+static const int16_t constants[5][8] ATTR_ALIGN(16) = {
+    {23170, 13573, 6518, 21895, -23170, -21895, 32, 31},
+    {16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725},
+    {22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521},
+    {21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692},
+    {19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722}
+};
+
+/*
+ * The asm code is generated with:
+ *
+ * gcc-2.95 -fvec -D__ALTIVEC__	-O9 -fomit-frame-pointer -mregnames -S
+ *	idct_altivec.c
+ *
+ * awk '{args=""; len=split ($2, arg, ",");
+ *	for (i=1; i<=len; i++) { a=arg[i]; if (i<len) a=a",";
+ *				 args = args sprintf ("%-6s", a) }
+ *	printf ("\t\"\t%-16s%-24s\\n\"\n", $1, args) }' idct_altivec.s |
+ * unexpand -a
+ *
+ * I then do some simple trimming on the function prolog/trailers
+ */
+
+void mpeg2_idct_copy_altivec (int16_t * block, uint8_t * dest, int stride)
+{
+    asm ("						\n"
+	"#	stwu		%r1,  -128(%r1)		\n"
+	"#	mflr		%r0			\n"
+	"#	stw		%r0,  132(%r1)		\n"
+	"#	addi		%r0,  %r1,  128		\n"
+	"#	bl		_savev25		\n"
+
+	"	addi		%r9,  %r3,  112		\n"
+	"	vspltish	%v25, 4			\n"
+	"	vxor		%v13, %v13, %v13	\n"
+	"	lis		%r10, constants@ha	\n"
+	"	lvx		%v1,  0,    %r9		\n"
+	"	la		%r10, constants@l(%r10) \n"
+	"	lvx		%v5,  0,    %r3		\n"
+	"	addi		%r9,  %r3,  16		\n"
+	"	lvx		%v8,  0,    %r10	\n"
+	"	addi		%r11, %r10, 32		\n"
+	"	lvx		%v12, 0,    %r9		\n"
+	"	lvx		%v6,  0,    %r11	\n"
+	"	addi		%r8,  %r3,  48		\n"
+	"	vslh		%v1,  %v1,  %v25	\n"
+	"	addi		%r9,  %r3,  80		\n"
+	"	lvx		%v11, 0,    %r8		\n"
+	"	vslh		%v5,  %v5,  %v25	\n"
+	"	lvx		%v0,  0,    %r9		\n"
+	"	addi		%r11, %r10, 64		\n"
+	"	vsplth		%v3,  %v8,  2		\n"
+	"	lvx		%v7,  0,    %r11	\n"
+	"	addi		%r9,  %r3,  96		\n"
+	"	vslh		%v12, %v12, %v25	\n"
+	"	vmhraddshs	%v27, %v1,  %v6,  %v13	\n"
+	"	addi		%r8,  %r3,  32		\n"
+	"	vsplth		%v2,  %v8,  5		\n"
+	"	lvx		%v1,  0,    %r9		\n"
+	"	vslh		%v11, %v11, %v25	\n"
+	"	addi		%r3,  %r3,  64		\n"
+	"	lvx		%v9,  0,    %r8		\n"
+	"	addi		%r9,  %r10, 48		\n"
+	"	vslh		%v0,  %v0,  %v25	\n"
+	"	lvx		%v4,  0,    %r9		\n"
+	"	vmhraddshs	%v31, %v12, %v6,  %v13	\n"
+	"	addi		%r10, %r10, 16		\n"
+	"	vmhraddshs	%v30, %v0,  %v7,  %v13	\n"
+	"	lvx		%v10, 0,    %r3		\n"
+	"	vsplth		%v19, %v8,  3		\n"
+	"	vmhraddshs	%v15, %v11, %v7,  %v13	\n"
+	"	lvx		%v12, 0,    %r10	\n"
+	"	vsplth		%v6,  %v8,  4		\n"
+	"	vslh		%v1,  %v1,  %v25	\n"
+	"	vsplth		%v11, %v8,  1		\n"
+	"	li		%r9,  4			\n"
+	"	vslh		%v9,  %v9,  %v25	\n"
+	"	vsplth		%v7,  %v8,  0		\n"
+	"	vmhraddshs	%v18, %v1,  %v4,  %v13	\n"
+	"	vspltw		%v8,  %v8,  3		\n"
+	"	vsubshs		%v0,  %v13, %v27	\n"
+	"	vmhraddshs	%v1,  %v9,  %v4,  %v13	\n"
+	"	vmhraddshs	%v17, %v3,  %v31, %v0	\n"
+	"	vmhraddshs	%v4,  %v2,  %v15, %v30	\n"
+	"	vslh		%v10, %v10, %v25	\n"
+	"	vmhraddshs	%v9,  %v5,  %v12, %v13	\n"
+	"	vspltish	%v25, 6			\n"
+	"	vmhraddshs	%v5,  %v10, %v12, %v13	\n"
+	"	vmhraddshs	%v28, %v19, %v30, %v15	\n"
+	"	vmhraddshs	%v27, %v3,  %v27, %v31	\n"
+	"	vsubshs		%v0,  %v13, %v18	\n"
+	"	vmhraddshs	%v18, %v11, %v18, %v1	\n"
+	"	vaddshs		%v30, %v17, %v4		\n"
+	"	vmhraddshs	%v12, %v11, %v1,  %v0	\n"
+	"	vsubshs		%v4,  %v17, %v4		\n"
+	"	vaddshs		%v10, %v9,  %v5		\n"
+	"	vsubshs		%v17, %v27, %v28	\n"
+	"	vaddshs		%v27, %v27, %v28	\n"
+	"	vsubshs		%v1,  %v9,  %v5		\n"
+	"	vaddshs		%v28, %v10, %v18	\n"
+	"	vsubshs		%v18, %v10, %v18	\n"
+	"	vaddshs		%v10, %v1,  %v12	\n"
+	"	vsubshs		%v1,  %v1,  %v12	\n"
+	"	vsubshs		%v12, %v17, %v4		\n"
+	"	vaddshs		%v4,  %v17, %v4		\n"
+	"	vmhraddshs	%v5,  %v7,  %v12, %v1	\n"
+	"	vmhraddshs	%v26, %v6,  %v4,  %v10	\n"
+	"	vmhraddshs	%v29, %v6,  %v12, %v1	\n"
+	"	vmhraddshs	%v14, %v7,  %v4,  %v10	\n"
+	"	vsubshs		%v12, %v18, %v30	\n"
+	"	vaddshs		%v9,  %v28, %v27	\n"
+	"	vaddshs		%v16, %v18, %v30	\n"
+	"	vsubshs		%v10, %v28, %v27	\n"
+	"	vmrglh		%v31, %v9,  %v12	\n"
+	"	vmrglh		%v30, %v5,  %v26	\n"
+	"	vmrglh		%v15, %v14, %v29	\n"
+	"	vmrghh		%v5,  %v5,  %v26	\n"
+	"	vmrglh		%v27, %v16, %v10	\n"
+	"	vmrghh		%v9,  %v9,  %v12	\n"
+	"	vmrghh		%v18, %v16, %v10	\n"
+	"	vmrghh		%v1,  %v14, %v29	\n"
+	"	vmrglh		%v14, %v9,  %v5		\n"
+	"	vmrglh		%v16, %v31, %v30	\n"
+	"	vmrglh		%v10, %v15, %v27	\n"
+	"	vmrghh		%v9,  %v9,  %v5		\n"
+	"	vmrghh		%v26, %v15, %v27	\n"
+	"	vmrglh		%v27, %v16, %v10	\n"
+	"	vmrghh		%v12, %v1,  %v18	\n"
+	"	vmrglh		%v29, %v1,  %v18	\n"
+	"	vsubshs		%v0,  %v13, %v27	\n"
+	"	vmrghh		%v5,  %v31, %v30	\n"
+	"	vmrglh		%v31, %v9,  %v12	\n"
+	"	vmrglh		%v30, %v5,  %v26	\n"
+	"	vmrglh		%v15, %v14, %v29	\n"
+	"	vmhraddshs	%v17, %v3,  %v31, %v0	\n"
+	"	vmrghh		%v18, %v16, %v10	\n"
+	"	vmhraddshs	%v27, %v3,  %v27, %v31	\n"
+	"	vmhraddshs	%v4,  %v2,  %v15, %v30	\n"
+	"	vmrghh		%v1,  %v14, %v29	\n"
+	"	vmhraddshs	%v28, %v19, %v30, %v15	\n"
+	"	vmrghh		%v0,  %v9,  %v12	\n"
+	"	vsubshs		%v13, %v13, %v18	\n"
+	"	vmrghh		%v5,  %v5,  %v26	\n"
+	"	vmhraddshs	%v18, %v11, %v18, %v1	\n"
+	"	vaddshs		%v9,  %v0,  %v8		\n"
+	"	vaddshs		%v30, %v17, %v4		\n"
+	"	vmhraddshs	%v12, %v11, %v1,  %v13	\n"
+	"	vsubshs		%v4,  %v17, %v4		\n"
+	"	vaddshs		%v10, %v9,  %v5		\n"
+	"	vsubshs		%v17, %v27, %v28	\n"
+	"	vaddshs		%v27, %v27, %v28	\n"
+	"	vsubshs		%v1,  %v9,  %v5		\n"
+	"	vaddshs		%v28, %v10, %v18	\n"
+	"	vsubshs		%v18, %v10, %v18	\n"
+	"	vaddshs		%v10, %v1,  %v12	\n"
+	"	vsubshs		%v1,  %v1,  %v12	\n"
+	"	vsubshs		%v12, %v17, %v4		\n"
+	"	vaddshs		%v4,  %v17, %v4		\n"
+	"	vaddshs		%v9,  %v28, %v27	\n"
+	"	vmhraddshs	%v14, %v7,  %v4,  %v10	\n"
+	"	vsrah		%v9,  %v9,  %v25	\n"
+	"	vmhraddshs	%v5,  %v7,  %v12, %v1	\n"
+	"	vpkshus		%v0,  %v9,  %v9		\n"
+	"	vmhraddshs	%v29, %v6,  %v12, %v1	\n"
+	"	stvewx		%v0,  0,    %r4		\n"
+	"	vaddshs		%v16, %v18, %v30	\n"
+	"	vsrah		%v31, %v14, %v25	\n"
+	"	stvewx		%v0,  %r9,  %r4		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vsrah		%v15, %v16, %v25	\n"
+	"	vpkshus		%v0,  %v31, %v31	\n"
+	"	vsrah		%v1,  %v5,  %v25	\n"
+	"	stvewx		%v0,  0,    %r4		\n"
+	"	vsubshs		%v12, %v18, %v30	\n"
+	"	stvewx		%v0,  %r9,  %r4		\n"
+	"	vmhraddshs	%v26, %v6,  %v4,  %v10	\n"
+	"	vpkshus		%v0,  %v1,  %v1		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vsrah		%v5,  %v12, %v25	\n"
+	"	stvewx		%v0,  0,    %r4		\n"
+	"	vsrah		%v30, %v29, %v25	\n"
+	"	stvewx		%v0,  %r9,  %r4		\n"
+	"	vsubshs		%v10, %v28, %v27	\n"
+	"	vpkshus		%v0,  %v15, %v15	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	stvewx		%v0,  0,    %r4		\n"
+	"	vsrah		%v18, %v26, %v25	\n"
+	"	stvewx		%v0,  %r9,  %r4		\n"
+	"	vsrah		%v27, %v10, %v25	\n"
+	"	vpkshus		%v0,  %v5,  %v5		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	stvewx		%v0,  0,    %r4		\n"
+	"	stvewx		%v0,  %r9,  %r4		\n"
+	"	vpkshus		%v0,  %v30, %v30	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	stvewx		%v0,  0,    %r4		\n"
+	"	stvewx		%v0,  %r9,  %r4		\n"
+	"	vpkshus		%v0,  %v18, %v18	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	stvewx		%v0,  0,    %r4		\n"
+	"	stvewx		%v0,  %r9,  %r4		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vpkshus		%v0,  %v27, %v27	\n"
+	"	stvewx		%v0,  0,    %r4		\n"
+	"	stvewx		%v0,  %r9,  %r4		\n"
+
+	"#	addi		%r0,  %r1,  128		\n"
+	"#	bl		_restv25		\n"
+	"#	lwz		%r0,  132(%r1)		\n"
+	"#	mtlr		%r0			\n"
+	"#	la		%r1,  128(%r1)		\n"
+
+	"	vxor		%v1,  %v1,  %v1		\n"
+	"	addi		%r9,  %r3,  16		\n"
+	"	stvx		%v1,  0,    %r3		\n"
+	"	stvx		%v1,  0,    %r9		\n"
+	"	addi		%r11, %r3,  32		\n"
+	"	stvx		%v1,  0,    %r11	\n"
+	"	addi		%r9,  %r3,  48		\n"
+	"	stvx		%v1,  0,    %r9		\n"
+	"	addi		%r11, %r3,  -64		\n"
+	"	stvx		%v1,  0,    %r11	\n"
+	"	addi		%r9,  %r3,  -48		\n"
+	"	stvx		%v1,  0,    %r9		\n"
+	"	addi		%r11, %r3,  -32		\n"
+	"	stvx		%v1,  0,    %r11	\n"
+	"	addi		%r3,  %r3,  -16		\n"
+	"	stvx		%v1,  0,    %r3		\n"
+	 );
+}
+
+void mpeg2_idct_add_altivec (int last, int16_t * block,
+			     uint8_t * dest, int stride)
+{
+    asm ("						\n"
+	"#	stwu		%r1,  -192(%r1)		\n"
+	"#	mflr		%r0			\n"
+	"#	stw		%r0,  196(%r1)		\n"
+	"#	addi		%r0,  %r1,  192		\n"
+	"#	bl		_savev21		\n"
+
+	"	addi		%r9,  %r4,  112		\n"
+	"	vspltish	%v21, 4			\n"
+	"	vxor		%v1,  %v1,  %v1		\n"
+	"	lvx		%v13, 0,    %r9		\n"
+	"	lis		%r10, constants@ha	\n"
+	"	vspltisw	%v3,  -1		\n"
+	"	la		%r10, constants@l(%r10) \n"
+	"	lvx		%v5,  0,    %r4		\n"
+	"	addi		%r9,  %r4,  16		\n"
+	"	lvx		%v8,  0,    %r10	\n"
+	"	lvx		%v12, 0,    %r9		\n"
+	"	addi		%r11, %r10, 32		\n"
+	"	lvx		%v6,  0,    %r11	\n"
+	"	addi		%r8,  %r4,  48		\n"
+	"	vslh		%v13, %v13, %v21	\n"
+	"	addi		%r9,  %r4,  80		\n"
+	"	lvx		%v11, 0,    %r8		\n"
+	"	vslh		%v5,  %v5,  %v21	\n"
+	"	lvx		%v0,  0,    %r9		\n"
+	"	addi		%r11, %r10, 64		\n"
+	"	vsplth		%v2,  %v8,  2		\n"
+	"	lvx		%v7,  0,    %r11	\n"
+	"	vslh		%v12, %v12, %v21	\n"
+	"	addi		%r9,  %r4,  96		\n"
+	"	vmhraddshs	%v24, %v13, %v6,  %v1	\n"
+	"	addi		%r8,  %r4,  32		\n"
+	"	vsplth		%v17, %v8,  5		\n"
+	"	lvx		%v13, 0,    %r9		\n"
+	"	vslh		%v11, %v11, %v21	\n"
+	"	addi		%r4,  %r4,  64		\n"
+	"	lvx		%v10, 0,    %r8		\n"
+	"	vslh		%v0,  %v0,  %v21	\n"
+	"	addi		%r9,  %r10, 48		\n"
+	"	vmhraddshs	%v31, %v12, %v6,  %v1	\n"
+	"	lvx		%v4,  0,    %r9		\n"
+	"	addi		%r10, %r10, 16		\n"
+	"	vmhraddshs	%v26, %v0,  %v7,  %v1	\n"
+	"	lvx		%v9,  0,    %r4		\n"
+	"	vsplth		%v16, %v8,  3		\n"
+	"	vmhraddshs	%v22, %v11, %v7,  %v1	\n"
+	"	lvx		%v6,  0,    %r10	\n"
+	"	lvsl		%v19, 0,    %r5		\n"
+	"	vsubshs		%v12, %v1,  %v24	\n"
+	"	lvsl		%v0,  %r6,  %r5		\n"
+	"	vsplth		%v11, %v8,  1		\n"
+	"	vslh		%v10, %v10, %v21	\n"
+	"	vmrghb		%v19, %v3,  %v19	\n"
+	"	lvx		%v15, 0,    %r5		\n"
+	"	vslh		%v13, %v13, %v21	\n"
+	"	vmrghb		%v3,  %v3,  %v0		\n"
+	"	li		%r9,  4			\n"
+	"	vmhraddshs	%v14, %v2,  %v31, %v12	\n"
+	"	vsplth		%v7,  %v8,  0		\n"
+	"	vmhraddshs	%v23, %v13, %v4,  %v1	\n"
+	"	vsplth		%v18, %v8,  4		\n"
+	"	vmhraddshs	%v27, %v10, %v4,  %v1	\n"
+	"	vspltw		%v8,  %v8,  3		\n"
+	"	vmhraddshs	%v12, %v17, %v22, %v26	\n"
+	"	vperm		%v15, %v15, %v1,  %v19	\n"
+	"	vslh		%v9,  %v9,  %v21	\n"
+	"	vmhraddshs	%v10, %v5,  %v6,  %v1	\n"
+	"	vspltish	%v21, 6			\n"
+	"	vmhraddshs	%v30, %v9,  %v6,  %v1	\n"
+	"	vmhraddshs	%v26, %v16, %v26, %v22	\n"
+	"	vmhraddshs	%v24, %v2,  %v24, %v31	\n"
+	"	vmhraddshs	%v31, %v11, %v23, %v27	\n"
+	"	vsubshs		%v0,  %v1,  %v23	\n"
+	"	vaddshs		%v23, %v14, %v12	\n"
+	"	vmhraddshs	%v9,  %v11, %v27, %v0	\n"
+	"	vsubshs		%v12, %v14, %v12	\n"
+	"	vaddshs		%v6,  %v10, %v30	\n"
+	"	vsubshs		%v14, %v24, %v26	\n"
+	"	vaddshs		%v24, %v24, %v26	\n"
+	"	vsubshs		%v13, %v10, %v30	\n"
+	"	vaddshs		%v26, %v6,  %v31	\n"
+	"	vsubshs		%v31, %v6,  %v31	\n"
+	"	vaddshs		%v6,  %v13, %v9		\n"
+	"	vsubshs		%v13, %v13, %v9		\n"
+	"	vsubshs		%v9,  %v14, %v12	\n"
+	"	vaddshs		%v12, %v14, %v12	\n"
+	"	vmhraddshs	%v30, %v7,  %v9,  %v13	\n"
+	"	vmhraddshs	%v25, %v18, %v12, %v6	\n"
+	"	vmhraddshs	%v28, %v18, %v9,  %v13	\n"
+	"	vmhraddshs	%v29, %v7,  %v12, %v6	\n"
+	"	vaddshs		%v10, %v26, %v24	\n"
+	"	vsubshs		%v5,  %v31, %v23	\n"
+	"	vsubshs		%v13, %v26, %v24	\n"
+	"	vaddshs		%v4,  %v31, %v23	\n"
+	"	vmrglh		%v26, %v30, %v25	\n"
+	"	vmrglh		%v31, %v10, %v5		\n"
+	"	vmrglh		%v22, %v29, %v28	\n"
+	"	vmrghh		%v30, %v30, %v25	\n"
+	"	vmrglh		%v24, %v4,  %v13	\n"
+	"	vmrghh		%v10, %v10, %v5		\n"
+	"	vmrghh		%v23, %v4,  %v13	\n"
+	"	vmrghh		%v27, %v29, %v28	\n"
+	"	vmrglh		%v29, %v10, %v30	\n"
+	"	vmrglh		%v4,  %v31, %v26	\n"
+	"	vmrglh		%v13, %v22, %v24	\n"
+	"	vmrghh		%v10, %v10, %v30	\n"
+	"	vmrghh		%v25, %v22, %v24	\n"
+	"	vmrglh		%v24, %v4,  %v13	\n"
+	"	vmrghh		%v5,  %v27, %v23	\n"
+	"	vmrglh		%v28, %v27, %v23	\n"
+	"	vsubshs		%v0,  %v1,  %v24	\n"
+	"	vmrghh		%v30, %v31, %v26	\n"
+	"	vmrglh		%v31, %v10, %v5		\n"
+	"	vmrglh		%v26, %v30, %v25	\n"
+	"	vmrglh		%v22, %v29, %v28	\n"
+	"	vmhraddshs	%v14, %v2,  %v31, %v0	\n"
+	"	vmrghh		%v23, %v4,  %v13	\n"
+	"	vmhraddshs	%v24, %v2,  %v24, %v31	\n"
+	"	vmhraddshs	%v12, %v17, %v22, %v26	\n"
+	"	vmrghh		%v27, %v29, %v28	\n"
+	"	vmhraddshs	%v26, %v16, %v26, %v22	\n"
+	"	vmrghh		%v0,  %v10, %v5		\n"
+	"	vmhraddshs	%v31, %v11, %v23, %v27	\n"
+	"	vmrghh		%v30, %v30, %v25	\n"
+	"	vsubshs		%v13, %v1,  %v23	\n"
+	"	vaddshs		%v10, %v0,  %v8		\n"
+	"	vaddshs		%v23, %v14, %v12	\n"
+	"	vsubshs		%v12, %v14, %v12	\n"
+	"	vaddshs		%v6,  %v10, %v30	\n"
+	"	vsubshs		%v14, %v24, %v26	\n"
+	"	vmhraddshs	%v9,  %v11, %v27, %v13	\n"
+	"	vaddshs		%v24, %v24, %v26	\n"
+	"	vaddshs		%v26, %v6,  %v31	\n"
+	"	vsubshs		%v13, %v10, %v30	\n"
+	"	vaddshs		%v10, %v26, %v24	\n"
+	"	vsubshs		%v31, %v6,  %v31	\n"
+	"	vaddshs		%v6,  %v13, %v9		\n"
+	"	vsrah		%v10, %v10, %v21	\n"
+	"	vsubshs		%v13, %v13, %v9		\n"
+	"	vaddshs		%v0,  %v15, %v10	\n"
+	"	vsubshs		%v9,  %v14, %v12	\n"
+	"	vaddshs		%v12, %v14, %v12	\n"
+	"	vpkshus		%v15, %v0,  %v0		\n"
+	"	stvewx		%v15, 0,    %r5		\n"
+	"	vaddshs		%v4,  %v31, %v23	\n"
+	"	vmhraddshs	%v29, %v7,  %v12, %v6	\n"
+	"	stvewx		%v15, %r9,  %r5		\n"
+	"	add		%r5,  %r5,  %r6		\n"
+	"	vsubshs		%v5,  %v31, %v23	\n"
+	"	lvx		%v15, 0,    %r5		\n"
+	"	vmhraddshs	%v30, %v7,  %v9,  %v13	\n"
+	"	vsrah		%v22, %v4,  %v21	\n"
+	"	vperm		%v15, %v15, %v1,  %v3	\n"
+	"	vmhraddshs	%v28, %v18, %v9,  %v13	\n"
+	"	vsrah		%v31, %v29, %v21	\n"
+	"	vsubshs		%v13, %v26, %v24	\n"
+	"	vaddshs		%v0,  %v15, %v31	\n"
+	"	vsrah		%v27, %v30, %v21	\n"
+	"	vpkshus		%v15, %v0,  %v0		\n"
+	"	vsrah		%v30, %v5,  %v21	\n"
+	"	stvewx		%v15, 0,    %r5		\n"
+	"	vsrah		%v26, %v28, %v21	\n"
+	"	stvewx		%v15, %r9,  %r5		\n"
+	"	vmhraddshs	%v25, %v18, %v12, %v6	\n"
+	"	add		%r5,  %r5,  %r6		\n"
+	"	vsrah		%v24, %v13, %v21	\n"
+	"	lvx		%v15, 0,    %r5		\n"
+	"	vperm		%v15, %v15, %v1,  %v19	\n"
+	"	vsrah		%v23, %v25, %v21	\n"
+	"	vaddshs		%v0,  %v15, %v27	\n"
+	"	vpkshus		%v15, %v0,  %v0		\n"
+	"	stvewx		%v15, 0,    %r5		\n"
+	"	stvewx		%v15, %r9,  %r5		\n"
+	"	add		%r5,  %r5,  %r6		\n"
+	"	lvx		%v15, 0,    %r5		\n"
+	"	vperm		%v15, %v15, %v1,  %v3	\n"
+	"	vaddshs		%v0,  %v15, %v22	\n"
+	"	vpkshus		%v15, %v0,  %v0		\n"
+	"	stvewx		%v15, 0,    %r5		\n"
+	"	stvewx		%v15, %r9,  %r5		\n"
+	"	add		%r5,  %r5,  %r6		\n"
+	"	lvx		%v15, 0,    %r5		\n"
+	"	vperm		%v15, %v15, %v1,  %v19	\n"
+	"	vaddshs		%v0,  %v15, %v30	\n"
+	"	vpkshus		%v15, %v0,  %v0		\n"
+	"	stvewx		%v15, 0,    %r5		\n"
+	"	stvewx		%v15, %r9,  %r5		\n"
+	"	add		%r5,  %r5,  %r6		\n"
+	"	lvx		%v15, 0,    %r5		\n"
+	"	vperm		%v15, %v15, %v1,  %v3	\n"
+	"	vaddshs		%v0,  %v15, %v26	\n"
+	"	vpkshus		%v15, %v0,  %v0		\n"
+	"	stvewx		%v15, 0,    %r5		\n"
+	"	stvewx		%v15, %r9,  %r5		\n"
+	"	add		%r5,  %r5,  %r6		\n"
+	"	lvx		%v15, 0,    %r5		\n"
+	"	vperm		%v15, %v15, %v1,  %v19	\n"
+	"	vaddshs		%v0,  %v15, %v23	\n"
+	"	vpkshus		%v15, %v0,  %v0		\n"
+	"	stvewx		%v15, 0,    %r5		\n"
+	"	stvewx		%v15, %r9,  %r5		\n"
+	"	add		%r5,  %r5,  %r6		\n"
+	"	lvx		%v15, 0,    %r5		\n"
+	"	vperm		%v15, %v15, %v1,  %v3	\n"
+	"	vaddshs		%v0,  %v15, %v24	\n"
+	"	vpkshus		%v15, %v0,  %v0		\n"
+	"	stvewx		%v15, 0,    %r5		\n"
+	"	stvewx		%v15, %r9,  %r5		\n"
+
+	"#	addi		%r0,  %r1,  192		\n"
+	"#	bl		_restv21		\n"
+	"#	lwz		%r0,  196(%r1)		\n"
+	"#	mtlr		%r0			\n"
+	"#	la		%r1,  192(%r1)		\n"
+
+	"	addi		%r9,  %r4,  16		\n"
+	"	stvx		%v1,  0,    %r4		\n"
+	"	stvx		%v1,  0,    %r9		\n"
+	"	addi		%r11, %r4,  32		\n"
+	"	stvx		%v1,  0,    %r11	\n"
+	"	addi		%r9,  %r4,  48		\n"
+	"	stvx		%v1,  0,    %r9		\n"
+	"	addi		%r11, %r4,  -64		\n"
+	"	stvx		%v1,  0,    %r11	\n"
+	"	addi		%r9,  %r4,  -48		\n"
+	"	stvx		%v1,  0,    %r9		\n"
+	"	addi		%r11, %r4,  -32		\n"
+	"	stvx		%v1,  0,    %r11	\n"
+	"	addi		%r4,  %r4,  -16		\n"
+	"	stvx		%v1,  0,    %r4		\n"
+	 );
+}
+
+void mpeg2_idct_altivec_init (void)
+{
+    extern uint8_t mpeg2_scan_norm[64];
+    extern uint8_t mpeg2_scan_alt[64];
+    int i, j;
+
+    i = constants[0][0];	/* just pretending - keeps gcc happy */
+
+    /* the altivec idct uses a transposed input, so we patch scan tables */
+    for (i = 0; i < 64; i++) {
+	j = mpeg2_scan_norm[i];
+	mpeg2_scan_norm[i] = (j >> 3) | ((j & 7) << 3);
+	j = mpeg2_scan_alt[i];
+	mpeg2_scan_alt[i] = (j >> 3) | ((j & 7) << 3);
+    }
+}
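+
+/* A worked example of the remap above: the scan value j encodes an 8x8
+ * position as row * 8 + column, and (j >> 3) | ((j & 7) << 3) swaps the two
+ * 3-bit fields.  For instance j = 10 (row 1, column 2) becomes
+ * 1 | (2 << 3) = 17, i.e. row 2, column 1 - the transposed position that the
+ * transposed-input idct above expects.
+ */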
+
+#endif	/* ARCH_PPC */
+
+#else	/* __ALTIVEC__ */
+
+#define vector_s16_t vector signed short
+#define vector_u16_t vector unsigned short
+#define vector_s8_t vector signed char
+#define vector_u8_t vector unsigned char
+#define vector_s32_t vector signed int
+#define vector_u32_t vector unsigned int
+
+#define IDCT_HALF					\
+    /* 1st stage */					\
+    t1 = vec_mradds (a1, vx7, vx1 );			\
+    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));	\
+    t7 = vec_mradds (a2, vx5, vx3);			\
+    t3 = vec_mradds (ma2, vx3, vx5);			\
+							\
+    /* 2nd stage */					\
+    t5 = vec_adds (vx0, vx4);				\
+    t0 = vec_subs (vx0, vx4);				\
+    t2 = vec_mradds (a0, vx6, vx2);			\
+    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));	\
+    t6 = vec_adds (t8, t3);				\
+    t3 = vec_subs (t8, t3);				\
+    t8 = vec_subs (t1, t7);				\
+    t1 = vec_adds (t1, t7);				\
+							\
+    /* 3rd stage */					\
+    t7 = vec_adds (t5, t2);				\
+    t2 = vec_subs (t5, t2);				\
+    t5 = vec_adds (t0, t4);				\
+    t0 = vec_subs (t0, t4);				\
+    t4 = vec_subs (t8, t3);				\
+    t3 = vec_adds (t8, t3);				\
+							\
+    /* 4th stage */					\
+    vy0 = vec_adds (t7, t1);				\
+    vy7 = vec_subs (t7, t1);				\
+    vy1 = vec_mradds (c4, t3, t5);			\
+    vy6 = vec_mradds (mc4, t3, t5);			\
+    vy2 = vec_mradds (c4, t4, t0);			\
+    vy5 = vec_mradds (mc4, t4, t0);			\
+    vy3 = vec_adds (t2, t6);				\
+    vy4 = vec_subs (t2, t6);
+
+#define IDCT								\
+    vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;		\
+    vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;		\
+    vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias;			\
+    vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8;			\
+    vector_u16_t shift;							\
+									\
+    c4 = vec_splat (constants[0], 0);					\
+    a0 = vec_splat (constants[0], 1);					\
+    a1 = vec_splat (constants[0], 2);					\
+    a2 = vec_splat (constants[0], 3);					\
+    mc4 = vec_splat (constants[0], 4);					\
+    ma2 = vec_splat (constants[0], 5);					\
+    bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3);	\
+									\
+    zero = vec_splat_s16 (0);						\
+    shift = vec_splat_u16 (4);						\
+									\
+    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);	\
+    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);	\
+    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);	\
+    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);	\
+    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);	\
+    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);	\
+    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);	\
+    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);	\
+									\
+    IDCT_HALF								\
+									\
+    vx0 = vec_mergeh (vy0, vy4);					\
+    vx1 = vec_mergel (vy0, vy4);					\
+    vx2 = vec_mergeh (vy1, vy5);					\
+    vx3 = vec_mergel (vy1, vy5);					\
+    vx4 = vec_mergeh (vy2, vy6);					\
+    vx5 = vec_mergel (vy2, vy6);					\
+    vx6 = vec_mergeh (vy3, vy7);					\
+    vx7 = vec_mergel (vy3, vy7);					\
+									\
+    vy0 = vec_mergeh (vx0, vx4);					\
+    vy1 = vec_mergel (vx0, vx4);					\
+    vy2 = vec_mergeh (vx1, vx5);					\
+    vy3 = vec_mergel (vx1, vx5);					\
+    vy4 = vec_mergeh (vx2, vx6);					\
+    vy5 = vec_mergel (vx2, vx6);					\
+    vy6 = vec_mergeh (vx3, vx7);					\
+    vy7 = vec_mergel (vx3, vx7);					\
+									\
+    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);			\
+    vx1 = vec_mergel (vy0, vy4);					\
+    vx2 = vec_mergeh (vy1, vy5);					\
+    vx3 = vec_mergel (vy1, vy5);					\
+    vx4 = vec_mergeh (vy2, vy6);					\
+    vx5 = vec_mergel (vy2, vy6);					\
+    vx6 = vec_mergeh (vy3, vy7);					\
+    vx7 = vec_mergel (vy3, vy7);					\
+									\
+    IDCT_HALF								\
+									\
+    shift = vec_splat_u16 (6);						\
+    vx0 = vec_sra (vy0, shift);						\
+    vx1 = vec_sra (vy1, shift);						\
+    vx2 = vec_sra (vy2, shift);						\
+    vx3 = vec_sra (vy3, shift);						\
+    vx4 = vec_sra (vy4, shift);						\
+    vx5 = vec_sra (vy5, shift);						\
+    vx6 = vec_sra (vy6, shift);						\
+    vx7 = vec_sra (vy7, shift);
+
+static const vector_s16_t constants[5] = {
+    (vector_s16_t)(23170, 13573, 6518, 21895, -23170, -21895, 32, 31),
+    (vector_s16_t)(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725),
+    (vector_s16_t)(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521),
+    (vector_s16_t)(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692),
+    (vector_s16_t)(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722)
+};
+
+void mpeg2_idct_copy_altivec (vector_s16_t * const block, unsigned char * dest,
+			      const int stride)
+{
+    vector_u8_t tmp;
+
+    IDCT
+
+#define COPY(dest,src)						\
+    tmp = vec_packsu (src, src);				\
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);	\
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+
+    COPY (dest, vx0)	dest += stride;
+    COPY (dest, vx1)	dest += stride;
+    COPY (dest, vx2)	dest += stride;
+    COPY (dest, vx3)	dest += stride;
+    COPY (dest, vx4)	dest += stride;
+    COPY (dest, vx5)	dest += stride;
+    COPY (dest, vx6)	dest += stride;
+    COPY (dest, vx7)
+
+    memset (block, 0, 64 * sizeof (signed short));
+}
+
+void mpeg2_idct_add_altivec (const int last, vector_s16_t * const block,
+			     unsigned char * dest, const int stride)
+{
+    vector_u8_t tmp;
+    vector_s16_t tmp2, tmp3;
+    vector_u8_t perm0;
+    vector_u8_t perm1;
+    vector_u8_t p0, p1, p;
+
+    IDCT
+
+    p0 = vec_lvsl (0, dest);
+    p1 = vec_lvsl (stride, dest);
+    p = vec_splat_u8 (-1);
+    perm0 = vec_mergeh (p, p0);
+    perm1 = vec_mergeh (p, p1);
+
+#define ADD(dest,src,perm)						\
+    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */			\
+    tmp = vec_ld (0, dest);						\
+    tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm);	\
+    tmp3 = vec_adds (tmp2, src);					\
+    tmp = vec_packsu (tmp3, tmp3);					\
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);		\
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
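+
+/* Note on the perms used by ADD: vec_lvsl gives the alignment permutation
+ * for dest, and merging it with a vector of 0xff bytes (vec_splat_u8 (-1))
+ * builds a control vector whose 0xff bytes select from the second vec_perm
+ * operand (zero) while the remaining bytes pick the destination pixels.  The
+ * vec_perm in ADD therefore zero-extends eight unsigned pixels to shorts, so
+ * they can be added to the idct output with vec_adds and packed back with
+ * saturation by vec_packsu.
+ */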
+
+    ADD (dest, vx0, perm0)	dest += stride;
+    ADD (dest, vx1, perm1)	dest += stride;
+    ADD (dest, vx2, perm0)	dest += stride;
+    ADD (dest, vx3, perm1)	dest += stride;
+    ADD (dest, vx4, perm0)	dest += stride;
+    ADD (dest, vx5, perm1)	dest += stride;
+    ADD (dest, vx6, perm0)	dest += stride;
+    ADD (dest, vx7, perm1)
+
+    memset (block, 0, 64 * sizeof (signed short));
+}
+
+#endif	/* __ALTIVEC__ */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/motion_comp_alpha.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,253 @@
+/*
+ * motion_comp_alpha.c
+ * Copyright (C) 2002 Falk Hueffner <falk@debian.org>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "config.h"
+
+#ifdef ARCH_ALPHA
+
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+#include "alpha_asm.h"
+
+static inline uint64_t avg2(uint64_t a, uint64_t b)
+{
+    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
+}
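+
+/* The expression above is the usual trick for a byte-wise average that
+ * rounds up, as MPEG-2 half-pel prediction requires: since
+ * a + b == (a | b) + (a & b) and a | b == (a & b) + (a ^ b), we get
+ * ceil ((a + b) / 2) == (a | b) - ((a ^ b) >> 1).  Masking the xor with 0xfe
+ * in every byte keeps the shift from leaking a bit into the byte below.
+ * E.g. for the bytes 3 and 6: (3 | 6) - (((3 ^ 6) & 0xfe) >> 1) = 7 - 2 = 5,
+ * which is indeed (3 + 6 + 1) >> 1.
+ */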
+
+// Load two unaligned quadwords from addr. This macro only works if
+// addr is actually unaligned.
+#define ULOAD16(ret_l, ret_r, addr)			\
+    do {						\
+	uint64_t _l = ldq_u(addr +  0);			\
+	uint64_t _m = ldq_u(addr +  8);			\
+	uint64_t _r = ldq_u(addr + 16);			\
+	ret_l = extql(_l, addr) | extqh(_m, addr);	\
+	ret_r = extql(_m, addr) | extqh(_r, addr);	\
+    } while (0)
+
+// Load two aligned quadwords from addr.
+#define ALOAD16(ret_l, ret_r, addr)			\
+    do {						\
+	ret_l = ldq(addr);				\
+	ret_r = ldq(addr + 8);				\
+    } while (0)
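+
+/* Background: ldq_u fetches the aligned quadword containing addr, and
+ * extql/extqh shift a quadword right resp. left by the byte offset of addr,
+ * so or-ing the two halves reassembles unaligned data.  A single unaligned
+ * quadword load - presumably how uldq in alpha_asm.h is built, this is only
+ * a sketch - looks roughly like:
+ *
+ *	l = ldq_u (addr);  r = ldq_u (addr + 7);
+ *	result = extql (l, addr) | extqh (r, addr);
+ *
+ * For an aligned addr the extqh shift count wraps to zero, which is why
+ * ULOAD16 above really does require an unaligned addr: it would otherwise
+ * or together two unrelated quadwords.
+ */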
+
+#define OP8(LOAD, LOAD16, STORE)		\
+    do {					\
+	STORE(LOAD(pixels), block);		\
+	pixels += line_size;			\
+	block += line_size;			\
+    } while (--h)
+
+#define OP16(LOAD, LOAD16, STORE)		\
+    do {					\
+	uint64_t l, r;				\
+	LOAD16(l, r, pixels);			\
+	STORE(l, block);			\
+	STORE(r, block + 8);			\
+	pixels += line_size;			\
+	block += line_size;			\
+    } while (--h)
+
+#define OP8_X2(LOAD, LOAD16, STORE)			\
+    do {						\
+	uint64_t p0, p1;				\
+							\
+	p0 = LOAD(pixels);				\
+	p1 = p0 >> 8 | ((uint64_t) pixels[8] << 56);	\
+	STORE(avg2(p0, p1), block);			\
+	pixels += line_size;				\
+	block += line_size;				\
+    } while (--h)
+
+#define OP16_X2(LOAD, LOAD16, STORE)				\
+    do {							\
+	uint64_t p0, p1;					\
+								\
+	LOAD16(p0, p1, pixels);					\
+	STORE(avg2(p0, p0 >> 8 | p1 << 56), block);		\
+	STORE(avg2(p1, p1 >> 8 | (uint64_t) pixels[16] << 56),	\
+	      block + 8);					\
+	pixels += line_size;					\
+	block += line_size;					\
+    } while (--h)
+
+#define OP8_Y2(LOAD, LOAD16, STORE)		\
+    do {					\
+	uint64_t p0, p1;			\
+	p0 = LOAD(pixels);			\
+	pixels += line_size;			\
+	p1 = LOAD(pixels);			\
+	do {					\
+	    uint64_t av = avg2(p0, p1);		\
+	    if (--h == 0) line_size = 0;	\
+	    pixels += line_size;		\
+	    p0 = p1;				\
+	    p1 = LOAD(pixels);			\
+	    STORE(av, block);			\
+	    block += line_size;			\
+	} while (h);				\
+    } while (0)
+
+#define OP16_Y2(LOAD, LOAD16, STORE)		\
+    do {					\
+	uint64_t p0l, p0r, p1l, p1r;		\
+	LOAD16(p0l, p0r, pixels);		\
+	pixels += line_size;			\
+	LOAD16(p1l, p1r, pixels);		\
+	do {					\
+	    uint64_t avl, avr;			\
+	    if (--h == 0) line_size = 0;	\
+	    avl = avg2(p0l, p1l);		\
+	    avr = avg2(p0r, p1r);		\
+	    p0l = p1l;				\
+	    p0r = p1r;				\
+	    pixels += line_size;		\
+	    LOAD16(p1l, p1r, pixels);		\
+	    STORE(avl, block);			\
+	    STORE(avr, block + 8);		\
+	    block += line_size;			\
+	} while (h);				\
+    } while (0)
+
+#define OP8_XY2(LOAD, LOAD16, STORE)				\
+    do {							\
+	uint64_t pl, ph;					\
+	uint64_t p1 = LOAD(pixels);				\
+	uint64_t p2 = p1 >> 8 | ((uint64_t) pixels[8] << 56);	\
+								\
+	ph = ((p1 & ~BYTE_VEC(0x03)) >> 2)			\
+	   + ((p2 & ~BYTE_VEC(0x03)) >> 2);			\
+	pl = (p1 & BYTE_VEC(0x03))				\
+	   + (p2 & BYTE_VEC(0x03));				\
+								\
+	do {							\
+	    uint64_t npl, nph;					\
+								\
+	    pixels += line_size;				\
+	    p1 = LOAD(pixels);					\
+	    p2 = (p1 >> 8) | ((uint64_t) pixels[8] << 56);	\
+	    nph = ((p1 & ~BYTE_VEC(0x03)) >> 2)			\
+	        + ((p2 & ~BYTE_VEC(0x03)) >> 2);		\
+	    npl = (p1 & BYTE_VEC(0x03))				\
+	        + (p2 & BYTE_VEC(0x03));			\
+								\
+	    STORE(ph + nph					\
+		  + (((pl + npl + BYTE_VEC(0x02)) >> 2)		\
+		     & BYTE_VEC(0x03)), block);			\
+								\
+	    block += line_size;					\
+	    pl = npl;						\
+	    ph = nph;						\
+	} while (--h);						\
+    } while (0)
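+
+/* The xy cases need the correctly rounded four-pixel average
+ * (p1 + p2 + p3 + p4 + 2) >> 2 in every byte, without the intermediate sums
+ * spilling into the neighbouring byte.  Writing each pixel as
+ * 4 * (p >> 2) + (p & 3) gives
+ *
+ *	(p1 + p2 + p3 + p4 + 2) >> 2
+ *	    == (p1>>2) + (p2>>2) + (p3>>2) + (p4>>2)
+ *	       + (((p1&3) + (p2&3) + (p3&3) + (p4&3) + 2) >> 2)
+ *
+ * ph/pl hold those two pieces for the two horizontally adjacent pixels of
+ * the current line, nph/npl those of the line below, and the final
+ * & BYTE_VEC(0x03) strips the bits that the full-width shift drags in from
+ * the byte above.
+ */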
+
+#define OP16_XY2(LOAD, LOAD16, STORE)				\
+    do {							\
+	uint64_t p0, p1, p2, p3, pl_l, ph_l, pl_r, ph_r;	\
+	LOAD16(p0, p2, pixels);					\
+	p1 = p0 >> 8 | (p2 << 56);				\
+	p3 = p2 >> 8 | ((uint64_t) pixels[16] << 56);		\
+								\
+	ph_l = ((p0 & ~BYTE_VEC(0x03)) >> 2)			\
+	     + ((p1 & ~BYTE_VEC(0x03)) >> 2);			\
+	pl_l = (p0 & BYTE_VEC(0x03))				\
+	     + (p1 & BYTE_VEC(0x03));				\
+	ph_r = ((p2 & ~BYTE_VEC(0x03)) >> 2)			\
+	     + ((p3 & ~BYTE_VEC(0x03)) >> 2);			\
+	pl_r = (p2 & BYTE_VEC(0x03))				\
+	     + (p3 & BYTE_VEC(0x03));				\
+								\
+	do {							\
+	    uint64_t npl_l, nph_l, npl_r, nph_r;		\
+								\
+	    pixels += line_size;				\
+	    LOAD16(p0, p2, pixels);				\
+	    p1 = p0 >> 8 | (p2 << 56);				\
+	    p3 = p2 >> 8 | ((uint64_t) pixels[16] << 56);	\
+	    nph_l = ((p0 & ~BYTE_VEC(0x03)) >> 2)		\
+		  + ((p1 & ~BYTE_VEC(0x03)) >> 2);		\
+	    npl_l = (p0 & BYTE_VEC(0x03))			\
+		  + (p1 & BYTE_VEC(0x03));			\
+	    nph_r = ((p2 & ~BYTE_VEC(0x03)) >> 2)		\
+		  + ((p3 & ~BYTE_VEC(0x03)) >> 2);		\
+	    npl_r = (p2 & BYTE_VEC(0x03))			\
+		  + (p3 & BYTE_VEC(0x03));			\
+								\
+	    STORE(ph_l + nph_l					\
+		  + (((pl_l + npl_l + BYTE_VEC(0x02)) >> 2)	\
+		     & BYTE_VEC(0x03)), block);			\
+	    STORE(ph_r + nph_r					\
+		  + (((pl_r + npl_r + BYTE_VEC(0x02)) >> 2)	\
+		     & BYTE_VEC(0x03)), block + 8);		\
+								\
+	    block += line_size;					\
+	    pl_l = npl_l;					\
+	    ph_l = nph_l;					\
+	    pl_r = npl_r;					\
+	    ph_r = nph_r;					\
+	} while (--h);						\
+    } while (0)
+
+#define MAKE_OP(OPNAME, SIZE, SUFF, OPKIND, STORE)			\
+static void MC_ ## OPNAME ## _ ## SUFF ## _ ## SIZE ## _alpha		\
+	(uint8_t *restrict block, const uint8_t *restrict pixels,	\
+	 int line_size, int h)						\
+{									\
+    if ((uint64_t) pixels & 0x7) {					\
+	OPKIND(uldq, ULOAD16, STORE);					\
+    } else {								\
+	OPKIND(ldq, ALOAD16, STORE);					\
+    }									\
+}
+
+#define PIXOP(OPNAME, STORE)			\
+    MAKE_OP(OPNAME, 8,  o,  OP8,      STORE);	\
+    MAKE_OP(OPNAME, 8,  x,  OP8_X2,   STORE);	\
+    MAKE_OP(OPNAME, 8,  y,  OP8_Y2,   STORE);	\
+    MAKE_OP(OPNAME, 8,  xy, OP8_XY2,  STORE);	\
+    MAKE_OP(OPNAME, 16, o,  OP16,     STORE);	\
+    MAKE_OP(OPNAME, 16, x,  OP16_X2,  STORE);	\
+    MAKE_OP(OPNAME, 16, y,  OP16_Y2,  STORE);	\
+    MAKE_OP(OPNAME, 16, xy, OP16_XY2, STORE);
+
+#define STORE(l, b) stq(l, b)
+PIXOP(put, STORE);
+
+#undef STORE
+#define STORE(l, b) stq(avg2(l, ldq(b)), b);
+PIXOP(avg, STORE);
+
+mpeg2_mc_t mpeg2_mc_alpha = {
+    { MC_put_o_16_alpha, MC_put_x_16_alpha,
+      MC_put_y_16_alpha, MC_put_xy_16_alpha,
+      MC_put_o_8_alpha, MC_put_x_8_alpha,
+      MC_put_y_8_alpha, MC_put_xy_8_alpha },
+    { MC_avg_o_16_alpha, MC_avg_x_16_alpha,
+      MC_avg_y_16_alpha, MC_avg_xy_16_alpha,
+      MC_avg_o_8_alpha, MC_avg_x_8_alpha,
+      MC_avg_y_8_alpha, MC_avg_xy_8_alpha }
+};
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/motion_comp_altivec.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,2019 @@
+/*
+ * motion_comp_altivec.c
+ * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ALTIVEC__
+
+#include "config.h"
+
+#ifdef ARCH_PPC
+
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+
+/*
+ * The asm code is generated with:
+ *
+ * gcc-2.95 -fvec -D__ALTIVEC__ -O9 -fomit-frame-pointer -mregnames -S
+ *      motion_comp_altivec.c
+ *
+ * sed 's/.L/._L/g' motion_comp_altivec.s |
+ * awk '{args=""; len=split ($2, arg, ",");
+ *      for (i=1; i<=len; i++) { a=arg[i]; if (i<len) a=a",";
+ *                               args = args sprintf ("%-6s", a) }
+ *      printf ("\t\"\t%-16s%-24s\\n\"\n", $1, args) }' |
+ * unexpand -a
+ */
+
+static void MC_put_o_16_altivec (uint8_t * dest, const uint8_t * ref,
+				 int stride, int height)
+{
+    asm ("						\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	li		%r9,  15		\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	lvsl		%v12, 0,    %r4		\n"
+	"	mtctr		%r6			\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	add		%r0,  %r5,  %r5		\n"
+	"	vperm		%v13, %v1,  %v0,  %v12	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"._L6:						\n"
+	"	li		%r9,  15		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	stvx		%v13, 0,    %r3		\n"
+	"	vperm		%v13, %v1,  %v0,  %v12	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	stvx		%v13, %r5,  %r3		\n"
+	"	vperm		%v13, %v1,  %v0,  %v12	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	add		%r3,  %r3,  %r0		\n"
+	"	bdnz		._L6			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	stvx		%v13, 0,    %r3		\n"
+	"	vperm		%v13, %v1,  %v0,  %v12	\n"
+	"	stvx		%v13, %r5,  %r3		\n"
+	 );
+}
+
+static void MC_put_o_8_altivec (uint8_t * dest, const uint8_t * ref,
+				int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v12, 0,    %r4		\n"
+	"	lvsl		%v1,  %r5,  %r4		\n"
+	"	vmrghb		%v12, %v12, %v12	\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	li		%r9,  7			\n"
+	"	vmrghb		%v1,  %v1,  %v1		\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vpkuhum		%v10, %v12, %v12	\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	mtctr		%r6			\n"
+	"	vpkuhum		%v11, %v1,  %v1		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v12, %v13, %v0,  %v10	\n"
+	"._L11:						\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	stvewx		%v12, 0,    %r3		\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v1,  %v13, %v0,  %v11	\n"
+	"	stvewx		%v12, %r9,  %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	stvewx		%v1,  0,    %r3		\n"
+	"	vperm		%v12, %v13, %v0,  %v10	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v1,  %r9,  %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	bdnz		._L11			\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	stvewx		%v12, 0,    %r3		\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v1,  %v13, %v0,  %v11	\n"
+	"	stvewx		%v12, %r9,  %r3		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	stvewx		%v1,  0,    %r3		\n"
+	"	stvewx		%v1,  %r9,  %r3		\n"
+	 );
+}
+
+static void MC_put_x_16_altivec (uint8_t * dest, const uint8_t * ref,
+				 int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v11, 0,    %r4		\n"
+	"	vspltisb	%v0,  1			\n"
+	"	li		%r9,  16		\n"
+	"	lvx		%v12, 0,    %r4		\n"
+	"	vaddubm		%v10, %v11, %v0		\n"
+	"	lvx		%v13, %r9,  %r4		\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v1,  %v12, %v13, %v10	\n"
+	"	vperm		%v0,  %v12, %v13, %v11	\n"
+	"	mtctr		%r6			\n"
+	"	add		%r0,  %r5,  %r5		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"._L16:						\n"
+	"	li		%r9,  16		\n"
+	"	lvx		%v12, 0,    %r4		\n"
+	"	lvx		%v13, %r9,  %r4		\n"
+	"	stvx		%v0,  0,    %r3		\n"
+	"	vperm		%v1,  %v12, %v13, %v10	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v0,  %v12, %v13, %v11	\n"
+	"	lvx		%v12, 0,    %r4		\n"
+	"	lvx		%v13, %r9,  %r4		\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	stvx		%v0,  %r5,  %r3		\n"
+	"	vperm		%v1,  %v12, %v13, %v10	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v0,  %v12, %v13, %v11	\n"
+	"	add		%r3,  %r3,  %r0		\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	bdnz		._L16			\n"
+	"	lvx		%v13, %r9,  %r4		\n"
+	"	lvx		%v12, 0,    %r4		\n"
+	"	stvx		%v0,  0,    %r3		\n"
+	"	vperm		%v1,  %v12, %v13, %v10	\n"
+	"	vperm		%v0,  %v12, %v13, %v11	\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	stvx		%v0,  %r5,  %r3		\n"
+	 );
+}
+
+static void MC_put_x_8_altivec (uint8_t * dest, const uint8_t * ref,
+				int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v0,  0,    %r4		\n"
+	"	vspltisb	%v13, 1			\n"
+	"	lvsl		%v10, %r5,  %r4		\n"
+	"	vmrghb		%v0,  %v0,  %v0		\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	vmrghb		%v10, %v10, %v10	\n"
+	"	vpkuhum		%v8,  %v0,  %v0		\n"
+	"	lvx		%v12, %r9,  %r4		\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	vpkuhum		%v9,  %v10, %v10	\n"
+	"	vaddubm		%v7,  %v8,  %v13	\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v1,  %v11, %v12, %v8	\n"
+	"	mtctr		%r6			\n"
+	"	vaddubm		%v13, %v9,  %v13	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v0,  %v11, %v12, %v7	\n"
+	"	vavgub		%v0,  %v1,  %v0		\n"
+	"._L21:						\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v12, %r9,  %r4		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	stvewx		%v0,  0,    %r3		\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v1,  %v11, %v12, %v13	\n"
+	"	stvewx		%v0,  %r9,  %r3		\n"
+	"	vperm		%v0,  %v11, %v12, %v9	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v12, %r9,  %r4		\n"
+	"	vavgub		%v10, %v0,  %v1		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	stvewx		%v10, 0,    %r3		\n"
+	"	vperm		%v1,  %v11, %v12, %v7	\n"
+	"	vperm		%v0,  %v11, %v12, %v8	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v10, %r9,  %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	bdnz		._L21			\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v12, %r9,  %r4		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	stvewx		%v0,  0,    %r3		\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v1,  %v11, %v12, %v13	\n"
+	"	stvewx		%v0,  %r9,  %r3		\n"
+	"	vperm		%v0,  %v11, %v12, %v9	\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vavgub		%v10, %v0,  %v1		\n"
+	"	stvewx		%v10, 0,    %r3		\n"
+	"	stvewx		%v10, %r9,  %r3		\n"
+	 );
+}
+
+static void MC_put_y_16_altivec (uint8_t * dest, const uint8_t * ref,
+				 int stride, int height)
+{
+    asm ("						\n"
+	"	li		%r9,  15		\n"
+	"	lvsl		%v10, 0,    %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v12, %v13, %v1,  %v10	\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v11, %v13, %v1,  %v10	\n"
+	"	mtctr		%r6			\n"
+	"	add		%r0,  %r5,  %r5		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v0,  %v12, %v11	\n"
+	"._L26:						\n"
+	"	li		%r9,  15		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	stvx		%v0,  0,    %r3		\n"
+	"	vperm		%v12, %v13, %v1,  %v10	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	vavgub		%v0,  %v12, %v11	\n"
+	"	stvx		%v0,  %r5,  %r3		\n"
+	"	vperm		%v11, %v13, %v1,  %v10	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	add		%r3,  %r3,  %r0		\n"
+	"	vavgub		%v0,  %v12, %v11	\n"
+	"	bdnz		._L26			\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	stvx		%v0,  0,    %r3		\n"
+	"	vperm		%v12, %v13, %v1,  %v10	\n"
+	"	vavgub		%v0,  %v12, %v11	\n"
+	"	stvx		%v0,  %r5,  %r3		\n"
+	 );
+}
+
+static void MC_put_y_8_altivec (uint8_t * dest, const uint8_t * ref,
+				int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v13, 0,    %r4		\n"
+	"	lvsl		%v11, %r5,  %r4		\n"
+	"	vmrghb		%v13, %v13, %v13	\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v12, 0,    %r4		\n"
+	"	vmrghb		%v11, %v11, %v11	\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	vpkuhum		%v9,  %v13, %v13	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vpkuhum		%v10, %v11, %v11	\n"
+	"	vperm		%v13, %v12, %v1,  %v9	\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	lvx		%v12, 0,    %r4		\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v11, %v12, %v1,  %v10	\n"
+	"	mtctr		%r6			\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v0,  %v13, %v11	\n"
+	"._L31:						\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	lvx		%v12, 0,    %r4		\n"
+	"	stvewx		%v0,  0,    %r3		\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v13, %v12, %v1,  %v9	\n"
+	"	stvewx		%v0,  %r9,  %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v0,  %v13, %v11	\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	lvx		%v12, 0,    %r4		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	stvewx		%v0,  0,    %r3		\n"
+	"	vperm		%v11, %v12, %v1,  %v10	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v0,  %r9,  %r3		\n"
+	"	vavgub		%v0,  %v13, %v11	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	bdnz		._L31			\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	lvx		%v12, 0,    %r4		\n"
+	"	stvewx		%v0,  0,    %r3		\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v13, %v12, %v1,  %v9	\n"
+	"	stvewx		%v0,  %r9,  %r3		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vavgub		%v0,  %v13, %v11	\n"
+	"	stvewx		%v0,  0,    %r3		\n"
+	"	stvewx		%v0,  %r9,  %r3		\n"
+	 );
+}
+
+static void MC_put_xy_16_altivec (uint8_t * dest, const uint8_t * ref,
+				  int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v5,  0,    %r4		\n"
+	"	vspltisb	%v3,  1			\n"
+	"	li		%r9,  16		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	vaddubm		%v4,  %v5,  %v3		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v10, %v1,  %v0,  %v4	\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	vperm		%v11, %v1,  %v0,  %v5	\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	mtctr		%r6			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	vavgub		%v9,  %v11, %v10	\n"
+	"	vxor		%v8,  %v11, %v10	\n"
+	"	add		%r0,  %r5,  %r5		\n"
+	"	vperm		%v10, %v1,  %v0,  %v4	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v11, %v1,  %v0,  %v5	\n"
+	"	vxor		%v6,  %v11, %v10	\n"
+	"	vavgub		%v7,  %v11, %v10	\n"
+	"	vor		%v0,  %v8,  %v6		\n"
+	"	vxor		%v13, %v9,  %v7		\n"
+	"	vand		%v0,  %v3,  %v0		\n"
+	"	vavgub		%v1,  %v9,  %v7		\n"
+	"	vand		%v0,  %v0,  %v13	\n"
+	"	vsububm		%v13, %v1,  %v0		\n"
+	"._L36:						\n"
+	"	li		%r9,  16		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	stvx		%v13, 0,    %r3		\n"
+	"	vperm		%v10, %v1,  %v0,  %v4	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v11, %v1,  %v0,  %v5	\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	vavgub		%v9,  %v11, %v10	\n"
+	"	vxor		%v8,  %v11, %v10	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v10, %v1,  %v0,  %v4	\n"
+	"	vavgub		%v12, %v9,  %v7		\n"
+	"	vperm		%v11, %v1,  %v0,  %v5	\n"
+	"	vor		%v13, %v8,  %v6		\n"
+	"	vxor		%v0,  %v9,  %v7		\n"
+	"	vxor		%v6,  %v11, %v10	\n"
+	"	vand		%v13, %v3,  %v13	\n"
+	"	vavgub		%v7,  %v11, %v10	\n"
+	"	vor		%v1,  %v8,  %v6		\n"
+	"	vand		%v13, %v13, %v0		\n"
+	"	vxor		%v0,  %v9,  %v7		\n"
+	"	vand		%v1,  %v3,  %v1		\n"
+	"	vsububm		%v13, %v12, %v13	\n"
+	"	vand		%v1,  %v1,  %v0		\n"
+	"	stvx		%v13, %r5,  %r3		\n"
+	"	vavgub		%v0,  %v9,  %v7		\n"
+	"	add		%r3,  %r3,  %r0		\n"
+	"	vsububm		%v13, %v0,  %v1		\n"
+	"	bdnz		._L36			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	stvx		%v13, 0,    %r3		\n"
+	"	vperm		%v10, %v1,  %v0,  %v4	\n"
+	"	vperm		%v11, %v1,  %v0,  %v5	\n"
+	"	vxor		%v8,  %v11, %v10	\n"
+	"	vavgub		%v9,  %v11, %v10	\n"
+	"	vor		%v0,  %v8,  %v6		\n"
+	"	vxor		%v13, %v9,  %v7		\n"
+	"	vand		%v0,  %v3,  %v0		\n"
+	"	vavgub		%v1,  %v9,  %v7		\n"
+	"	vand		%v0,  %v0,  %v13	\n"
+	"	vsububm		%v13, %v1,  %v0		\n"
+	"	stvx		%v13, %r5,  %r3		\n"
+	 );
+}
+
+static void MC_put_xy_8_altivec (uint8_t * dest, const uint8_t * ref,
+				 int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v4,  0,    %r4		\n"
+	"	vspltisb	%v3,  1			\n"
+	"	lvsl		%v5,  %r5,  %r4		\n"
+	"	vmrghb		%v4,  %v4,  %v4		\n"
+	"	li		%r9,  8			\n"
+	"	vmrghb		%v5,  %v5,  %v5		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	vpkuhum		%v4,  %v4,  %v4		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	vpkuhum		%v5,  %v5,  %v5		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vaddubm		%v2,  %v4,  %v3		\n"
+	"	vperm		%v11, %v1,  %v0,  %v4	\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	vaddubm		%v19, %v5,  %v3		\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v10, %v1,  %v0,  %v2	\n"
+	"	mtctr		%r6			\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	vavgub		%v9,  %v11, %v10	\n"
+	"	vxor		%v8,  %v11, %v10	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v10, %v1,  %v0,  %v19	\n"
+	"	vperm		%v11, %v1,  %v0,  %v5	\n"
+	"	vxor		%v6,  %v11, %v10	\n"
+	"	vavgub		%v7,  %v11, %v10	\n"
+	"	vor		%v0,  %v8,  %v6		\n"
+	"	vxor		%v13, %v9,  %v7		\n"
+	"	vand		%v0,  %v3,  %v0		\n"
+	"	vavgub		%v1,  %v9,  %v7		\n"
+	"	vand		%v0,  %v0,  %v13	\n"
+	"	vsububm		%v13, %v1,  %v0		\n"
+	"._L41:						\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	stvewx		%v13, 0,    %r3		\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v10, %v1,  %v0,  %v2	\n"
+	"	stvewx		%v13, %r9,  %r3		\n"
+	"	vperm		%v11, %v1,  %v0,  %v4	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	li		%r9,  8			\n"
+	"	vavgub		%v9,  %v11, %v10	\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	vxor		%v8,  %v11, %v10	\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	vavgub		%v12, %v9,  %v7		\n"
+	"	vor		%v13, %v8,  %v6		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vperm		%v10, %v1,  %v0,  %v19	\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v11, %v1,  %v0,  %v5	\n"
+	"	vand		%v13, %v3,  %v13	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vxor		%v0,  %v9,  %v7		\n"
+	"	vxor		%v6,  %v11, %v10	\n"
+	"	vavgub		%v7,  %v11, %v10	\n"
+	"	vor		%v1,  %v8,  %v6		\n"
+	"	vand		%v13, %v13, %v0		\n"
+	"	vxor		%v0,  %v9,  %v7		\n"
+	"	vand		%v1,  %v3,  %v1		\n"
+	"	vsububm		%v13, %v12, %v13	\n"
+	"	vand		%v1,  %v1,  %v0		\n"
+	"	stvewx		%v13, 0,    %r3		\n"
+	"	vavgub		%v0,  %v9,  %v7		\n"
+	"	stvewx		%v13, %r9,  %r3		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vsububm		%v13, %v0,  %v1		\n"
+	"	bdnz		._L41			\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	stvewx		%v13, 0,    %r3		\n"
+	"	vperm		%v10, %v1,  %v0,  %v2	\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v11, %v1,  %v0,  %v4	\n"
+	"	stvewx		%v13, %r9,  %r3		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vxor		%v8,  %v11, %v10	\n"
+	"	vavgub		%v9,  %v11, %v10	\n"
+	"	vor		%v0,  %v8,  %v6		\n"
+	"	vxor		%v13, %v9,  %v7		\n"
+	"	vand		%v0,  %v3,  %v0		\n"
+	"	vavgub		%v1,  %v9,  %v7		\n"
+	"	vand		%v0,  %v0,  %v13	\n"
+	"	vsububm		%v13, %v1,  %v0		\n"
+	"	stvewx		%v13, 0,    %r3		\n"
+	"	stvewx		%v13, %r9,  %r3		\n"
+	 );
+}
+
+static void MC_avg_o_16_altivec (uint8_t * dest, const uint8_t * ref,
+				 int stride, int height)
+{
+    asm ("						\n"
+	"	li		%r9,  15		\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvsl		%v11, 0,    %r4		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v0,  %v1,  %v0,  %v11	\n"
+	"	lvx		%v13, 0,    %r3		\n"
+	"	mtctr		%r6			\n"
+	"	add		%r9,  %r5,  %r5		\n"
+	"	vavgub		%v12, %v13, %v0		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"._L46:						\n"
+	"	li		%r11, 15		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	lvx		%v0,  %r11, %r4		\n"
+	"	lvx		%v13, %r5,  %r3		\n"
+	"	vperm		%v0,  %v1,  %v0,  %v11	\n"
+	"	stvx		%v12, 0,    %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v12, %v13, %v0		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	lvx		%v0,  %r11, %r4		\n"
+	"	lvx		%v13, %r9,  %r3		\n"
+	"	vperm		%v0,  %v1,  %v0,  %v11	\n"
+	"	stvx		%v12, %r5,  %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v12, %v13, %v0		\n"
+	"	add		%r3,  %r3,  %r9		\n"
+	"	bdnz		._L46			\n"
+	"	lvx		%v0,  %r11, %r4		\n"
+	"	lvx		%v1,  0,    %r4		\n"
+	"	lvx		%v13, %r5,  %r3		\n"
+	"	vperm		%v0,  %v1,  %v0,  %v11	\n"
+	"	stvx		%v12, 0,    %r3		\n"
+	"	vavgub		%v12, %v13, %v0		\n"
+	"	stvx		%v12, %r5,  %r3		\n"
+	 );
+}
+
+static void MC_avg_o_8_altivec (uint8_t * dest, const uint8_t * ref,
+				int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v12, 0,    %r4		\n"
+	"	li		%r9,  7			\n"
+	"	vmrghb		%v12, %v12, %v12	\n"
+	"	lvsl		%v1,  %r5,  %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	vpkuhum		%v9,  %v12, %v12	\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	vmrghb		%v1,  %v1,  %v1		\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v0,  %v13, %v0,  %v9	\n"
+	"	lvx		%v11, 0,    %r3		\n"
+	"	mtctr		%r6			\n"
+	"	vpkuhum		%v10, %v1,  %v1		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v12, %v11, %v0		\n"
+	"._L51:						\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v11, %r5,  %r3		\n"
+	"	stvewx		%v12, 0,    %r3		\n"
+	"	vperm		%v0,  %v13, %v0,  %v10	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v12, %r9,  %r3		\n"
+	"	vavgub		%v1,  %v11, %v0		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v11, %r5,  %r3		\n"
+	"	stvewx		%v1,  0,    %r3		\n"
+	"	vperm		%v0,  %v13, %v0,  %v9	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v1,  %r9,  %r3		\n"
+	"	vavgub		%v12, %v11, %v0		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	bdnz		._L51			\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v0,  %r9,  %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v11, %r5,  %r3		\n"
+	"	stvewx		%v12, 0,    %r3		\n"
+	"	vperm		%v0,  %v13, %v0,  %v10	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v12, %r9,  %r3		\n"
+	"	vavgub		%v1,  %v11, %v0		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	stvewx		%v1,  0,    %r3		\n"
+	"	stvewx		%v1,  %r9,  %r3		\n"
+	 );
+}
+
+static void MC_avg_x_16_altivec (uint8_t * dest, const uint8_t * ref,
+				 int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v8,  0,    %r4		\n"
+	"	vspltisb	%v0,  1			\n"
+	"	li		%r9,  16		\n"
+	"	lvx		%v12, %r9,  %r4		\n"
+	"	vaddubm		%v7,  %v8,  %v0		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	vperm		%v1,  %v11, %v12, %v7	\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v0,  %v11, %v12, %v8	\n"
+	"	lvx		%v9,  0,    %r3		\n"
+	"	mtctr		%r6			\n"
+	"	add		%r9,  %r5,  %r5		\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v10, %v9,  %v0		\n"
+	"._L56:						\n"
+	"	li		%r11, 16		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	lvx		%v12, %r11, %r4		\n"
+	"	lvx		%v9,  %r5,  %r3		\n"
+	"	stvx		%v10, 0,    %r3		\n"
+	"	vperm		%v0,  %v11, %v12, %v7	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v1,  %v11, %v12, %v8	\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	lvx		%v12, %r11, %r4		\n"
+	"	vavgub		%v1,  %v1,  %v0		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v13, %v11, %v12, %v7	\n"
+	"	vavgub		%v10, %v9,  %v1		\n"
+	"	vperm		%v0,  %v11, %v12, %v8	\n"
+	"	lvx		%v9,  %r9,  %r3		\n"
+	"	stvx		%v10, %r5,  %r3		\n"
+	"	vavgub		%v0,  %v0,  %v13	\n"
+	"	add		%r3,  %r3,  %r9		\n"
+	"	vavgub		%v10, %v9,  %v0		\n"
+	"	bdnz		._L56			\n"
+	"	lvx		%v12, %r11, %r4		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	lvx		%v9,  %r5,  %r3		\n"
+	"	vperm		%v1,  %v11, %v12, %v7	\n"
+	"	stvx		%v10, 0,    %r3		\n"
+	"	vperm		%v0,  %v11, %v12, %v8	\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	vavgub		%v10, %v9,  %v0		\n"
+	"	stvx		%v10, %r5,  %r3		\n"
+	 );
+}
+
+static void MC_avg_x_8_altivec (uint8_t * dest, const uint8_t * ref,
+				int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v10, 0,    %r4		\n"
+	"	vspltisb	%v13, 1			\n"
+	"	li		%r9,  8			\n"
+	"	vmrghb		%v10, %v10, %v10	\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	lvx		%v12, %r9,  %r4		\n"
+	"	vpkuhum		%v7,  %v10, %v10	\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	lvsl		%v10, %r5,  %r4		\n"
+	"	vaddubm		%v6,  %v7,  %v13	\n"
+	"	vperm		%v0,  %v11, %v12, %v7	\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vmrghb		%v10, %v10, %v10	\n"
+	"	lvx		%v9,  0,    %r3		\n"
+	"	mtctr		%r6			\n"
+	"	vperm		%v1,  %v11, %v12, %v6	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vpkuhum		%v8,  %v10, %v10	\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	vaddubm		%v13, %v8,  %v13	\n"
+	"	vavgub		%v10, %v9,  %v0		\n"
+	"._L61:						\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v12, %r9,  %r4		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	lvx		%v9,  %r5,  %r3		\n"
+	"	stvewx		%v10, 0,    %r3		\n"
+	"	vperm		%v1,  %v11, %v12, %v13	\n"
+	"	vperm		%v0,  %v11, %v12, %v8	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v10, %r9,  %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v12, %r9,  %r4		\n"
+	"	vavgub		%v10, %v9,  %v0		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vperm		%v1,  %v11, %v12, %v6	\n"
+	"	lvx		%v9,  %r5,  %r3		\n"
+	"	vperm		%v0,  %v11, %v12, %v7	\n"
+	"	stvewx		%v10, 0,    %r3		\n"
+	"	li		%r9,  4			\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	stvewx		%v10, %r9,  %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vavgub		%v10, %v9,  %v0		\n"
+	"	bdnz		._L61			\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v12, %r9,  %r4		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	lvx		%v9,  %r5,  %r3		\n"
+	"	vperm		%v1,  %v11, %v12, %v13	\n"
+	"	stvewx		%v10, 0,    %r3		\n"
+	"	vperm		%v0,  %v11, %v12, %v8	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v10, %r9,  %r3		\n"
+	"	vavgub		%v0,  %v0,  %v1		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vavgub		%v10, %v9,  %v0		\n"
+	"	stvewx		%v10, 0,    %r3		\n"
+	"	stvewx		%v10, %r9,  %r3		\n"
+	 );
+}
+
+static void MC_avg_y_16_altivec (uint8_t * dest, const uint8_t * ref,
+				 int stride, int height)
+{
+    asm ("						\n"
+	"	li		%r9,  15		\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	lvsl		%v9,  0,    %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v11, %v13, %v1,  %v9	\n"
+	"	li		%r11, 15		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r11, %r4		\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	vperm		%v10, %v13, %v1,  %v9	\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	lvx		%v12, 0,    %r3		\n"
+	"	mtctr		%r6			\n"
+	"	vavgub		%v0,  %v11, %v10	\n"
+	"	add		%r9,  %r5,  %r5		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v0,  %v12, %v0		\n"
+	"._L66:						\n"
+	"	li		%r11, 15		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r11, %r4		\n"
+	"	lvx		%v12, %r5,  %r3		\n"
+	"	vperm		%v11, %v13, %v1,  %v9	\n"
+	"	stvx		%v0,  0,    %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v0,  %v11, %v10	\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r11, %r4		\n"
+	"	vavgub		%v0,  %v12, %v0		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	lvx		%v12, %r9,  %r3		\n"
+	"	vperm		%v10, %v13, %v1,  %v9	\n"
+	"	stvx		%v0,  %r5,  %r3		\n"
+	"	vavgub		%v0,  %v11, %v10	\n"
+	"	add		%r3,  %r3,  %r9		\n"
+	"	vavgub		%v0,  %v12, %v0		\n"
+	"	bdnz		._L66			\n"
+	"	lvx		%v1,  %r11, %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v12, %r5,  %r3		\n"
+	"	vperm		%v11, %v13, %v1,  %v9	\n"
+	"	stvx		%v0,  0,    %r3		\n"
+	"	vavgub		%v0,  %v11, %v10	\n"
+	"	vavgub		%v0,  %v12, %v0		\n"
+	"	stvx		%v0,  %r5,  %r3		\n"
+	 );
+}
+
+static void MC_avg_y_8_altivec (uint8_t * dest, const uint8_t * ref,
+				int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v12, 0,    %r4		\n"
+	"	lvsl		%v9,  %r5,  %r4		\n"
+	"	vmrghb		%v12, %v12, %v12	\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	vmrghb		%v9,  %v9,  %v9		\n"
+	"	lvx		%v13, %r9,  %r4		\n"
+	"	vpkuhum		%v7,  %v12, %v12	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vpkuhum		%v8,  %v9,  %v9		\n"
+	"	vperm		%v12, %v11, %v13, %v7	\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	lvx		%v13, %r9,  %r4		\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v9,  %v11, %v13, %v8	\n"
+	"	lvx		%v10, 0,    %r3		\n"
+	"	mtctr		%r6			\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v0,  %v12, %v9		\n"
+	"	vavgub		%v1,  %v10, %v0		\n"
+	"._L71:						\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v13, %r9,  %r4		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	lvx		%v10, %r5,  %r3		\n"
+	"	stvewx		%v1,  0,    %r3		\n"
+	"	vperm		%v12, %v11, %v13, %v7	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v1,  %r9,  %r3		\n"
+	"	vavgub		%v0,  %v12, %v9		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	li		%r9,  7			\n"
+	"	vavgub		%v1,  %v10, %v0		\n"
+	"	lvx		%v13, %r9,  %r4		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vperm		%v9,  %v11, %v13, %v8	\n"
+	"	lvx		%v10, %r5,  %r3		\n"
+	"	stvewx		%v1,  0,    %r3		\n"
+	"	vavgub		%v0,  %v12, %v9		\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v1,  %r9,  %r3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vavgub		%v1,  %v10, %v0		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	bdnz		._L71			\n"
+	"	li		%r9,  7			\n"
+	"	lvx		%v13, %r9,  %r4		\n"
+	"	lvx		%v11, 0,    %r4		\n"
+	"	lvx		%v10, %r5,  %r3		\n"
+	"	vperm		%v12, %v11, %v13, %v7	\n"
+	"	stvewx		%v1,  0,    %r3		\n"
+	"	li		%r9,  4			\n"
+	"	vavgub		%v0,  %v12, %v9		\n"
+	"	stvewx		%v1,  %r9,  %r3		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vavgub		%v1,  %v10, %v0		\n"
+	"	stvewx		%v1,  0,    %r3		\n"
+	"	stvewx		%v1,  %r9,  %r3		\n"
+	 );
+}
+
+static void MC_avg_xy_16_altivec (uint8_t * dest, const uint8_t * ref,
+				  int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v4,  0,    %r4		\n"
+	"	vspltisb	%v2,  1			\n"
+	"	li		%r9,  16		\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	vaddubm		%v3,  %v4,  %v2		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v10, %v13, %v1,  %v3	\n"
+	"	li		%r11, 16		\n"
+	"	vperm		%v11, %v13, %v1,  %v4	\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r11, %r4		\n"
+	"	vavgub		%v9,  %v11, %v10	\n"
+	"	vxor		%v8,  %v11, %v10	\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v10, %v13, %v1,  %v3	\n"
+	"	lvx		%v6,  0,    %r3		\n"
+	"	mtctr		%r6			\n"
+	"	vperm		%v11, %v13, %v1,  %v4	\n"
+	"	add		%r9,  %r5,  %r5		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vxor		%v5,  %v11, %v10	\n"
+	"	vavgub		%v7,  %v11, %v10	\n"
+	"	vor		%v1,  %v8,  %v5		\n"
+	"	vxor		%v13, %v9,  %v7		\n"
+	"	vand		%v1,  %v2,  %v1		\n"
+	"	vavgub		%v0,  %v9,  %v7		\n"
+	"	vand		%v1,  %v1,  %v13	\n"
+	"	vsububm		%v0,  %v0,  %v1		\n"
+	"	vavgub		%v12, %v6,  %v0		\n"
+	"._L76:						\n"
+	"	li		%r11, 16		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r11, %r4		\n"
+	"	lvx		%v6,  %r5,  %r3		\n"
+	"	stvx		%v12, 0,    %r3		\n"
+	"	vperm		%v10, %v13, %v1,  %v3	\n"
+	"	vperm		%v11, %v13, %v1,  %v4	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v1,  %r11, %r4		\n"
+	"	vavgub		%v9,  %v11, %v10	\n"
+	"	vxor		%v8,  %v11, %v10	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v10, %v13, %v1,  %v3	\n"
+	"	vavgub		%v12, %v9,  %v7		\n"
+	"	vperm		%v11, %v13, %v1,  %v4	\n"
+	"	vor		%v0,  %v8,  %v5		\n"
+	"	vxor		%v13, %v9,  %v7		\n"
+	"	vxor		%v5,  %v11, %v10	\n"
+	"	vand		%v0,  %v2,  %v0		\n"
+	"	vavgub		%v7,  %v11, %v10	\n"
+	"	vor		%v1,  %v8,  %v5		\n"
+	"	vand		%v0,  %v0,  %v13	\n"
+	"	vand		%v1,  %v2,  %v1		\n"
+	"	vxor		%v13, %v9,  %v7		\n"
+	"	vsububm		%v12, %v12, %v0		\n"
+	"	vand		%v1,  %v1,  %v13	\n"
+	"	vavgub		%v0,  %v9,  %v7		\n"
+	"	vavgub		%v12, %v6,  %v12	\n"
+	"	lvx		%v6,  %r9,  %r3		\n"
+	"	vsububm		%v0,  %v0,  %v1		\n"
+	"	stvx		%v12, %r5,  %r3		\n"
+	"	vavgub		%v12, %v6,  %v0		\n"
+	"	add		%r3,  %r3,  %r9		\n"
+	"	bdnz		._L76			\n"
+	"	lvx		%v1,  %r11, %r4		\n"
+	"	lvx		%v13, 0,    %r4		\n"
+	"	lvx		%v6,  %r5,  %r3		\n"
+	"	vperm		%v10, %v13, %v1,  %v3	\n"
+	"	stvx		%v12, 0,    %r3		\n"
+	"	vperm		%v11, %v13, %v1,  %v4	\n"
+	"	vxor		%v8,  %v11, %v10	\n"
+	"	vavgub		%v9,  %v11, %v10	\n"
+	"	vor		%v0,  %v8,  %v5		\n"
+	"	vxor		%v13, %v9,  %v7		\n"
+	"	vand		%v0,  %v2,  %v0		\n"
+	"	vavgub		%v1,  %v9,  %v7		\n"
+	"	vand		%v0,  %v0,  %v13	\n"
+	"	vsububm		%v1,  %v1,  %v0		\n"
+	"	vavgub		%v12, %v6,  %v1		\n"
+	"	stvx		%v12, %r5,  %r3		\n"
+	 );
+}
+
+static void MC_avg_xy_8_altivec (uint8_t * dest, const uint8_t * ref,
+				 int stride, int height)
+{
+    asm ("						\n"
+	"	lvsl		%v2,  0,    %r4		\n"
+	"	vspltisb	%v19, 1			\n"
+	"	lvsl		%v3,  %r5,  %r4		\n"
+	"	vmrghb		%v2,  %v2,  %v2		\n"
+	"	li		%r9,  8			\n"
+	"	vmrghb		%v3,  %v3,  %v3		\n"
+	"	lvx		%v9,  0,    %r4		\n"
+	"	vpkuhum		%v2,  %v2,  %v2		\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	vpkuhum		%v3,  %v3,  %v3		\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vaddubm		%v18, %v2,  %v19	\n"
+	"	vperm		%v11, %v9,  %v1,  %v2	\n"
+	"	srawi		%r6,  %r6,  1		\n"
+	"	vaddubm		%v17, %v3,  %v19	\n"
+	"	addi		%r6,  %r6,  -1		\n"
+	"	vperm		%v10, %v9,  %v1,  %v18	\n"
+	"	lvx		%v4,  0,    %r3		\n"
+	"	mtctr		%r6			\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	lvx		%v9,  0,    %r4		\n"
+	"	vavgub		%v8,  %v11, %v10	\n"
+	"	vxor		%v7,  %v11, %v10	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vperm		%v10, %v9,  %v1,  %v17	\n"
+	"	vperm		%v11, %v9,  %v1,  %v3	\n"
+	"	vxor		%v5,  %v11, %v10	\n"
+	"	vavgub		%v6,  %v11, %v10	\n"
+	"	vor		%v1,  %v7,  %v5		\n"
+	"	vxor		%v13, %v8,  %v6		\n"
+	"	vand		%v1,  %v19, %v1		\n"
+	"	vavgub		%v0,  %v8,  %v6		\n"
+	"	vand		%v1,  %v1,  %v13	\n"
+	"	vsububm		%v0,  %v0,  %v1		\n"
+	"	vavgub		%v13, %v4,  %v0		\n"
+	"._L81:						\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	lvx		%v9,  0,    %r4		\n"
+	"	lvx		%v4,  %r5,  %r3		\n"
+	"	stvewx		%v13, 0,    %r3		\n"
+	"	vperm		%v10, %v9,  %v1,  %v18	\n"
+	"	vperm		%v11, %v9,  %v1,  %v2	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v13, %r9,  %r3		\n"
+	"	vxor		%v7,  %v11, %v10	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	li		%r9,  8			\n"
+	"	vavgub		%v8,  %v11, %v10	\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	vor		%v0,  %v7,  %v5		\n"
+	"	lvx		%v9,  0,    %r4		\n"
+	"	vxor		%v12, %v8,  %v6		\n"
+	"	vand		%v0,  %v19, %v0		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vperm		%v10, %v9,  %v1,  %v17	\n"
+	"	vavgub		%v13, %v8,  %v6		\n"
+	"	li		%r9,  4			\n"
+	"	vperm		%v11, %v9,  %v1,  %v3	\n"
+	"	vand		%v0,  %v0,  %v12	\n"
+	"	add		%r4,  %r4,  %r5		\n"
+	"	vxor		%v5,  %v11, %v10	\n"
+	"	vavgub		%v6,  %v11, %v10	\n"
+	"	vor		%v1,  %v7,  %v5		\n"
+	"	vsububm		%v13, %v13, %v0		\n"
+	"	vxor		%v0,  %v8,  %v6		\n"
+	"	vand		%v1,  %v19, %v1		\n"
+	"	vavgub		%v13, %v4,  %v13	\n"
+	"	vand		%v1,  %v1,  %v0		\n"
+	"	lvx		%v4,  %r5,  %r3		\n"
+	"	vavgub		%v0,  %v8,  %v6		\n"
+	"	stvewx		%v13, 0,    %r3		\n"
+	"	stvewx		%v13, %r9,  %r3		\n"
+	"	vsububm		%v0,  %v0,  %v1		\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vavgub		%v13, %v4,  %v0		\n"
+	"	bdnz		._L81			\n"
+	"	li		%r9,  8			\n"
+	"	lvx		%v1,  %r9,  %r4		\n"
+	"	lvx		%v9,  0,    %r4		\n"
+	"	lvx		%v4,  %r5,  %r3		\n"
+	"	vperm		%v10, %v9,  %v1,  %v18	\n"
+	"	stvewx		%v13, 0,    %r3		\n"
+	"	vperm		%v11, %v9,  %v1,  %v2	\n"
+	"	li		%r9,  4			\n"
+	"	stvewx		%v13, %r9,  %r3		\n"
+	"	vxor		%v7,  %v11, %v10	\n"
+	"	add		%r3,  %r3,  %r5		\n"
+	"	vavgub		%v8,  %v11, %v10	\n"
+	"	vor		%v0,  %v7,  %v5		\n"
+	"	vxor		%v13, %v8,  %v6		\n"
+	"	vand		%v0,  %v19, %v0		\n"
+	"	vavgub		%v1,  %v8,  %v6		\n"
+	"	vand		%v0,  %v0,  %v13	\n"
+	"	vsububm		%v1,  %v1,  %v0		\n"
+	"	vavgub		%v13, %v4,  %v1		\n"
+	"	stvewx		%v13, 0,    %r3		\n"
+	"	stvewx		%v13, %r9,  %r3		\n"
+	 );
+}
+
+MPEG2_MC_EXTERN (altivec)
+
+#endif	/* ARCH_PPC */
+
+#else	/* __ALTIVEC__ */
+
+#define vector_s16_t vector signed short
+#define vector_u16_t vector unsigned short
+#define vector_s8_t vector signed char
+#define vector_u8_t vector unsigned char
+#define vector_s32_t vector signed int
+#define vector_u32_t vector unsigned int
+
+void MC_put_o_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm, ref0, ref1, tmp;
+
+    perm = vec_lvsl (0, ref);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    tmp = vec_perm (ref0, ref1, perm);
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	vec_st (tmp, 0, dest);
+	tmp = vec_perm (ref0, ref1, perm);
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp = vec_perm (ref0, ref1, perm);
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    vec_st (tmp, 0, dest);
+    tmp = vec_perm (ref0, ref1, perm);
+    vec_st (tmp, stride, dest);
+}
+
+void MC_put_o_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0, perm1, tmp0, tmp1, ref0, ref1;
+
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_perm (ref0, ref1, perm1);
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_perm (ref0, ref1, perm0);
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp1 = vec_perm (ref0, ref1, perm1);
+    vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
+void MC_put_x_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, tmp;
+
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, vec_splat_u8 (1));
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		   vec_perm (ref0, ref1, permB));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	vec_st (tmp, 0, dest);
+	tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		       vec_perm (ref0, ref1, permB));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		       vec_perm (ref0, ref1, permB));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    vec_st (tmp, 0, dest);
+    tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		   vec_perm (ref0, ref1, permB));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_put_x_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0A, perm0B, perm1A, perm1B, ones, tmp0, tmp1, ref0, ref1;
+
+    ones = vec_splat_u8 (1);
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0A = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    perm0B = vec_add (perm0A, ones);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1A = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+    perm1B = vec_add (perm1A, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    tmp0 = vec_avg (vec_perm (ref0, ref1, perm0A),
+		    vec_perm (ref0, ref1, perm0B));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_avg (vec_perm (ref0, ref1, perm1A),
+			vec_perm (ref0, ref1, perm1B));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_avg (vec_perm (ref0, ref1, perm0A),
+			vec_perm (ref0, ref1, perm0B));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp1 = vec_avg (vec_perm (ref0, ref1, perm1A),
+		    vec_perm (ref0, ref1, perm1B));
+    vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
+void MC_put_y_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm, ref0, ref1, tmp0, tmp1, tmp;
+
+    perm = vec_lvsl (0, ref);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm);
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    tmp1 = vec_perm (ref0, ref1, perm);
+    tmp = vec_avg (tmp0, tmp1);
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	vec_st (tmp, 0, dest);
+	tmp0 = vec_perm (ref0, ref1, perm);
+	tmp = vec_avg (tmp0, tmp1);
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp1 = vec_perm (ref0, ref1, perm);
+	tmp = vec_avg (tmp0, tmp1);
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    vec_st (tmp, 0, dest);
+    tmp0 = vec_perm (ref0, ref1, perm);
+    tmp = vec_avg (tmp0, tmp1);
+    vec_st (tmp, stride, dest);
+}
+
+void MC_put_y_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0, perm1, tmp0, tmp1, tmp, ref0, ref1;
+
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    tmp1 = vec_perm (ref0, ref1, perm1);
+    tmp = vec_avg (tmp0, tmp1);
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_perm (ref0, ref1, perm0);
+	tmp = vec_avg (tmp0, tmp1);
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_perm (ref0, ref1, perm1);
+	tmp = vec_avg (tmp0, tmp1);
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+    tmp = vec_avg (tmp0, tmp1);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
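+/* MC_put_xy_16: copy a 16-wide block with half-pel interpolation in both
+ * directions.  A plain vec_avg of the two per-row averages would be rounded
+ * up twice, so a correction term is subtracted.  Equivalent scalar form for
+ * one byte (a, b from the upper row, c, d from the lower row):
+ *
+ *   avg0 = (a + b + 1) >> 1;
+ *   avg1 = (c + d + 1) >> 1;
+ *   out  = ((avg0 + avg1 + 1) >> 1)
+ *          - (((a ^ b) | (c ^ d)) & (avg0 ^ avg1) & 1);
+ *
+ * which equals the exact (a + b + c + d + 2) >> 2 required by MPEG-2. */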
+void MC_put_xy_16_altivec (unsigned char * dest, const unsigned char * ref,
+			   const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, A, B, avg0, avg1, xor0, xor1, tmp;
+    vector_u8_t ones;
+
+    ones = vec_splat_u8 (1);
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg1 = vec_avg (A, B);
+    xor1 = vec_xor (A, B);
+    tmp = vec_sub (vec_avg (avg0, avg1),
+		   vec_and (vec_and (ones, vec_or (xor0, xor1)),
+			    vec_xor (avg0, avg1)));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	vec_st (tmp, 0, dest);
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	avg0 = vec_avg (A, B);
+	xor0 = vec_xor (A, B);
+	tmp = vec_sub (vec_avg (avg0, avg1),
+		       vec_and (vec_and (ones, vec_or (xor0, xor1)),
+				vec_xor (avg0, avg1)));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	avg1 = vec_avg (A, B);
+	xor1 = vec_xor (A, B);
+	tmp = vec_sub (vec_avg (avg0, avg1),
+		       vec_and (vec_and (ones, vec_or (xor0, xor1)),
+				vec_xor (avg0, avg1)));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    vec_st (tmp, 0, dest);
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+    tmp = vec_sub (vec_avg (avg0, avg1),
+		   vec_and (vec_and (ones, vec_or (xor0, xor1)),
+			    vec_xor (avg0, avg1)));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_put_xy_8_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm0A, perm0B, perm1A, perm1B, ref0, ref1, A, B;
+    vector_u8_t avg0, avg1, xor0, xor1, tmp, ones;
+
+    ones = vec_splat_u8 (1);
+    perm0A = vec_lvsl (0, ref);
+    perm0A = vec_mergeh (perm0A, perm0A);
+    perm0A = vec_pack ((vector_u16_t)perm0A, (vector_u16_t)perm0A);
+    perm0B = vec_add (perm0A, ones);
+    perm1A = vec_lvsl (stride, ref);
+    perm1A = vec_mergeh (perm1A, perm1A);
+    perm1A = vec_pack ((vector_u16_t)perm1A, (vector_u16_t)perm1A);
+    perm1B = vec_add (perm1A, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, perm0A);
+    B = vec_perm (ref0, ref1, perm0B);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, perm1A);
+    B = vec_perm (ref0, ref1, perm1B);
+    avg1 = vec_avg (A, B);
+    xor1 = vec_xor (A, B);
+    tmp = vec_sub (vec_avg (avg0, avg1),
+		   vec_and (vec_and (ones, vec_or (xor0, xor1)),
+			    vec_xor (avg0, avg1)));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	A = vec_perm (ref0, ref1, perm0A);
+	B = vec_perm (ref0, ref1, perm0B);
+	avg0 = vec_avg (A, B);
+	xor0 = vec_xor (A, B);
+	tmp = vec_sub (vec_avg (avg0, avg1),
+		       vec_and (vec_and (ones, vec_or (xor0, xor1)),
+				vec_xor (avg0, avg1)));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	A = vec_perm (ref0, ref1, perm1A);
+	B = vec_perm (ref0, ref1, perm1B);
+	avg1 = vec_avg (A, B);
+	xor1 = vec_xor (A, B);
+	tmp = vec_sub (vec_avg (avg0, avg1),
+		       vec_and (vec_and (ones, vec_or (xor0, xor1)),
+				vec_xor (avg0, avg1)));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+    dest += stride;
+    A = vec_perm (ref0, ref1, perm0A);
+    B = vec_perm (ref0, ref1, perm0B);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+    tmp = vec_sub (vec_avg (avg0, avg1),
+		   vec_and (vec_and (ones, vec_or (xor0, xor1)),
+			    vec_xor (avg0, avg1)));
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
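+/* Disabled (never compiled) alternative version of MC_put_xy_8_altivec that
+ * computes the average with widened 16-bit sums instead of the avg/xor
+ * correction used above. */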
+#if 0
+void MC_put_xy_8_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, A, B, C, D, tmp, zero, ones;
+    vector_u16_t splat2, temp;
+
+    ones = vec_splat_u8 (1);
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, ones);
+
+    zero = vec_splat_u8 (0);
+    splat2 = vec_splat_u16 (2);
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	C = vec_perm (ref0, ref1, permA);
+	D = vec_perm (ref0, ref1, permB);
+
+	temp = vec_add (vec_add ((vector_u16_t)vec_mergeh (zero, A),
+				(vector_u16_t)vec_mergeh (zero, B)),
+		       vec_add ((vector_u16_t)vec_mergeh (zero, C),
+				(vector_u16_t)vec_mergeh (zero, D)));
+	temp = vec_sr (vec_add (temp, splat2), splat2);
+	tmp = vec_pack (temp, temp);
+
+	vec_st (tmp, 0, dest);
+	dest += stride;
+	tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		       vec_perm (ref0, ref1, permB));
+    } while (--height);
+}
+#endif
+
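+/* The MC_avg_* variants below work like the corresponding MC_put_* ones,
+ * except that the interpolated prediction is additionally vec_avg()'d with
+ * the data already present in dest before being stored back; dest is
+ * therefore expected to hold a previous prediction (e.g. the other
+ * direction of a bidirectional macroblock). */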
+void MC_avg_o_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm, ref0, ref1, tmp, prev;
+
+    perm = vec_lvsl (0, ref);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_st (tmp, 0, dest);
+	tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	prev = vec_ld (2*stride, dest);
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    prev = vec_ld (stride, dest);
+    vec_st (tmp, 0, dest);
+    tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_avg_o_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0, perm1, tmp0, tmp1, ref0, ref1, prev;
+
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    tmp0 = vec_avg (prev, vec_perm (ref0, ref1, perm0));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_avg (prev, vec_perm (ref0, ref1, perm1));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_avg (prev, vec_perm (ref0, ref1, perm0));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    prev = vec_ld (stride, dest);
+    vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp1 = vec_avg (prev, vec_perm (ref0, ref1, perm1));
+    vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
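+/* MC_avg_x_16: horizontal half-pel interpolation as in MC_put_x_16, with the
+ * result then averaged against the existing dest data.  prev for the next
+ * output row is loaded before the current row is stored, so each load is
+ * issued well ahead of the vec_avg that consumes it. */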
+void MC_avg_x_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, tmp, prev;
+
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, vec_splat_u8 (1));
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    prev = vec_ld (0, dest);
+    ref += stride;
+    tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+				  vec_perm (ref0, ref1, permB)));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_st (tmp, 0, dest);
+	tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+				      vec_perm (ref0, ref1, permB)));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	prev = vec_ld (2*stride, dest);
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+				      vec_perm (ref0, ref1, permB)));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    prev = vec_ld (stride, dest);
+    vec_st (tmp, 0, dest);
+    tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+				  vec_perm (ref0, ref1, permB)));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_avg_x_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0A, perm0B, perm1A, perm1B, ones, tmp0, tmp1, ref0, ref1;
+    vector_u8_t prev;
+
+    ones = vec_splat_u8 (1);
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0A = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    perm0B = vec_add (perm0A, ones);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1A = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+    perm1B = vec_add (perm1A, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    prev = vec_ld (0, dest);
+    ref += stride;
+    tmp0 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm0A),
+				   vec_perm (ref0, ref1, perm0B)));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm1A),
+				       vec_perm (ref0, ref1, perm1B)));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm0A),
+				       vec_perm (ref0, ref1, perm0B)));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    prev = vec_ld (stride, dest);
+    vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp1 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm1A),
+				   vec_perm (ref0, ref1, perm1B)));
+    vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
+void MC_avg_y_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm, ref0, ref1, tmp0, tmp1, tmp, prev;
+
+    perm = vec_lvsl (0, ref);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm);
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    tmp1 = vec_perm (ref0, ref1, perm);
+    tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_st (tmp, 0, dest);
+	tmp0 = vec_perm (ref0, ref1, perm);
+	tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	prev = vec_ld (2*stride, dest);
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp1 = vec_perm (ref0, ref1, perm);
+	tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    prev = vec_ld (stride, dest);
+    vec_st (tmp, 0, dest);
+    tmp0 = vec_perm (ref0, ref1, perm);
+    tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_avg_y_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0, perm1, tmp0, tmp1, tmp, ref0, ref1, prev;
+
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    tmp1 = vec_perm (ref0, ref1, perm1);
+    tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_perm (ref0, ref1, perm0);
+	tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_perm (ref0, ref1, perm1);
+	tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    prev = vec_ld (stride, dest);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+    tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
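+/* MC_avg_xy_16: the exact four-sample average from MC_put_xy_16 is computed
+ * first and then vec_avg()'d with the existing dest data, i.e. the final
+ * value per byte is ((dest + ((a + b + c + d + 2) >> 2) + 1) >> 1). */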
+void MC_avg_xy_16_altivec (unsigned char * dest, const unsigned char * ref,
+			   const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, A, B, avg0, avg1, xor0, xor1, tmp;
+    vector_u8_t ones, prev;
+
+    ones = vec_splat_u8 (1);
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg1 = vec_avg (A, B);
+    xor1 = vec_xor (A, B);
+    tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+				  vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					   vec_xor (avg0, avg1))));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_st (tmp, 0, dest);
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	avg0 = vec_avg (A, B);
+	xor0 = vec_xor (A, B);
+	tmp = vec_avg (prev,
+		       vec_sub (vec_avg (avg0, avg1),
+				vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					 vec_xor (avg0, avg1))));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	prev = vec_ld (2*stride, dest);
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	avg1 = vec_avg (A, B);
+	xor1 = vec_xor (A, B);
+	tmp = vec_avg (prev,
+		       vec_sub (vec_avg (avg0, avg1),
+				vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					 vec_xor (avg0, avg1))));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    prev = vec_ld (stride, dest);
+    vec_st (tmp, 0, dest);
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+    tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+				  vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					   vec_xor (avg0, avg1))));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_avg_xy_8_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm0A, perm0B, perm1A, perm1B, ref0, ref1, A, B;
+    vector_u8_t avg0, avg1, xor0, xor1, tmp, ones, prev;
+
+    ones = vec_splat_u8 (1);
+    perm0A = vec_lvsl (0, ref);
+    perm0A = vec_mergeh (perm0A, perm0A);
+    perm0A = vec_pack ((vector_u16_t)perm0A, (vector_u16_t)perm0A);
+    perm0B = vec_add (perm0A, ones);
+    perm1A = vec_lvsl (stride, ref);
+    perm1A = vec_mergeh (perm1A, perm1A);
+    perm1A = vec_pack ((vector_u16_t)perm1A, (vector_u16_t)perm1A);
+    perm1B = vec_add (perm1A, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, perm0A);
+    B = vec_perm (ref0, ref1, perm0B);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    A = vec_perm (ref0, ref1, perm1A);
+    B = vec_perm (ref0, ref1, perm1B);
+    avg1 = vec_avg (A, B);
+    xor1 = vec_xor (A, B);
+    tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+				  vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					   vec_xor (avg0, avg1))));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	A = vec_perm (ref0, ref1, perm0A);
+	B = vec_perm (ref0, ref1, perm0B);
+	avg0 = vec_avg (A, B);
+	xor0 = vec_xor (A, B);
+	tmp = vec_avg (prev,
+		       vec_sub (vec_avg (avg0, avg1),
+				vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					 vec_xor (avg0, avg1))));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	A = vec_perm (ref0, ref1, perm1A);
+	B = vec_perm (ref0, ref1, perm1B);
+	avg1 = vec_avg (A, B);
+	xor1 = vec_xor (A, B);
+	tmp = vec_avg (prev,
+		       vec_sub (vec_avg (avg0, avg1),
+				vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					 vec_xor (avg0, avg1))));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    prev = vec_ld (stride, dest);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+    dest += stride;
+    A = vec_perm (ref0, ref1, perm0A);
+    B = vec_perm (ref0, ref1, perm0B);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+    tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+				  vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					   vec_xor (avg0, avg1))));
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
+#endif	/* __ALTIVEC__ */