# HG changeset patch
# User arpi
# Date 1049647309 0
# Node ID 89b48bc6c441dd31f666d26bc37a595b56ce0e23
# Parent  08496327b7ecbe1d8e554d19c00b84d03e903e33
Importing libmpeg2 from mpeg2dec-0.3.1

diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/alloc.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/alloc.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,76 @@
+/*
+ * alloc.c
+ * Copyright (C) 2000-2002 Michel Lespinasse
+ * Copyright (C) 1999-2000 Aaron Holtzman
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "config.h"
+
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+
+#if defined(HAVE_MEMALIGN) && !defined(__cplusplus)
+/* some systems have memalign() but no declaration for it */
+void * memalign (size_t align, size_t size);
+#endif
+
+void * (* mpeg2_malloc_hook) (int size, int reason) = NULL;
+int (* mpeg2_free_hook) (void * buf) = NULL;
+
+void * mpeg2_malloc (int size, int reason)
+{
+    char * buf;
+
+    if (mpeg2_malloc_hook) {
+        buf = (char *) mpeg2_malloc_hook (size, reason);
+        if (buf)
+            return buf;
+    }
+
+#if defined(HAVE_MEMALIGN) && !defined(__cplusplus) && !defined(DEBUG)
+    return memalign (16, size);
+#else
+    buf = (char *) malloc (size + 15 + sizeof (void **));
+    if (buf) {
+        char * align_buf;
+
+        align_buf = buf + 15 + sizeof (void **);
+        align_buf -= (long)align_buf & 15;
+        *(((void **)align_buf) - 1) = buf;
+        return align_buf;
+    }
+    return NULL;
+#endif
+}
+
+void mpeg2_free (void * buf)
+{
+    if (mpeg2_free_hook && mpeg2_free_hook (buf))
+        return;
+
+#if defined(HAVE_MEMALIGN) && !defined(__cplusplus) && !defined(DEBUG)
+    free (buf);
+#else
+    free (*(((void **)buf) - 1));
+#endif
+}
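The two hooks above form a small override protocol: a custom mpeg2_malloc_hook may return NULL to fall through to the built-in 16-byte-aligned allocation, and mpeg2_free_hook returns nonzero to claim a buffer so that mpeg2_free leaves it alone. A minimal sketch of a leak-counting client built on that contract (count_malloc, count_free and live_buffers are illustrative names, not part of libmpeg2):

    extern void * (* mpeg2_malloc_hook) (int size, int reason);
    extern int (* mpeg2_free_hook) (void * buf);

    static int live_buffers = 0;

    static void * count_malloc (int size, int reason)
    {
        live_buffers++;     /* could also be broken down per ALLOC_* reason */
        return NULL;        /* NULL = fall through to the default aligned path */
    }

    static int count_free (void * buf)
    {
        live_buffers--;
        return 0;           /* 0 = not claimed; mpeg2_free releases the buffer */
    }

    /* installed once, before the first mpeg2_malloc call:
       mpeg2_malloc_hook = count_malloc;  mpeg2_free_hook = count_free; */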
diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/alpha_asm.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/alpha_asm.h	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,184 @@
+/*
+ * Alpha assembly macros
+ * Copyright (c) 2002 Falk Hueffner
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef ALPHA_ASM_H
+#define ALPHA_ASM_H
+
+#include <inttypes.h>
+
+#if defined __GNUC__
+# define GNUC_PREREQ(maj, min) \
+        ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
+#else
+# define GNUC_PREREQ(maj, min) 0
+#endif
+
+#define AMASK_BWX (1 << 0)
+#define AMASK_FIX (1 << 1)
+#define AMASK_CIX (1 << 2)
+#define AMASK_MVI (1 << 8)
+
+#ifdef __alpha_bwx__
+# define HAVE_BWX() 1
+#else
+# define HAVE_BWX() (amask(AMASK_BWX) == 0)
+#endif
+#ifdef __alpha_fix__
+# define HAVE_FIX() 1
+#else
+# define HAVE_FIX() (amask(AMASK_FIX) == 0)
+#endif
+#ifdef __alpha_max__
+# define HAVE_MVI() 1
+#else
+# define HAVE_MVI() (amask(AMASK_MVI) == 0)
+#endif
+#ifdef __alpha_cix__
+# define HAVE_CIX() 1
+#else
+# define HAVE_CIX() (amask(AMASK_CIX) == 0)
+#endif
+
+inline static uint64_t BYTE_VEC(uint64_t x)
+{
+    x |= x << 8;
+    x |= x << 16;
+    x |= x << 32;
+    return x;
+}
+inline static uint64_t WORD_VEC(uint64_t x)
+{
+    x |= x << 16;
+    x |= x << 32;
+    return x;
+}
+
+#define ldq(p) (*(const uint64_t *) (p))
+#define ldl(p) (*(const int32_t *) (p))
+#define stl(l, p) do { *(uint32_t *) (p) = (l); } while (0)
+#define stq(l, p) do { *(uint64_t *) (p) = (l); } while (0)
+#define sextw(x) ((int16_t) (x))
+
+#ifdef __GNUC__
+struct unaligned_long { uint64_t l; } __attribute__((packed));
+#define ldq_u(p) (*(const uint64_t *) (((uint64_t) (p)) & ~7ul))
+#define uldq(a) (((const struct unaligned_long *) (a))->l)
+
+#if GNUC_PREREQ(3,0)
+/* Unfortunately, __builtin_prefetch is slightly buggy on Alpha. The
+   defines here are kludged so we still get the right
+   instruction. This needs to be adapted as soon as gcc is fixed. */
+# define prefetch(p) __builtin_prefetch((p), 0, 1)
+# define prefetch_en(p) __builtin_prefetch((p), 1, 1)
+# define prefetch_m(p) __builtin_prefetch((p), 0, 0)
+# define prefetch_men(p) __builtin_prefetch((p), 1, 0)
+#else
+# define prefetch(p) asm volatile("ldl $31,%0" : : "m"(*(const char *) (p)) : "memory")
+# define prefetch_en(p) asm volatile("ldq $31,%0" : : "m"(*(const char *) (p)) : "memory")
+# define prefetch_m(p) asm volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
+# define prefetch_men(p) asm volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
+#endif
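BYTE_VEC and WORD_VEC above splat a constant across a 64-bit register so that one integer operation acts on eight byte lanes (or four word lanes) at once; motion_comp_alpha.c later in this patch builds its byte-wise average on exactly this trick. A self-contained sketch of the idea in portable C (no Alpha intrinsics required; avg2 here mirrors the definition that appears in motion_comp_alpha.c):

    #include <stdio.h>
    #include <inttypes.h>

    static uint64_t byte_vec (uint64_t x)   /* same doubling fold as BYTE_VEC */
    {
        x |= x << 8;
        x |= x << 16;
        x |= x << 32;
        return x;
    }

    /* per-byte (a + b + 1) / 2 with no carries crossing byte lanes:
       (a | b) minus half of the per-byte difference (a ^ b) */
    static uint64_t avg2 (uint64_t a, uint64_t b)
    {
        return (a | b) - (((a ^ b) & byte_vec (0xfe)) >> 1);
    }

    int main (void)
    {
        printf ("%016" PRIx64 "\n", byte_vec (0x80));   /* 8080808080808080 */
        printf ("%016" PRIx64 "\n", avg2 (byte_vec (0x10), byte_vec (0x13)));
        /* prints 1212121212121212: each byte is (0x10 + 0x13 + 1) >> 1 */
        return 0;
    }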
+
+#if GNUC_PREREQ(3,3)
+#define cmpbge __builtin_alpha_cmpbge
+/* Avoid warnings. */
+#define extql(a, b) __builtin_alpha_extql(a, (uint64_t) (b))
+#define extwl(a, b) __builtin_alpha_extwl(a, (uint64_t) (b))
+#define extqh(a, b) __builtin_alpha_extqh(a, (uint64_t) (b))
+#define zap __builtin_alpha_zap
+#define zapnot __builtin_alpha_zapnot
+#define amask __builtin_alpha_amask
+#define implver __builtin_alpha_implver
+#define rpcc __builtin_alpha_rpcc
+#define minub8 __builtin_alpha_minub8
+#define minsb8 __builtin_alpha_minsb8
+#define minuw4 __builtin_alpha_minuw4
+#define minsw4 __builtin_alpha_minsw4
+#define maxub8 __builtin_alpha_maxub8
+#define maxsb8 __builtin_alpha_maxsb8
+#define maxuw4 __builtin_alpha_maxuw4
+#define maxsw4 __builtin_alpha_maxsw4
+#define perr __builtin_alpha_perr
+#define pklb __builtin_alpha_pklb
+#define pkwb __builtin_alpha_pkwb
+#define unpkbl __builtin_alpha_unpkbl
+#define unpkbw __builtin_alpha_unpkbw
+#else
+#define cmpbge(a, b) ({ uint64_t __r; asm ("cmpbge %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define extql(a, b) ({ uint64_t __r; asm ("extql %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define extwl(a, b) ({ uint64_t __r; asm ("extwl %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define extqh(a, b) ({ uint64_t __r; asm ("extqh %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define zap(a, b) ({ uint64_t __r; asm ("zap %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define zapnot(a, b) ({ uint64_t __r; asm ("zapnot %r1,%2,%0" : "=r" (__r) : "rJ" (a), "rI" (b)); __r; })
+#define amask(a) ({ uint64_t __r; asm ("amask %1,%0" : "=r" (__r) : "rI" (a)); __r; })
+#define implver() ({ uint64_t __r; asm ("implver %0" : "=r" (__r)); __r; })
+#define rpcc() ({ uint64_t __r; asm volatile ("rpcc %0" : "=r" (__r)); __r; })
+#define minub8(a, b) ({ uint64_t __r; asm ("minub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minsb8(a, b) ({ uint64_t __r; asm ("minsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minuw4(a, b) ({ uint64_t __r; asm ("minuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minsw4(a, b) ({ uint64_t __r; asm ("minsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxub8(a, b) ({ uint64_t __r; asm ("maxub8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxsb8(a, b) ({ uint64_t __r; asm ("maxsb8 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxuw4(a, b) ({ uint64_t __r; asm ("maxuw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxsw4(a, b) ({ uint64_t __r; asm ("maxsw4 %r1,%2,%0" : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define perr(a, b) ({ uint64_t __r; asm ("perr %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
+#define pklb(a) ({ uint64_t __r; asm ("pklb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
+#define pkwb(a) ({ uint64_t __r; asm ("pkwb %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
+#define unpkbl(a) ({ uint64_t __r; asm ("unpkbl %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
+#define unpkbw(a) ({ uint64_t __r; asm ("unpkbw %r1,%0" : "=r" (__r) : "rJ" (a)); __r; })
+#endif
+#define wh64(p) asm volatile("wh64 (%0)" : : "r"(p) : "memory")
+
+#elif defined(__DECC) /* Digital/Compaq/hp "ccc" compiler */
+
+#include <c_asm.h>
+#define ldq_u(a) asm ("ldq_u %v0,0(%a0)", a)
+#define uldq(a) (*(const __unaligned uint64_t *) (a))
+#define cmpbge(a, b) asm ("cmpbge %a0,%a1,%v0", a, b)
+#define extql(a, b) asm ("extql %a0,%a1,%v0", a, b)
+#define extwl(a, b) asm ("extwl %a0,%a1,%v0", a, b)
+#define extqh(a, b) asm ("extqh %a0,%a1,%v0", a, b)
+#define 
zap(a, b) asm ("zap %a0,%a1,%v0", a, b) +#define zapnot(a, b) asm ("zapnot %a0,%a1,%v0", a, b) +#define amask(a) asm ("amask %a0,%v0", a) +#define implver() asm ("implver %v0") +#define rpcc() asm ("rpcc %v0") +#define minub8(a, b) asm ("minub8 %a0,%a1,%v0", a, b) +#define minsb8(a, b) asm ("minsb8 %a0,%a1,%v0", a, b) +#define minuw4(a, b) asm ("minuw4 %a0,%a1,%v0", a, b) +#define minsw4(a, b) asm ("minsw4 %a0,%a1,%v0", a, b) +#define maxub8(a, b) asm ("maxub8 %a0,%a1,%v0", a, b) +#define maxsb8(a, b) asm ("maxsb8 %a0,%a1,%v0", a, b) +#define maxuw4(a, b) asm ("maxuw4 %a0,%a1,%v0", a, b) +#define maxsw4(a, b) asm ("maxsw4 %a0,%a1,%v0", a, b) +#define perr(a, b) asm ("perr %a0,%a1,%v0", a, b) +#define pklb(a) asm ("pklb %a0,%v0", a) +#define pkwb(a) asm ("pkwb %a0,%v0", a) +#define unpkbl(a) asm ("unpkbl %a0,%v0", a) +#define unpkbw(a) asm ("unpkbw %a0,%v0", a) +#define wh64(a) asm ("wh64 %a0", a) + +#else +#error "Unknown compiler!" +#endif + +#endif /* ALPHA_ASM_H */ diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/convert.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libmpeg2/convert.h Sun Apr 06 16:41:49 2003 +0000 @@ -0,0 +1,56 @@ +/* + * convert.h + * Copyright (C) 2000-2002 Michel Lespinasse + * Copyright (C) 1999-2000 Aaron Holtzman + * + * This file is part of mpeg2dec, a free MPEG-2 video stream decoder. + * See http://libmpeg2.sourceforge.net/ for updates. + * + * mpeg2dec is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * mpeg2dec is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef CONVERT_H +#define CONVERT_H + +#define CONVERT_FRAME 0 +#define CONVERT_TOP_FIELD 1 +#define CONVERT_BOTTOM_FIELD 2 +#define CONVERT_BOTH_FIELDS 3 + +typedef struct convert_init_s { + void * id; + int id_size; + int buf_size[3]; + void (* start) (void * id, uint8_t * const * dest, int flags); + void (* copy) (void * id, uint8_t * const * src, unsigned int v_offset); +} convert_init_t; + +typedef void convert_t (int width, int height, uint32_t accel, void * arg, + convert_init_t * result); + +convert_t convert_rgb32; +convert_t convert_rgb24; +convert_t convert_rgb16; +convert_t convert_rgb15; +convert_t convert_bgr32; +convert_t convert_bgr24; +convert_t convert_bgr16; +convert_t convert_bgr15; + +#define CONVERT_RGB 0 +#define CONVERT_BGR 1 +convert_t * convert_rgb (int order, int bpp); + +#endif /* CONVERT_H */ diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/cpu_accel.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libmpeg2/cpu_accel.c Sun Apr 06 16:41:49 2003 +0000 @@ -0,0 +1,175 @@ +/* + * cpu_accel.c + * Copyright (C) 2000-2002 Michel Lespinasse + * Copyright (C) 1999-2000 Aaron Holtzman + * + * This file is part of mpeg2dec, a free MPEG-2 video stream decoder. + * See http://libmpeg2.sourceforge.net/ for updates. 
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "config.h"
+
+#include <inttypes.h>
+
+#include "mpeg2.h"
+
+#ifdef ACCEL_DETECT
+#ifdef ARCH_X86
+static inline uint32_t arch_accel (void)
+{
+    uint32_t eax, ebx, ecx, edx;
+    int AMD;
+    uint32_t caps;
+
+#ifndef PIC
+#define cpuid(op,eax,ebx,ecx,edx)       \
+    __asm__ ("cpuid"                    \
+             : "=a" (eax),              \
+               "=b" (ebx),              \
+               "=c" (ecx),              \
+               "=d" (edx)               \
+             : "a" (op)                 \
+             : "cc")
+#else   /* PIC version : save ebx */
+#define cpuid(op,eax,ebx,ecx,edx)       \
+    __asm__ ("push %%ebx\n\t"           \
+             "cpuid\n\t"                \
+             "movl %%ebx,%1\n\t"        \
+             "pop %%ebx"                \
+             : "=a" (eax),              \
+               "=r" (ebx),              \
+               "=c" (ecx),              \
+               "=d" (edx)               \
+             : "a" (op)                 \
+             : "cc")
+#endif
+
+    __asm__ ("pushf\n\t"
+             "pushf\n\t"
+             "pop %0\n\t"
+             "movl %0,%1\n\t"
+             "xorl $0x200000,%0\n\t"
+             "push %0\n\t"
+             "popf\n\t"
+             "pushf\n\t"
+             "pop %0\n\t"
+             "popf"
+             : "=r" (eax),
+               "=r" (ebx)
+             :
+             : "cc");
+
+    if (eax == ebx)             /* no cpuid */
+        return 0;
+
+    cpuid (0x00000000, eax, ebx, ecx, edx);
+    if (!eax)                   /* vendor string only */
+        return 0;
+
+    AMD = (ebx == 0x68747541) && (ecx == 0x444d4163) && (edx == 0x69746e65);
+
+    cpuid (0x00000001, eax, ebx, ecx, edx);
+    if (! (edx & 0x00800000))   /* no MMX */
+        return 0;
+
+    caps = MPEG2_ACCEL_X86_MMX;
+    if (edx & 0x02000000)       /* SSE - identical to AMD MMX extensions */
+        caps = MPEG2_ACCEL_X86_MMX | MPEG2_ACCEL_X86_MMXEXT;
+
+    cpuid (0x80000000, eax, ebx, ecx, edx);
+    if (eax < 0x80000001)       /* no extended capabilities */
+        return caps;
+
+    cpuid (0x80000001, eax, ebx, ecx, edx);
+
+    if (edx & 0x80000000)
+        caps |= MPEG2_ACCEL_X86_3DNOW;
+
+    if (AMD && (edx & 0x00400000))      /* AMD MMX extensions */
+        caps |= MPEG2_ACCEL_X86_MMXEXT;
+
+    return caps;
+}
+#endif /* ARCH_X86 */
+
+#ifdef ARCH_PPC
+#include <signal.h>
+#include <setjmp.h>
+
+static sigjmp_buf jmpbuf;
+static volatile sig_atomic_t canjump = 0;
+
+static RETSIGTYPE sigill_handler (int sig)
+{
+    if (!canjump) {
+        signal (sig, SIG_DFL);
+        raise (sig);
+    }
+
+    canjump = 0;
+    siglongjmp (jmpbuf, 1);
+}
+
+static inline uint32_t arch_accel (void)
+{
+    signal (SIGILL, sigill_handler);
+    if (sigsetjmp (jmpbuf, 1)) {
+        signal (SIGILL, SIG_DFL);
+        return 0;
+    }
+
+    canjump = 1;
+
+    asm volatile ("mtspr 256, %0\n\t"
+                  "vand %%v0, %%v0, %%v0"
+                  :
+                  : "r" (-1));
+
+    signal (SIGILL, SIG_DFL);
+    return MPEG2_ACCEL_PPC_ALTIVEC;
+}
+#endif /* ARCH_PPC */
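The magic numbers in the AMD test above are the vendor id "AuthenticAMD" exactly as cpuid leaf 0 returns it: twelve ASCII characters packed little-endian into ebx, edx and ecx, in that order. A standalone demonstration of the packing (example code, not part of the file):

    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>

    int main (void)
    {
        uint32_t ebx = 0x68747541;      /* "Auth" */
        uint32_t edx = 0x69746e65;      /* "enti" */
        uint32_t ecx = 0x444d4163;      /* "cAMD" */
        char vendor[13];

        memcpy (vendor + 0, &ebx, 4);   /* low byte is the first character */
        memcpy (vendor + 4, &edx, 4);
        memcpy (vendor + 8, &ecx, 4);
        vendor[12] = 0;
        printf ("%s\n", vendor);        /* AuthenticAMD on a little-endian host */
        return 0;
    }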
+
+#ifdef ARCH_ALPHA
+static inline uint32_t arch_accel (void)
+{
+    uint64_t no_mvi;
+
+    asm volatile ("amask %1, %0"
+                  : "=r" (no_mvi)
+                  : "rI" (256));        /* AMASK_MVI */
+    return no_mvi ? MPEG2_ACCEL_ALPHA : (MPEG2_ACCEL_ALPHA |
+                                         MPEG2_ACCEL_ALPHA_MVI);
+}
+#endif /* ARCH_ALPHA */
+#endif
+
+uint32_t mpeg2_detect_accel (void)
+{
+    uint32_t accel;
+
+    accel = 0;
+#ifdef ACCEL_DETECT
+#ifdef LIBMPEG2_MLIB
+    accel = MPEG2_ACCEL_MLIB;
+#endif
+#if defined (ARCH_X86) || defined (ARCH_PPC) || defined (ARCH_ALPHA)
+    accel |= arch_accel ();
+#endif
+#endif
+    return accel;
+}
diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/cpu_state.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/cpu_state.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,119 @@
+/*
+ * cpu_state.c
+ * Copyright (C) 2000-2002 Michel Lespinasse
+ * Copyright (C) 1999-2000 Aaron Holtzman
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "config.h"
+
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+#include "attributes.h"
+#ifdef ARCH_X86
+#include "mmx.h"
+#endif
+
+void (* mpeg2_cpu_state_save) (cpu_state_t * state) = NULL;
+void (* mpeg2_cpu_state_restore) (cpu_state_t * state) = NULL;
+
+#ifdef ARCH_X86
+static void state_restore_mmx (cpu_state_t * state)
+{
+    emms ();
+}
+#endif
+
+#ifdef ARCH_PPC
+static void state_save_altivec (cpu_state_t * state)
+{
+    asm ("                                              \n"
+         " li %r9, 16                                   \n"
+         " stvx %v20, 0, %r3                            \n"
+         " li %r11, 32                                  \n"
+         " stvx %v21, %r9, %r3                          \n"
+         " li %r9, 48                                   \n"
+         " stvx %v22, %r11, %r3                         \n"
+         " li %r11, 64                                  \n"
+         " stvx %v23, %r9, %r3                          \n"
+         " li %r9, 80                                   \n"
+         " stvx %v24, %r11, %r3                         \n"
+         " li %r11, 96                                  \n"
+         " stvx %v25, %r9, %r3                          \n"
+         " li %r9, 112                                  \n"
+         " stvx %v26, %r11, %r3                         \n"
+         " li %r11, 128                                 \n"
+         " stvx %v27, %r9, %r3                          \n"
+         " li %r9, 144                                  \n"
+         " stvx %v28, %r11, %r3                         \n"
+         " li %r11, 160                                 \n"
+         " stvx %v29, %r9, %r3                          \n"
+         " li %r9, 176                                  \n"
+         " stvx %v30, %r11, %r3                         \n"
+         " stvx %v31, %r9, %r3                          \n"
+         );
+}
+
+static void state_restore_altivec (cpu_state_t * state)
+{
+    asm ("                                              \n"
+         " li %r9, 16                                   \n"
+         " lvx %v20, 0, %r3                             \n"
+         " li %r11, 32                                  \n"
+         " lvx %v21, %r9, %r3                           \n"
+         " li %r9, 48                                   \n"
+         " lvx %v22, %r11, %r3                          \n"
+         " li %r11, 64                                  \n"
+         " lvx %v23, %r9, %r3                           \n"
+         " li %r9, 80                                   \n"
+         " lvx %v24, %r11, %r3                          \n"
+         " li %r11, 96                                  \n"
+         " lvx %v25, %r9, %r3                           \n"
+         " li %r9, 112                                  \n"
+         " lvx %v26, %r11, %r3                          \n"
+         " li %r11, 128                                 \n"
+         " lvx %v27, %r9, %r3                           \n"
+         " li %r9, 144                                  \n"
+         " lvx %v28, %r11, %r3                          \n"
+         " li %r11, 160                                 \n"
+         " lvx %v29, %r9, %r3                           \n"
+         " li %r9, 176                                  \n"
+         " lvx %v30, %r11, %r3                          \n"
+         " lvx %v31, %r9, %r3                           \n"
+         );
+}
+#endif
+
+void mpeg2_cpu_state_init (uint32_t accel)
+{
+#ifdef ARCH_X86
+    if (accel & MPEG2_ACCEL_X86_MMX) {
+        mpeg2_cpu_state_restore = state_restore_mmx;
+    }
+#endif
+#ifdef ARCH_PPC
+    if (accel & MPEG2_ACCEL_PPC_ALTIVEC) {
+        mpeg2_cpu_state_save = state_save_altivec;
+        mpeg2_cpu_state_restore = state_restore_altivec;
+    }
+#endif
+}
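cpu_state.c exists because the two architectures leak different state: AltiVec registers v20-v31 are callee-saved, so code that clobbers them must spill and reload them, while MMX code only needs an emms to hand the FPU stack back. A sketch of the intended calling pattern around accelerated decoding (the real call sites are elsewhere in the library; this only illustrates the NULL-checked function-pointer protocol):

    cpu_state_t state;

    if (mpeg2_cpu_state_save)
        mpeg2_cpu_state_save (&state);      /* AltiVec: stvx v20-v31 into state */

    /* ... run IDCT / motion compensation that may clobber vector regs ... */

    if (mpeg2_cpu_state_restore)
        mpeg2_cpu_state_restore (&state);   /* AltiVec: lvx them back; x86: emms */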
diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/decode.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libmpeg2/decode.c	Sun Apr 06 16:41:49 2003 +0000
@@ -0,0 +1,439 @@
+/*
+ * decode.c
+ * Copyright (C) 2000-2002 Michel Lespinasse
+ * Copyright (C) 1999-2000 Aaron Holtzman
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "config.h"
+
+#include <string.h>     /* memcmp/memset, try to remove */
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+#include "convert.h"
+
+static int mpeg2_accels = 0;
+
+#define BUFFER_SIZE (1194 * 1024)
+
+const mpeg2_info_t * mpeg2_info (mpeg2dec_t * mpeg2dec)
+{
+    return &(mpeg2dec->info);
+}
+
+static inline int skip_chunk (mpeg2dec_t * mpeg2dec, int bytes)
+{
+    uint8_t * current;
+    uint32_t shift;
+    uint8_t * chunk_ptr;
+    uint8_t * limit;
+    uint8_t byte;
+
+    if (!bytes)
+        return 0;
+
+    current = mpeg2dec->buf_start;
+    shift = mpeg2dec->shift;
+    chunk_ptr = mpeg2dec->chunk_ptr;
+    limit = current + bytes;
+
+    do {
+        byte = *current++;
+        if (shift == 0x00000100) {
+            int skipped;
+
+            mpeg2dec->shift = 0xffffff00;
+            skipped = current - mpeg2dec->buf_start;
+            mpeg2dec->buf_start = current;
+            return skipped;
+        }
+        shift = (shift | byte) << 8;
+    } while (current < limit);
+
+    mpeg2dec->shift = shift;
+    mpeg2dec->buf_start = current;
+    return 0;
+}
+
+static inline int copy_chunk (mpeg2dec_t * mpeg2dec, int bytes)
+{
+    uint8_t * current;
+    uint32_t shift;
+    uint8_t * chunk_ptr;
+    uint8_t * limit;
+    uint8_t byte;
+
+    if (!bytes)
+        return 0;
+
+    current = mpeg2dec->buf_start;
+    shift = mpeg2dec->shift;
+    chunk_ptr = mpeg2dec->chunk_ptr;
+    limit = current + bytes;
+
+    do {
+        byte = *current++;
+        if (shift == 0x00000100) {
+            int copied;
+
+            mpeg2dec->shift = 0xffffff00;
+            mpeg2dec->chunk_ptr = chunk_ptr + 1;
+            copied = current - mpeg2dec->buf_start;
+            mpeg2dec->buf_start = current;
+            return copied;
+        }
+        shift = (shift | byte) << 8;
+        *chunk_ptr++ = byte;
+    } while (current < limit);
+
+    mpeg2dec->shift = shift;
+    mpeg2dec->buf_start = current;
+    return 0;
+}
+
+void mpeg2_buffer (mpeg2dec_t * mpeg2dec, uint8_t * start, uint8_t * end)
+{
+    mpeg2dec->buf_start = start;
+    mpeg2dec->buf_end = end;
+}
+
+static inline int seek_chunk (mpeg2dec_t * mpeg2dec)
+{
+    int size, skipped;
+
+    size = mpeg2dec->buf_end - mpeg2dec->buf_start;
+    skipped = skip_chunk (mpeg2dec, size);
+    if (!skipped) {
+        mpeg2dec->bytes_since_pts += size;
+        return -1;
+    }
+    mpeg2dec->bytes_since_pts += skipped;
+    mpeg2dec->code = mpeg2dec->buf_start[-1];
+    return 0;
+}
+
+int mpeg2_seek_header (mpeg2dec_t * mpeg2dec)
+{
+    while (mpeg2dec->code != 0xb3 &&
+           ((mpeg2dec->code != 0xb7 && mpeg2dec->code != 0xb8 &&
+             mpeg2dec->code) || mpeg2dec->sequence.width == -1))
+        if (seek_chunk (mpeg2dec))
+            return 
-1; + mpeg2dec->chunk_start = mpeg2dec->chunk_ptr = mpeg2dec->chunk_buffer; + return mpeg2_parse_header (mpeg2dec); +} + +int mpeg2_seek_sequence (mpeg2dec_t * mpeg2dec) +{ + mpeg2dec->sequence.width = -1; + return mpeg2_seek_header (mpeg2dec); +} + +#define RECEIVED(code,state) (((state) << 8) + (code)) + +int mpeg2_parse (mpeg2dec_t * mpeg2dec) +{ + int size_buffer, size_chunk, copied; + + if (mpeg2dec->action) { + int state; + + state = mpeg2dec->action (mpeg2dec); + if (state) + return state; + } + + while (1) { + while ((unsigned) (mpeg2dec->code - mpeg2dec->first_decode_slice) < + mpeg2dec->nb_decode_slices) { + size_buffer = mpeg2dec->buf_end - mpeg2dec->buf_start; + size_chunk = (mpeg2dec->chunk_buffer + BUFFER_SIZE - + mpeg2dec->chunk_ptr); + if (size_buffer <= size_chunk) { + copied = copy_chunk (mpeg2dec, size_buffer); + if (!copied) { + mpeg2dec->bytes_since_pts += size_buffer; + mpeg2dec->chunk_ptr += size_buffer; + return -1; + } + } else { + copied = copy_chunk (mpeg2dec, size_chunk); + if (!copied) { + /* filled the chunk buffer without finding a start code */ + mpeg2dec->bytes_since_pts += size_chunk; + mpeg2dec->action = seek_chunk; + return STATE_INVALID; + } + } + mpeg2dec->bytes_since_pts += copied; + + mpeg2_slice (&(mpeg2dec->decoder), mpeg2dec->code, + mpeg2dec->chunk_start); + mpeg2dec->code = mpeg2dec->buf_start[-1]; + mpeg2dec->chunk_ptr = mpeg2dec->chunk_start; + } + if ((unsigned) (mpeg2dec->code - 1) >= 0xb0 - 1) + break; + if (seek_chunk (mpeg2dec)) + return -1; + } + + switch (RECEIVED (mpeg2dec->code, mpeg2dec->state)) { + case RECEIVED (0x00, STATE_SLICE_1ST): + case RECEIVED (0x00, STATE_SLICE): + mpeg2dec->action = mpeg2_header_picture_start; + break; + case RECEIVED (0xb7, STATE_SLICE): + mpeg2dec->action = mpeg2_header_end; + break; + case RECEIVED (0xb3, STATE_SLICE): + case RECEIVED (0xb8, STATE_SLICE): + mpeg2dec->action = mpeg2_parse_header; + break; + default: + mpeg2dec->action = mpeg2_seek_header; + return STATE_INVALID; + } + return mpeg2dec->state; +} + +int mpeg2_parse_header (mpeg2dec_t * mpeg2dec) +{ + static int (* process_header[]) (mpeg2dec_t * mpeg2dec) = { + mpeg2_header_picture, mpeg2_header_extension, mpeg2_header_user_data, + mpeg2_header_sequence, NULL, NULL, NULL, NULL, mpeg2_header_gop + }; + int size_buffer, size_chunk, copied; + + mpeg2dec->action = mpeg2_parse_header; + while (1) { + size_buffer = mpeg2dec->buf_end - mpeg2dec->buf_start; + size_chunk = (mpeg2dec->chunk_buffer + BUFFER_SIZE - + mpeg2dec->chunk_ptr); + if (size_buffer <= size_chunk) { + copied = copy_chunk (mpeg2dec, size_buffer); + if (!copied) { + mpeg2dec->bytes_since_pts += size_buffer; + mpeg2dec->chunk_ptr += size_buffer; + return -1; + } + } else { + copied = copy_chunk (mpeg2dec, size_chunk); + if (!copied) { + /* filled the chunk buffer without finding a start code */ + mpeg2dec->bytes_since_pts += size_chunk; + mpeg2dec->code = 0xb4; + mpeg2dec->action = mpeg2_seek_header; + return STATE_INVALID; + } + } + mpeg2dec->bytes_since_pts += copied; + + if (process_header[mpeg2dec->code & 0x0b] (mpeg2dec)) { + mpeg2dec->code = mpeg2dec->buf_start[-1]; + mpeg2dec->action = mpeg2_seek_header; + return STATE_INVALID; + } + + mpeg2dec->code = mpeg2dec->buf_start[-1]; + switch (RECEIVED (mpeg2dec->code, mpeg2dec->state)) { + + /* state transition after a sequence header */ + case RECEIVED (0x00, STATE_SEQUENCE): + mpeg2dec->action = mpeg2_header_picture_start; + case RECEIVED (0xb8, STATE_SEQUENCE): + mpeg2_header_sequence_finalize (mpeg2dec); + break; + + /* 
other legal state transitions */ + case RECEIVED (0x00, STATE_GOP): + mpeg2dec->action = mpeg2_header_picture_start; + break; + case RECEIVED (0x01, STATE_PICTURE): + case RECEIVED (0x01, STATE_PICTURE_2ND): + mpeg2dec->action = mpeg2_header_slice_start; + break; + + /* legal headers within a given state */ + case RECEIVED (0xb2, STATE_SEQUENCE): + case RECEIVED (0xb2, STATE_GOP): + case RECEIVED (0xb2, STATE_PICTURE): + case RECEIVED (0xb2, STATE_PICTURE_2ND): + case RECEIVED (0xb5, STATE_SEQUENCE): + case RECEIVED (0xb5, STATE_PICTURE): + case RECEIVED (0xb5, STATE_PICTURE_2ND): + mpeg2dec->chunk_ptr = mpeg2dec->chunk_start; + continue; + + default: + mpeg2dec->action = mpeg2_seek_header; + return STATE_INVALID; + } + + mpeg2dec->chunk_start = mpeg2dec->chunk_ptr = mpeg2dec->chunk_buffer; + return mpeg2dec->state; + } +} + +void mpeg2_convert (mpeg2dec_t * mpeg2dec, + void (* convert) (int, int, uint32_t, void *, + struct convert_init_s *), void * arg) +{ + convert_init_t convert_init; + int size; + + convert_init.id = NULL; + convert (mpeg2dec->decoder.width, mpeg2dec->decoder.height, + mpeg2_accels, arg, &convert_init); + if (convert_init.id_size) { + convert_init.id = mpeg2dec->convert_id = + mpeg2_malloc (convert_init.id_size, ALLOC_CONVERT_ID); + convert (mpeg2dec->decoder.width, mpeg2dec->decoder.height, + mpeg2_accels, arg, &convert_init); + } + mpeg2dec->convert_size[0] = size = convert_init.buf_size[0]; + mpeg2dec->convert_size[1] = size += convert_init.buf_size[1]; + mpeg2dec->convert_size[2] = size += convert_init.buf_size[2]; + mpeg2dec->convert_start = convert_init.start; + mpeg2dec->convert_copy = convert_init.copy; + + size = mpeg2dec->decoder.width * mpeg2dec->decoder.height >> 2; + mpeg2dec->yuv_buf[0][0] = (uint8_t *) mpeg2_malloc (6 * size, ALLOC_YUV); + mpeg2dec->yuv_buf[0][1] = mpeg2dec->yuv_buf[0][0] + 4 * size; + mpeg2dec->yuv_buf[0][2] = mpeg2dec->yuv_buf[0][0] + 5 * size; + mpeg2dec->yuv_buf[1][0] = (uint8_t *) mpeg2_malloc (6 * size, ALLOC_YUV); + mpeg2dec->yuv_buf[1][1] = mpeg2dec->yuv_buf[1][0] + 4 * size; + mpeg2dec->yuv_buf[1][2] = mpeg2dec->yuv_buf[1][0] + 5 * size; + size = mpeg2dec->decoder.width * 8; + mpeg2dec->yuv_buf[2][0] = (uint8_t *) mpeg2_malloc (6 * size, ALLOC_YUV); + mpeg2dec->yuv_buf[2][1] = mpeg2dec->yuv_buf[2][0] + 4 * size; + mpeg2dec->yuv_buf[2][2] = mpeg2dec->yuv_buf[2][0] + 5 * size; +} + +void mpeg2_set_buf (mpeg2dec_t * mpeg2dec, uint8_t * buf[3], void * id) +{ + fbuf_t * fbuf; + + if (mpeg2dec->custom_fbuf) { + mpeg2_set_fbuf (mpeg2dec, mpeg2dec->decoder.coding_type); + fbuf = mpeg2dec->fbuf[0]; + if (mpeg2dec->state == STATE_SEQUENCE) { + mpeg2dec->fbuf[2] = mpeg2dec->fbuf[1]; + mpeg2dec->fbuf[1] = mpeg2dec->fbuf[0]; + } + } else { + fbuf = &(mpeg2dec->fbuf_alloc[mpeg2dec->alloc_index].fbuf); + mpeg2dec->alloc_index_user = ++mpeg2dec->alloc_index; + } + fbuf->buf[0] = buf[0]; + fbuf->buf[1] = buf[1]; + fbuf->buf[2] = buf[2]; + fbuf->id = id; +} + +void mpeg2_custom_fbuf (mpeg2dec_t * mpeg2dec, int custom_fbuf) +{ + mpeg2dec->custom_fbuf = custom_fbuf; +} + +void mpeg2_skip (mpeg2dec_t * mpeg2dec, int skip) +{ + mpeg2dec->first_decode_slice = 1; + mpeg2dec->nb_decode_slices = skip ? 0 : (0xb0 - 1); +} + +void mpeg2_slice_region (mpeg2dec_t * mpeg2dec, int start, int end) +{ + start = (start < 1) ? 1 : (start > 0xb0) ? 0xb0 : start; + end = (end < start) ? start : (end > 0xb0) ? 
0xb0 : end; + mpeg2dec->first_decode_slice = start; + mpeg2dec->nb_decode_slices = end - start; +} + +void mpeg2_pts (mpeg2dec_t * mpeg2dec, uint32_t pts) +{ + mpeg2dec->pts_previous = mpeg2dec->pts_current; + mpeg2dec->pts_current = pts; + mpeg2dec->num_pts++; + mpeg2dec->bytes_since_pts = 0; +} + +uint32_t mpeg2_accel (uint32_t accel) +{ + if (!mpeg2_accels) { + if (accel & MPEG2_ACCEL_DETECT) + accel |= mpeg2_detect_accel (); + mpeg2_accels = accel |= MPEG2_ACCEL_DETECT; + mpeg2_cpu_state_init (accel); + mpeg2_idct_init (accel); + mpeg2_mc_init (accel); + } + return mpeg2_accels & ~MPEG2_ACCEL_DETECT; +} + +mpeg2dec_t * mpeg2_init (void) +{ + mpeg2dec_t * mpeg2dec; + + mpeg2_accel (MPEG2_ACCEL_DETECT); + + mpeg2dec = (mpeg2dec_t *) mpeg2_malloc (sizeof (mpeg2dec_t), + ALLOC_MPEG2DEC); + if (mpeg2dec == NULL) + return NULL; + + memset (mpeg2dec, 0, sizeof (mpeg2dec_t)); + + mpeg2dec->chunk_buffer = (uint8_t *) mpeg2_malloc (BUFFER_SIZE + 4, + ALLOC_CHUNK); + + mpeg2dec->shift = 0xffffff00; + mpeg2dec->action = mpeg2_seek_sequence; + mpeg2dec->code = 0xb4; + mpeg2dec->first_decode_slice = 1; + mpeg2dec->nb_decode_slices = 0xb0 - 1; + mpeg2dec->convert_id = NULL; + + /* initialize substructures */ + mpeg2_header_state_init (mpeg2dec); + + return mpeg2dec; +} + +void mpeg2_close (mpeg2dec_t * mpeg2dec) +{ + int i; + + /* static uint8_t finalizer[] = {0,0,1,0xb4}; */ + /* mpeg2_decode_data (mpeg2dec, finalizer, finalizer+4); */ + + mpeg2_free (mpeg2dec->chunk_buffer); + if (!mpeg2dec->custom_fbuf) + for (i = mpeg2dec->alloc_index_user; i < mpeg2dec->alloc_index; i++) + mpeg2_free (mpeg2dec->fbuf_alloc[i].fbuf.buf[0]); + if (mpeg2dec->convert_start) + for (i = 0; i < 3; i++) + mpeg2_free (mpeg2dec->yuv_buf[i][0]); + if (mpeg2dec->convert_id) + mpeg2_free (mpeg2dec->convert_id); + mpeg2_free (mpeg2dec); +} diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/idct_alpha.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libmpeg2/idct_alpha.c Sun Apr 06 16:41:49 2003 +0000 @@ -0,0 +1,380 @@ +/* + * idct_alpha.c + * Copyright (C) 2002 Falk Hueffner + * Copyright (C) 2000-2002 Michel Lespinasse + * Copyright (C) 1999-2000 Aaron Holtzman + * + * This file is part of mpeg2dec, a free MPEG-2 video stream decoder. + * See http://libmpeg2.sourceforge.net/ for updates. + * + * mpeg2dec is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * mpeg2dec is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "config.h"
+
+#ifdef ARCH_ALPHA
+
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "alpha_asm.h"
+#include "attributes.h"
+
+#define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */
+#define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */
+#define W3 2408 /* 2048*sqrt (2)*cos (3*pi/16) */
+#define W5 1609 /* 2048*sqrt (2)*cos (5*pi/16) */
+#define W6 1108 /* 2048*sqrt (2)*cos (6*pi/16) */
+#define W7 565  /* 2048*sqrt (2)*cos (7*pi/16) */
+
+static uint8_t clip_lut[1024];
+#define CLIP(i) ((clip_lut+384)[(i)])
+
+#if 0
+#define BUTTERFLY(t0,t1,W0,W1,d0,d1)    \
+do {                                    \
+    t0 = W0*d0 + W1*d1;                 \
+    t1 = W0*d1 - W1*d0;                 \
+} while (0)
+#else
+#define BUTTERFLY(t0,t1,W0,W1,d0,d1)    \
+do {                                    \
+    int_fast32_t tmp = W0 * (d0 + d1);  \
+    t0 = tmp + (W1 - W0) * d1;          \
+    t1 = tmp - (W1 + W0) * d0;          \
+} while (0)
+#endif
+
+static void inline idct_row (int16_t * const block)
+{
+    uint64_t l, r;
+    int_fast32_t d0, d1, d2, d3;
+    int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3;
+    int_fast32_t t0, t1, t2, t3;
+
+    l = ldq (block);
+    r = ldq (block + 4);
+
+    /* shortcut */
+    if (likely (!((l & ~0xffffUL) | r))) {
+        uint64_t tmp = (uint16_t) (l << 3);
+        tmp |= tmp << 16;
+        tmp |= tmp << 32;
+        ((int32_t *)block)[0] = tmp;
+        ((int32_t *)block)[1] = tmp;
+        ((int32_t *)block)[2] = tmp;
+        ((int32_t *)block)[3] = tmp;
+        return;
+    }
+
+    d0 = (sextw (l) << 11) + 128;
+    d1 = sextw (extwl (l, 2));
+    d2 = sextw (extwl (l, 4)) << 11;
+    d3 = sextw (extwl (l, 6));
+    t0 = d0 + d2;
+    t1 = d0 - d2;
+    BUTTERFLY (t2, t3, W6, W2, d3, d1);
+    a0 = t0 + t2;
+    a1 = t1 + t3;
+    a2 = t1 - t3;
+    a3 = t0 - t2;
+
+    d0 = sextw (r);
+    d1 = sextw (extwl (r, 2));
+    d2 = sextw (extwl (r, 4));
+    d3 = sextw (extwl (r, 6));
+    BUTTERFLY (t0, t1, W7, W1, d3, d0);
+    BUTTERFLY (t2, t3, W3, W5, d1, d2);
+    b0 = t0 + t2;
+    b3 = t1 + t3;
+    t0 -= t2;
+    t1 -= t3;
+    b1 = ((t0 + t1) * 181) >> 8;
+    b2 = ((t0 - t1) * 181) >> 8;
+
+    block[0] = (a0 + b0) >> 8;
+    block[1] = (a1 + b1) >> 8;
+    block[2] = (a2 + b2) >> 8;
+    block[3] = (a3 + b3) >> 8;
+    block[4] = (a3 - b3) >> 8;
+    block[5] = (a2 - b2) >> 8;
+    block[6] = (a1 - b1) >> 8;
+    block[7] = (a0 - b0) >> 8;
+}
+
+static void inline idct_col (int16_t * const block)
+{
+    int_fast32_t d0, d1, d2, d3;
+    int_fast32_t a0, a1, a2, a3, b0, b1, b2, b3;
+    int_fast32_t t0, t1, t2, t3;
+
+    d0 = (block[8*0] << 11) + 65536;
+    d1 = block[8*1];
+    d2 = block[8*2] << 11;
+    d3 = block[8*3];
+    t0 = d0 + d2;
+    t1 = d0 - d2;
+    BUTTERFLY (t2, t3, W6, W2, d3, d1);
+    a0 = t0 + t2;
+    a1 = t1 + t3;
+    a2 = t1 - t3;
+    a3 = t0 - t2;
+
+    d0 = block[8*4];
+    d1 = block[8*5];
+    d2 = block[8*6];
+    d3 = block[8*7];
+    BUTTERFLY (t0, t1, W7, W1, d3, d0);
+    BUTTERFLY (t2, t3, W3, W5, d1, d2);
+    b0 = t0 + t2;
+    b3 = t1 + t3;
+    t0 = (t0 - t2) >> 8;
+    t1 = (t1 - t3) >> 8;
+    b1 = (t0 + t1) * 181;
+    b2 = (t0 - t1) * 181;
+
+    block[8*0] = (a0 + b0) >> 17;
+    block[8*1] = (a1 + b1) >> 17;
+    block[8*2] = (a2 + b2) >> 17;
+    block[8*3] = (a3 + b3) >> 17;
+    block[8*4] = (a3 - b3) >> 17;
+    block[8*5] = (a2 - b2) >> 17;
+    block[8*6] = (a1 - b1) >> 17;
+    block[8*7] = (a0 - b0) >> 17;
+}
+
+void mpeg2_idct_copy_mvi (int16_t * block, uint8_t * dest, const int stride)
+{
+    uint64_t clampmask;
+    int i;
+
+    for (i = 0; i < 8; i++)
+        idct_row (block + 8 * i);
+
+    for (i = 0; i < 8; i++)
+        idct_col (block + i);
+
+    clampmask = zap (-1, 0xaa); /* 0x00ff00ff00ff00ff */
+    do {
+        uint64_t 
shorts0, shorts1; + + shorts0 = ldq (block); + shorts0 = maxsw4 (shorts0, 0); + shorts0 = minsw4 (shorts0, clampmask); + stl (pkwb (shorts0), dest); + + shorts1 = ldq (block + 4); + shorts1 = maxsw4 (shorts1, 0); + shorts1 = minsw4 (shorts1, clampmask); + stl (pkwb (shorts1), dest + 4); + + stq (0, block); + stq (0, block + 4); + + dest += stride; + block += 8; + } while (--i); +} + +void mpeg2_idct_add_mvi (const int last, int16_t * block, + uint8_t * dest, const int stride) +{ + uint64_t clampmask; + uint64_t signmask; + int i; + + if (last != 129 || (block[0] & 7) == 4) { + for (i = 0; i < 8; i++) + idct_row (block + 8 * i); + for (i = 0; i < 8; i++) + idct_col (block + i); + clampmask = zap (-1, 0xaa); /* 0x00ff00ff00ff00ff */ + signmask = zap (-1, 0x33); + signmask ^= signmask >> 1; /* 0x8000800080008000 */ + + do { + uint64_t shorts0, pix0, signs0; + uint64_t shorts1, pix1, signs1; + + shorts0 = ldq (block); + shorts1 = ldq (block + 4); + + pix0 = unpkbw (ldl (dest)); + /* signed subword add (MMX paddw). */ + signs0 = shorts0 & signmask; + shorts0 &= ~signmask; + shorts0 += pix0; + shorts0 ^= signs0; + /* clamp. */ + shorts0 = maxsw4 (shorts0, 0); + shorts0 = minsw4 (shorts0, clampmask); + + /* next 4. */ + pix1 = unpkbw (ldl (dest + 4)); + signs1 = shorts1 & signmask; + shorts1 &= ~signmask; + shorts1 += pix1; + shorts1 ^= signs1; + shorts1 = maxsw4 (shorts1, 0); + shorts1 = minsw4 (shorts1, clampmask); + + stl (pkwb (shorts0), dest); + stl (pkwb (shorts1), dest + 4); + stq (0, block); + stq (0, block + 4); + + dest += stride; + block += 8; + } while (--i); + } else { + int DC; + uint64_t p0, p1, p2, p3, p4, p5, p6, p7; + uint64_t DCs; + + DC = (block[0] + 4) >> 3; + block[0] = block[63] = 0; + + p0 = ldq (dest + 0 * stride); + p1 = ldq (dest + 1 * stride); + p2 = ldq (dest + 2 * stride); + p3 = ldq (dest + 3 * stride); + p4 = ldq (dest + 4 * stride); + p5 = ldq (dest + 5 * stride); + p6 = ldq (dest + 6 * stride); + p7 = ldq (dest + 7 * stride); + + if (DC > 0) { + DCs = BYTE_VEC (likely (DC <= 255) ? DC : 255); + p0 += minub8 (DCs, ~p0); + p1 += minub8 (DCs, ~p1); + p2 += minub8 (DCs, ~p2); + p3 += minub8 (DCs, ~p3); + p4 += minub8 (DCs, ~p4); + p5 += minub8 (DCs, ~p5); + p6 += minub8 (DCs, ~p6); + p7 += minub8 (DCs, ~p7); + } else { + DCs = BYTE_VEC (likely (-DC <= 255) ? 
-DC : 255); + p0 -= minub8 (DCs, p0); + p1 -= minub8 (DCs, p1); + p2 -= minub8 (DCs, p2); + p3 -= minub8 (DCs, p3); + p4 -= minub8 (DCs, p4); + p5 -= minub8 (DCs, p5); + p6 -= minub8 (DCs, p6); + p7 -= minub8 (DCs, p7); + } + + stq (p0, dest + 0 * stride); + stq (p1, dest + 1 * stride); + stq (p2, dest + 2 * stride); + stq (p3, dest + 3 * stride); + stq (p4, dest + 4 * stride); + stq (p5, dest + 5 * stride); + stq (p6, dest + 6 * stride); + stq (p7, dest + 7 * stride); + } +} + +void mpeg2_idct_copy_alpha (int16_t * block, uint8_t * dest, const int stride) +{ + int i; + + for (i = 0; i < 8; i++) + idct_row (block + 8 * i); + for (i = 0; i < 8; i++) + idct_col (block + i); + do { + dest[0] = CLIP (block[0]); + dest[1] = CLIP (block[1]); + dest[2] = CLIP (block[2]); + dest[3] = CLIP (block[3]); + dest[4] = CLIP (block[4]); + dest[5] = CLIP (block[5]); + dest[6] = CLIP (block[6]); + dest[7] = CLIP (block[7]); + + stq(0, block); + stq(0, block + 4); + + dest += stride; + block += 8; + } while (--i); +} + +void mpeg2_idct_add_alpha (const int last, int16_t * block, + uint8_t * dest, const int stride) +{ + int i; + + if (last != 129 || (block[0] & 7) == 4) { + for (i = 0; i < 8; i++) + idct_row (block + 8 * i); + for (i = 0; i < 8; i++) + idct_col (block + i); + do { + dest[0] = CLIP (block[0] + dest[0]); + dest[1] = CLIP (block[1] + dest[1]); + dest[2] = CLIP (block[2] + dest[2]); + dest[3] = CLIP (block[3] + dest[3]); + dest[4] = CLIP (block[4] + dest[4]); + dest[5] = CLIP (block[5] + dest[5]); + dest[6] = CLIP (block[6] + dest[6]); + dest[7] = CLIP (block[7] + dest[7]); + + stq(0, block); + stq(0, block + 4); + + dest += stride; + block += 8; + } while (--i); + } else { + int DC; + + DC = (block[0] + 4) >> 3; + block[0] = block[63] = 0; + i = 8; + do { + dest[0] = CLIP (DC + dest[0]); + dest[1] = CLIP (DC + dest[1]); + dest[2] = CLIP (DC + dest[2]); + dest[3] = CLIP (DC + dest[3]); + dest[4] = CLIP (DC + dest[4]); + dest[5] = CLIP (DC + dest[5]); + dest[6] = CLIP (DC + dest[6]); + dest[7] = CLIP (DC + dest[7]); + dest += stride; + } while (--i); + } +} + +void mpeg2_idct_alpha_init(int no_mvi) +{ + extern uint8_t mpeg2_scan_norm[64]; + extern uint8_t mpeg2_scan_alt[64]; + int i, j; + + if (no_mvi) + for (i = -384; i < 640; i++) + clip_lut[i + 384] = (i < 0) ? 0 : ((i > 255) ? 255 : i); + for (i = 0; i < 64; i++) { + j = mpeg2_scan_norm[i]; + mpeg2_scan_norm[i] = ((j & 0x36) >> 1) | ((j & 0x09) << 2); + j = mpeg2_scan_alt[i]; + mpeg2_scan_alt[i] = ((j & 0x36) >> 1) | ((j & 0x09) << 2); + } +} + +#endif /* ARCH_ALPHA */ diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/idct_altivec.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libmpeg2/idct_altivec.c Sun Apr 06 16:41:49 2003 +0000 @@ -0,0 +1,705 @@ +/* + * idct_altivec.c + * Copyright (C) 2000-2002 Michel Lespinasse + * Copyright (C) 1999-2000 Aaron Holtzman + * + * This file is part of mpeg2dec, a free MPEG-2 video stream decoder. + * See http://libmpeg2.sourceforge.net/ for updates. + * + * mpeg2dec is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * mpeg2dec is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ALTIVEC__
+
+#include "config.h"
+
+#ifdef ARCH_PPC
+
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+#include "attributes.h"
+
+static const int16_t constants[5][8] ATTR_ALIGN(16) = {
+    {23170, 13573, 6518, 21895, -23170, -21895, 32, 31},
+    {16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725},
+    {22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521},
+    {21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692},
+    {19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722}
+};
+
+/*
+ * The asm code is generated with:
+ *
+ * gcc-2.95 -fvec -D__ALTIVEC__ -O9 -fomit-frame-pointer -mregnames -S
+ *      idct_altivec.c
+ *
+ * awk '{args=""; len=split ($2, arg, ",");
+ *      for (i=1; i<=len; i++) { a=arg[i]; if (i<len) a=a",";
+ *                               args = args sprintf ("%-6s", a) }
+ *      printf ("\t\"\t%-16s%-24s\\n\"\n", $1, args) }' idct_altivec.s |
+ * unexpand -a
+ */
+
+void mpeg2_idct_altivec_init (void)
+{
+    extern uint8_t mpeg2_scan_norm[64];
+    extern uint8_t mpeg2_scan_alt[64];
+    int i, j;
+
+    /* the altivec idct uses a transposed input, so we patch scan tables */
+    for (i = 0; i < 64; i++) {
+        j = mpeg2_scan_norm[i];
+        mpeg2_scan_norm[i] = (j >> 3) | ((j & 7) << 3);
+        j = mpeg2_scan_alt[i];
+        mpeg2_scan_alt[i] = (j >> 3) | ((j & 7) << 3);
+    }
+}
+
+#endif /* ARCH_PPC */
+
+#else /* __ALTIVEC__ */
+
+#define vector_s16_t vector signed short
+#define vector_u16_t vector unsigned short
+#define vector_s8_t vector signed char
+#define vector_u8_t vector unsigned char
+#define vector_s32_t vector signed int
+#define vector_u32_t vector unsigned int
+
+#define IDCT_HALF                                       \
+    /* 1st stage */                                     \
+    t1 = vec_mradds (a1, vx7, vx1);                     \
+    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));    \
+    t7 = vec_mradds (a2, vx5, vx3);                     \
+    t3 = vec_mradds (ma2, vx3, vx5);                    \
+                                                        \
+    /* 2nd stage */                                     \
+    t5 = vec_adds (vx0, vx4);                           \
+    t0 = vec_subs (vx0, vx4);                           \
+    t2 = vec_mradds (a0, vx6, vx2);                     \
+    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));    \
+    t6 = vec_adds (t8, t3);                             \
+    t3 = vec_subs (t8, t3);                             \
+    t8 = vec_subs (t1, t7);                             \
+    t1 = vec_adds (t1, t7);                             \
+                                                        \
+    /* 3rd stage */                                     \
+    t7 = vec_adds (t5, t2);                             \
+    t2 = vec_subs (t5, t2);                             \
+    t5 = vec_adds (t0, t4);                             \
+    t0 = vec_subs (t0, t4);                             \
+    t4 = vec_subs (t8, t3);                             \
+    t3 = vec_adds (t8, t3);                             \
+                                                        \
+    /* 4th stage */                                     \
+    vy0 = vec_adds (t7, t1);                            \
+    vy7 = vec_subs (t7, t1);                            \
+    vy1 = vec_mradds (c4, t3, t5);                      \
+    vy6 = vec_mradds (mc4, t3, t5);                     \
+    vy2 = vec_mradds (c4, t4, t0);                      \
+    vy5 = vec_mradds (mc4, t4, t0);                     \
+    vy3 = vec_adds (t2, t6);                            \
+    vy4 = vec_subs (t2, t6);
+
+#define IDCT                                                    \
+    vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;        \
+    vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;        \
+    vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias;          \
+    vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8;            \
+    vector_u16_t shift;                                         \
+                                                                \
+    c4 = vec_splat (constants[0], 0);                           \
+    a0 = vec_splat (constants[0], 1);                           \
+    a1 = vec_splat (constants[0], 2);                           \
+    a2 = vec_splat (constants[0], 3);                           \
+    mc4 = vec_splat (constants[0], 4);                          \
+    ma2 = vec_splat (constants[0], 5);                          \
+    bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3);     \
+                                                                \
+    zero = vec_splat_s16 (0);                                   \
+    shift = vec_splat_u16 (4);                                  \
+                                                                \
+    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
+    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
+    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
+    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
+    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
+    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
+    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
+    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
+                                                                \
+    IDCT_HALF                                                   \
+                                                                \
+    vx0 = 
vec_mergeh (vy0, vy4); \ + vx1 = vec_mergel (vy0, vy4); \ + vx2 = vec_mergeh (vy1, vy5); \ + vx3 = vec_mergel (vy1, vy5); \ + vx4 = vec_mergeh (vy2, vy6); \ + vx5 = vec_mergel (vy2, vy6); \ + vx6 = vec_mergeh (vy3, vy7); \ + vx7 = vec_mergel (vy3, vy7); \ + \ + vy0 = vec_mergeh (vx0, vx4); \ + vy1 = vec_mergel (vx0, vx4); \ + vy2 = vec_mergeh (vx1, vx5); \ + vy3 = vec_mergel (vx1, vx5); \ + vy4 = vec_mergeh (vx2, vx6); \ + vy5 = vec_mergel (vx2, vx6); \ + vy6 = vec_mergeh (vx3, vx7); \ + vy7 = vec_mergel (vx3, vx7); \ + \ + vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \ + vx1 = vec_mergel (vy0, vy4); \ + vx2 = vec_mergeh (vy1, vy5); \ + vx3 = vec_mergel (vy1, vy5); \ + vx4 = vec_mergeh (vy2, vy6); \ + vx5 = vec_mergel (vy2, vy6); \ + vx6 = vec_mergeh (vy3, vy7); \ + vx7 = vec_mergel (vy3, vy7); \ + \ + IDCT_HALF \ + \ + shift = vec_splat_u16 (6); \ + vx0 = vec_sra (vy0, shift); \ + vx1 = vec_sra (vy1, shift); \ + vx2 = vec_sra (vy2, shift); \ + vx3 = vec_sra (vy3, shift); \ + vx4 = vec_sra (vy4, shift); \ + vx5 = vec_sra (vy5, shift); \ + vx6 = vec_sra (vy6, shift); \ + vx7 = vec_sra (vy7, shift); + +static const vector_s16_t constants[5] = { + (vector_s16_t)(23170, 13573, 6518, 21895, -23170, -21895, 32, 31), + (vector_s16_t)(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725), + (vector_s16_t)(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521), + (vector_s16_t)(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692), + (vector_s16_t)(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722) +}; + +void mpeg2_idct_copy_altivec (vector_s16_t * const block, unsigned char * dest, + const int stride) +{ + vector_u8_t tmp; + + IDCT + +#define COPY(dest,src) \ + tmp = vec_packsu (src, src); \ + vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \ + vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest); + + COPY (dest, vx0) dest += stride; + COPY (dest, vx1) dest += stride; + COPY (dest, vx2) dest += stride; + COPY (dest, vx3) dest += stride; + COPY (dest, vx4) dest += stride; + COPY (dest, vx5) dest += stride; + COPY (dest, vx6) dest += stride; + COPY (dest, vx7) + + memset (block, 0, 64 * sizeof (signed short)); +} + +void mpeg2_idct_add_altivec (const int last, vector_s16_t * const block, + unsigned char * dest, const int stride) +{ + vector_u8_t tmp; + vector_s16_t tmp2, tmp3; + vector_u8_t perm0; + vector_u8_t perm1; + vector_u8_t p0, p1, p; + + IDCT + + p0 = vec_lvsl (0, dest); + p1 = vec_lvsl (stride, dest); + p = vec_splat_u8 (-1); + perm0 = vec_mergeh (p, p0); + perm1 = vec_mergeh (p, p1); + +#define ADD(dest,src,perm) \ + /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \ + tmp = vec_ld (0, dest); \ + tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \ + tmp3 = vec_adds (tmp2, src); \ + tmp = vec_packsu (tmp3, tmp3); \ + vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \ + vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest); + + ADD (dest, vx0, perm0) dest += stride; + ADD (dest, vx1, perm1) dest += stride; + ADD (dest, vx2, perm0) dest += stride; + ADD (dest, vx3, perm1) dest += stride; + ADD (dest, vx4, perm0) dest += stride; + ADD (dest, vx5, perm1) dest += stride; + ADD (dest, vx6, perm0) dest += stride; + ADD (dest, vx7, perm1) + + memset (block, 0, 64 * sizeof (signed short)); +} + +#endif /* __ALTIVEC__ */ diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/motion_comp_alpha.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libmpeg2/motion_comp_alpha.c Sun Apr 06 16:41:49 2003 +0000 @@ -0,0 +1,253 @@ +/* + * motion_comp_alpha.c + * Copyright (C) 2002 Falk 
Hueffner
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "config.h"
+
+#ifdef ARCH_ALPHA
+
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+#include "alpha_asm.h"
+
+static inline uint64_t avg2(uint64_t a, uint64_t b)
+{
+    return (a | b) - (((a ^ b) & BYTE_VEC(0xfe)) >> 1);
+}
+
+// Load two unaligned quadwords from addr. This macro only works if
+// addr is actually unaligned.
+#define ULOAD16(ret_l, ret_r, addr)                     \
+    do {                                                \
+        uint64_t _l = ldq_u(addr + 0);                  \
+        uint64_t _m = ldq_u(addr + 8);                  \
+        uint64_t _r = ldq_u(addr + 16);                 \
+        ret_l = extql(_l, addr) | extqh(_m, addr);      \
+        ret_r = extql(_m, addr) | extqh(_r, addr);      \
+    } while (0)
+
+// Load two aligned quadwords from addr.
+#define ALOAD16(ret_l, ret_r, addr)                     \
+    do {                                                \
+        ret_l = ldq(addr);                              \
+        ret_r = ldq(addr + 8);                          \
+    } while (0)
+
+#define OP8(LOAD, LOAD16, STORE)                        \
+    do {                                                \
+        STORE(LOAD(pixels), block);                     \
+        pixels += line_size;                            \
+        block += line_size;                             \
+    } while (--h)
+
+#define OP16(LOAD, LOAD16, STORE)                       \
+    do {                                                \
+        uint64_t l, r;                                  \
+        LOAD16(l, r, pixels);                           \
+        STORE(l, block);                                \
+        STORE(r, block + 8);                            \
+        pixels += line_size;                            \
+        block += line_size;                             \
+    } while (--h)
+
+#define OP8_X2(LOAD, LOAD16, STORE)                             \
+    do {                                                        \
+        uint64_t p0, p1;                                        \
+                                                                \
+        p0 = LOAD(pixels);                                      \
+        p1 = p0 >> 8 | ((uint64_t) pixels[8] << 56);            \
+        STORE(avg2(p0, p1), block);                             \
+        pixels += line_size;                                    \
+        block += line_size;                                     \
+    } while (--h)
+
+#define OP16_X2(LOAD, LOAD16, STORE)                            \
+    do {                                                        \
+        uint64_t p0, p1;                                        \
+                                                                \
+        LOAD16(p0, p1, pixels);                                 \
+        STORE(avg2(p0, p0 >> 8 | p1 << 56), block);             \
+        STORE(avg2(p1, p1 >> 8 | (uint64_t) pixels[16] << 56),  \
+              block + 8);                                       \
+        pixels += line_size;                                    \
+        block += line_size;                                     \
+    } while (--h)
+
+#define OP8_Y2(LOAD, LOAD16, STORE)                     \
+    do {                                                \
+        uint64_t p0, p1;                                \
+        p0 = LOAD(pixels);                              \
+        pixels += line_size;                            \
+        p1 = LOAD(pixels);                              \
+        do {                                            \
+            uint64_t av = avg2(p0, p1);                 \
+            if (--h == 0) line_size = 0;                \
+            pixels += line_size;                        \
+            p0 = p1;                                    \
+            p1 = LOAD(pixels);                          \
+            STORE(av, block);                           \
+            block += line_size;                         \
+        } while (h);                                    \
+    } while (0)
+
+#define OP16_Y2(LOAD, LOAD16, STORE)                    \
+    do {                                                \
+        uint64_t p0l, p0r, p1l, p1r;                    \
+        LOAD16(p0l, p0r, pixels);                       \
+        pixels += line_size;                            \
+        LOAD16(p1l, p1r, pixels);                       \
+        do {                                            \
+            uint64_t avl, avr;                          \
+            if (--h == 0) line_size = 0;                \
+            avl = avg2(p0l, p1l);                       \
+            avr = avg2(p0r, p1r);                       \
+            p0l = p1l;                                  \
+            p0r = p1r;                                  \
+            pixels += line_size;                        \
+            LOAD16(p1l, p1r, pixels);                   \
+            STORE(avl, block);                          \
+            STORE(avr, block + 8);                      \
+            block += line_size;                         \
+        } while (h);                                    \
+    } while (0)
+
+#define OP8_XY2(LOAD, LOAD16, STORE)                            \
+    do {                                                        \
+        uint64_t pl, ph;                                        \
+        uint64_t p1 = LOAD(pixels);                             \
+        uint64_t p2 = p1 >> 8 | ((uint64_t) pixels[8] << 56);   \
+                                                                \
+        ph = ((p1 & ~BYTE_VEC(0x03)) >> 2)                      \
+           + 
((p2 & ~BYTE_VEC(0x03)) >> 2); \ + pl = (p1 & BYTE_VEC(0x03)) \ + + (p2 & BYTE_VEC(0x03)); \ + \ + do { \ + uint64_t npl, nph; \ + \ + pixels += line_size; \ + p1 = LOAD(pixels); \ + p2 = (p1 >> 8) | ((uint64_t) pixels[8] << 56); \ + nph = ((p1 & ~BYTE_VEC(0x03)) >> 2) \ + + ((p2 & ~BYTE_VEC(0x03)) >> 2); \ + npl = (p1 & BYTE_VEC(0x03)) \ + + (p2 & BYTE_VEC(0x03)); \ + \ + STORE(ph + nph \ + + (((pl + npl + BYTE_VEC(0x02)) >> 2) \ + & BYTE_VEC(0x03)), block); \ + \ + block += line_size; \ + pl = npl; \ + ph = nph; \ + } while (--h); \ + } while (0) + +#define OP16_XY2(LOAD, LOAD16, STORE) \ + do { \ + uint64_t p0, p1, p2, p3, pl_l, ph_l, pl_r, ph_r; \ + LOAD16(p0, p2, pixels); \ + p1 = p0 >> 8 | (p2 << 56); \ + p3 = p2 >> 8 | ((uint64_t) pixels[16] << 56); \ + \ + ph_l = ((p0 & ~BYTE_VEC(0x03)) >> 2) \ + + ((p1 & ~BYTE_VEC(0x03)) >> 2); \ + pl_l = (p0 & BYTE_VEC(0x03)) \ + + (p1 & BYTE_VEC(0x03)); \ + ph_r = ((p2 & ~BYTE_VEC(0x03)) >> 2) \ + + ((p3 & ~BYTE_VEC(0x03)) >> 2); \ + pl_r = (p2 & BYTE_VEC(0x03)) \ + + (p3 & BYTE_VEC(0x03)); \ + \ + do { \ + uint64_t npl_l, nph_l, npl_r, nph_r; \ + \ + pixels += line_size; \ + LOAD16(p0, p2, pixels); \ + p1 = p0 >> 8 | (p2 << 56); \ + p3 = p2 >> 8 | ((uint64_t) pixels[16] << 56); \ + nph_l = ((p0 & ~BYTE_VEC(0x03)) >> 2) \ + + ((p1 & ~BYTE_VEC(0x03)) >> 2); \ + npl_l = (p0 & BYTE_VEC(0x03)) \ + + (p1 & BYTE_VEC(0x03)); \ + nph_r = ((p2 & ~BYTE_VEC(0x03)) >> 2) \ + + ((p3 & ~BYTE_VEC(0x03)) >> 2); \ + npl_r = (p2 & BYTE_VEC(0x03)) \ + + (p3 & BYTE_VEC(0x03)); \ + \ + STORE(ph_l + nph_l \ + + (((pl_l + npl_l + BYTE_VEC(0x02)) >> 2) \ + & BYTE_VEC(0x03)), block); \ + STORE(ph_r + nph_r \ + + (((pl_r + npl_r + BYTE_VEC(0x02)) >> 2) \ + & BYTE_VEC(0x03)), block + 8); \ + \ + block += line_size; \ + pl_l = npl_l; \ + ph_l = nph_l; \ + pl_r = npl_r; \ + ph_r = nph_r; \ + } while (--h); \ + } while (0) + +#define MAKE_OP(OPNAME, SIZE, SUFF, OPKIND, STORE) \ +static void MC_ ## OPNAME ## _ ## SUFF ## _ ## SIZE ## _alpha \ + (uint8_t *restrict block, const uint8_t *restrict pixels, \ + int line_size, int h) \ +{ \ + if ((uint64_t) pixels & 0x7) { \ + OPKIND(uldq, ULOAD16, STORE); \ + } else { \ + OPKIND(ldq, ALOAD16, STORE); \ + } \ +} + +#define PIXOP(OPNAME, STORE) \ + MAKE_OP(OPNAME, 8, o, OP8, STORE); \ + MAKE_OP(OPNAME, 8, x, OP8_X2, STORE); \ + MAKE_OP(OPNAME, 8, y, OP8_Y2, STORE); \ + MAKE_OP(OPNAME, 8, xy, OP8_XY2, STORE); \ + MAKE_OP(OPNAME, 16, o, OP16, STORE); \ + MAKE_OP(OPNAME, 16, x, OP16_X2, STORE); \ + MAKE_OP(OPNAME, 16, y, OP16_Y2, STORE); \ + MAKE_OP(OPNAME, 16, xy, OP16_XY2, STORE); + +#define STORE(l, b) stq(l, b) +PIXOP(put, STORE); + +#undef STORE +#define STORE(l, b) stq(avg2(l, ldq(b)), b); +PIXOP(avg, STORE); + +mpeg2_mc_t mpeg2_mc_alpha = { + { MC_put_o_16_alpha, MC_put_x_16_alpha, + MC_put_y_16_alpha, MC_put_xy_16_alpha, + MC_put_o_8_alpha, MC_put_x_8_alpha, + MC_put_y_8_alpha, MC_put_xy_8_alpha }, + { MC_avg_o_16_alpha, MC_avg_x_16_alpha, + MC_avg_y_16_alpha, MC_avg_xy_16_alpha, + MC_avg_o_8_alpha, MC_avg_x_8_alpha, + MC_avg_y_8_alpha, MC_avg_xy_8_alpha } +}; + +#endif diff -r 08496327b7ec -r 89b48bc6c441 libmpeg2/motion_comp_altivec.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/libmpeg2/motion_comp_altivec.c Sun Apr 06 16:41:49 2003 +0000 @@ -0,0 +1,2019 @@ +/* + * motion_comp_altivec.c + * Copyright (C) 2000-2002 Michel Lespinasse + * Copyright (C) 1999-2000 Aaron Holtzman + * + * This file is part of mpeg2dec, a free MPEG-2 video stream decoder. + * See http://libmpeg2.sourceforge.net/ for updates. 
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ALTIVEC__
+
+#include "config.h"
+
+#ifdef ARCH_PPC
+
+#include <inttypes.h>
+
+#include "mpeg2.h"
+#include "mpeg2_internal.h"
+
+/*
+ * The asm code is generated with:
+ *
+ * gcc-2.95 -fvec -D__ALTIVEC__ -O9 -fomit-frame-pointer -mregnames -S
+ *      motion_comp_altivec.c
+ *
+ * sed 's/.L/._L/g' motion_comp_altivec.s |
+ * awk '{args=""; len=split ($2, arg, ",");
+ *      for (i=1; i<=len; i++) { a=arg[i]; if (i<len) a=a",";
+ *                               args = args sprintf ("%-6s", a) }
+ *      printf ("\t\"\t%-16s%-24s\\n\"\n", $1, args) }' |
+ * unexpand -a
+ */
+
+#endif /* ARCH_PPC */
+
+#else /* __ALTIVEC__ */
+
+#define vector_u8_t vector unsigned char
+#define vector_u16_t vector unsigned short
+#define vector_u32_t vector unsigned int
+
+void MC_put_o_16_altivec (unsigned char * dest, const unsigned char * ref,
+                          const int stride, int height)
+{
+    vector_u8_t perm, ref0, ref1, tmp;
+
+    perm = vec_lvsl (0, ref);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    tmp = vec_perm (ref0, ref1, perm);
+
+    do {
+        ref0 = vec_ld (0, ref);
+        ref1 = vec_ld (15, ref);
+        ref += stride;
+        vec_st (tmp, 0, dest);
+        tmp = vec_perm (ref0, ref1, perm);
+
+        ref0 = vec_ld (0, ref);
+        ref1 = vec_ld (15, ref);
+        ref += stride;
+        vec_st (tmp, stride, dest);
+        dest += 2*stride;
+        tmp = vec_perm (ref0, ref1, perm);
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    vec_st (tmp, 0, dest);
+    tmp = vec_perm (ref0, ref1, perm);
+    vec_st (tmp, stride, dest);
+}
+
+void MC_put_o_8_altivec (unsigned char * dest, const unsigned char * ref,
+                         const int stride, int height)
+{
+    vector_u8_t perm0, perm1, tmp0, tmp1, ref0, ref1;
+
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+
+    do {
+        ref0 = vec_ld (0, ref);
+        ref1 = vec_ld (7, ref);
+        ref += stride;
+        vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+        vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+        dest += stride;
+        tmp1 = vec_perm (ref0, ref1, perm1);
+
+        ref0 = vec_ld (0, ref);
+        ref1 = vec_ld (7, ref);
+        ref += stride;
+        vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+        vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+        dest += stride;
+        tmp0 = vec_perm (ref0, ref1, perm0);
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp1 = vec_perm (ref0, ref1, perm1);
+    vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
+void MC_put_x_16_altivec (unsigned char * dest, const unsigned char * ref,
+                          const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, tmp;
+
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, vec_splat_u8 (1));
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    tmp = vec_avg (vec_perm (ref0, ref1, permA),
+                   vec_perm (ref0, ref1, 
+void MC_put_o_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0, perm1, tmp0, tmp1, ref0, ref1;
+
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_perm (ref0, ref1, perm1);
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_perm (ref0, ref1, perm0);
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp1 = vec_perm (ref0, ref1, perm1);
+    vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
+void MC_put_x_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, tmp;
+
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, vec_splat_u8 (1));
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		   vec_perm (ref0, ref1, permB));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	vec_st (tmp, 0, dest);
+	tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		       vec_perm (ref0, ref1, permB));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		       vec_perm (ref0, ref1, permB));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    vec_st (tmp, 0, dest);
+    tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		   vec_perm (ref0, ref1, permB));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_put_x_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0A, perm0B, perm1A, perm1B, ones, tmp0, tmp1, ref0, ref1;
+
+    ones = vec_splat_u8 (1);
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0A = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    perm0B = vec_add (perm0A, ones);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1A = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+    perm1B = vec_add (perm1A, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    tmp0 = vec_avg (vec_perm (ref0, ref1, perm0A),
+		    vec_perm (ref0, ref1, perm0B));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_avg (vec_perm (ref0, ref1, perm1A),
+			vec_perm (ref0, ref1, perm1B));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_avg (vec_perm (ref0, ref1, perm0A),
+			vec_perm (ref0, ref1, perm0B));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp1 = vec_avg (vec_perm (ref0, ref1, perm1A),
+		    vec_perm (ref0, ref1, perm1B));
+    vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
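The _x variants produce the horizontal half-pel case: permB is permA plus one, so the two vec_perm results are the same bytes and their right-hand neighbours, and vec_avg rounds up like (a + b + 1) >> 1. The 16-wide case loads ref1 at offset 16 because 17 source bytes are needed. A scalar reference for the same output; the function name is made up.

#include <stdint.h>

static void mc_put_x_scalar (uint8_t * dest, const uint8_t * ref,
			     int stride, int width, int height)
{
    do {
	for (int i = 0; i < width; i++)		/* vec_avg rounds up */
	    dest[i] = (uint8_t) ((ref[i] + ref[i + 1] + 1) >> 1);
	ref += stride;
	dest += stride;
    } while (--height);
}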
+void MC_put_y_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm, ref0, ref1, tmp0, tmp1, tmp;
+
+    perm = vec_lvsl (0, ref);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm);
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    tmp1 = vec_perm (ref0, ref1, perm);
+    tmp = vec_avg (tmp0, tmp1);
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	vec_st (tmp, 0, dest);
+	tmp0 = vec_perm (ref0, ref1, perm);
+	tmp = vec_avg (tmp0, tmp1);
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp1 = vec_perm (ref0, ref1, perm);
+	tmp = vec_avg (tmp0, tmp1);
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    vec_st (tmp, 0, dest);
+    tmp0 = vec_perm (ref0, ref1, perm);
+    tmp = vec_avg (tmp0, tmp1);
+    vec_st (tmp, stride, dest);
+}
+
+void MC_put_y_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0, perm1, tmp0, tmp1, tmp, ref0, ref1;
+
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    tmp1 = vec_perm (ref0, ref1, perm1);
+    tmp = vec_avg (tmp0, tmp1);
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_perm (ref0, ref1, perm0);
+	tmp = vec_avg (tmp0, tmp1);
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_perm (ref0, ref1, perm1);
+	tmp = vec_avg (tmp0, tmp1);
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+    tmp = vec_avg (tmp0, tmp1);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
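The _y variants average two vertically adjacent rows and keep the previous row's permuted bytes around (tmp0 and tmp1 alternate roles), so each source row is loaded only once. A scalar sketch of that row reuse, assuming width <= 16; the name is illustrative.

#include <stdint.h>
#include <string.h>

static void mc_put_y_scalar (uint8_t * dest, const uint8_t * ref,
			     int stride, int width, int height)
{
    uint8_t prev[16];

    memcpy (prev, ref, width);	/* peeled first row, like tmp0 above */
    ref += stride;
    do {
	for (int i = 0; i < width; i++) {
	    uint8_t cur = ref[i];
	    dest[i] = (uint8_t) ((prev[i] + cur + 1) >> 1);
	    prev[i] = cur;	/* this row feeds the next average */
	}
	ref += stride;
	dest += stride;
    } while (--height);
}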
+void MC_put_xy_16_altivec (unsigned char * dest, const unsigned char * ref,
+			   const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, A, B, avg0, avg1, xor0, xor1, tmp;
+    vector_u8_t ones;
+
+    ones = vec_splat_u8 (1);
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg1 = vec_avg (A, B);
+    xor1 = vec_xor (A, B);
+    tmp = vec_sub (vec_avg (avg0, avg1),
+		   vec_and (vec_and (ones, vec_or (xor0, xor1)),
+			    vec_xor (avg0, avg1)));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	vec_st (tmp, 0, dest);
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	avg0 = vec_avg (A, B);
+	xor0 = vec_xor (A, B);
+	tmp = vec_sub (vec_avg (avg0, avg1),
+		       vec_and (vec_and (ones, vec_or (xor0, xor1)),
+				vec_xor (avg0, avg1)));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	avg1 = vec_avg (A, B);
+	xor1 = vec_xor (A, B);
+	tmp = vec_sub (vec_avg (avg0, avg1),
+		       vec_and (vec_and (ones, vec_or (xor0, xor1)),
+				vec_xor (avg0, avg1)));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    vec_st (tmp, 0, dest);
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+    tmp = vec_sub (vec_avg (avg0, avg1),
+		   vec_and (vec_and (ones, vec_or (xor0, xor1)),
+			    vec_xor (avg0, avg1)));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_put_xy_8_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm0A, perm0B, perm1A, perm1B, ref0, ref1, A, B;
+    vector_u8_t avg0, avg1, xor0, xor1, tmp, ones;
+
+    ones = vec_splat_u8 (1);
+    perm0A = vec_lvsl (0, ref);
+    perm0A = vec_mergeh (perm0A, perm0A);
+    perm0A = vec_pack ((vector_u16_t)perm0A, (vector_u16_t)perm0A);
+    perm0B = vec_add (perm0A, ones);
+    perm1A = vec_lvsl (stride, ref);
+    perm1A = vec_mergeh (perm1A, perm1A);
+    perm1A = vec_pack ((vector_u16_t)perm1A, (vector_u16_t)perm1A);
+    perm1B = vec_add (perm1A, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, perm0A);
+    B = vec_perm (ref0, ref1, perm0B);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, perm1A);
+    B = vec_perm (ref0, ref1, perm1B);
+    avg1 = vec_avg (A, B);
+    xor1 = vec_xor (A, B);
+    tmp = vec_sub (vec_avg (avg0, avg1),
+		   vec_and (vec_and (ones, vec_or (xor0, xor1)),
+			    vec_xor (avg0, avg1)));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	A = vec_perm (ref0, ref1, perm0A);
+	B = vec_perm (ref0, ref1, perm0B);
+	avg0 = vec_avg (A, B);
+	xor0 = vec_xor (A, B);
+	tmp = vec_sub (vec_avg (avg0, avg1),
+		       vec_and (vec_and (ones, vec_or (xor0, xor1)),
+				vec_xor (avg0, avg1)));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	A = vec_perm (ref0, ref1, perm1A);
+	B = vec_perm (ref0, ref1, perm1B);
+	avg1 = vec_avg (A, B);
+	xor1 = vec_xor (A, B);
+	tmp = vec_sub (vec_avg (avg0, avg1),
+		       vec_and (vec_and (ones, vec_or (xor0, xor1)),
+				vec_xor (avg0, avg1)));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+    dest += stride;
+    A = vec_perm (ref0, ref1, perm0A);
+    B = vec_perm (ref0, ref1, perm0B);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+    tmp = vec_sub (vec_avg (avg0, avg1),
+		   vec_and (vec_and (ones, vec_or (xor0, xor1)),
+			    vec_xor (avg0, avg1)));
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
+#if 0
+void MC_put_xy_8_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, A, B, C, D, tmp, zero, ones;
+    vector_u16_t splat2, temp;
+
+    ones = vec_splat_u8 (1);
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, ones);
+
+    zero = vec_splat_u8 (0);
+    splat2 = vec_splat_u16 (2);
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	C = vec_perm (ref0, ref1, permA);
+	D = vec_perm (ref0, ref1, permB);
+
+	temp = vec_add (vec_add ((vector_u16_t)vec_mergeh (zero, A),
+				 (vector_u16_t)vec_mergeh (zero, B)),
+			vec_add ((vector_u16_t)vec_mergeh (zero, C),
+				 (vector_u16_t)vec_mergeh (zero, D)));
+	temp = vec_sr (vec_add (temp, splat2), splat2);
+	tmp = vec_pack (temp, temp);
+
+	vec_st (tmp, 0, dest);
+	dest += stride;
+	tmp = vec_avg (vec_perm (ref0, ref1, permA),
+		       vec_perm (ref0, ref1, permB));
+    } while (--height);
+}
+#endif
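The xy case cannot simply average the two half-pel averages: vec_avg rounds up, so avg2 (avg2 (a, b), avg2 (c, d)) can exceed the correctly rounded (a + b + c + d + 2) >> 2 by one. The vec_sub term removes exactly that bias: the result is one too big precisely when at least one of a^b, c^d is odd (an inner average rounded up) and avg0 and avg1 differ in their lowest bit. A brute-force check of the identity over the low six bits of every operand:

#include <stdio.h>

static unsigned avg2 (unsigned x, unsigned y) { return (x + y + 1) >> 1; }

int main (void)
{
    for (unsigned a = 0; a < 64; a++)
	for (unsigned b = 0; b < 64; b++)
	    for (unsigned c = 0; c < 64; c++)
		for (unsigned d = 0; d < 64; d++) {
		    unsigned want = (a + b + c + d + 2) >> 2;
		    unsigned e0 = avg2 (a, b), e1 = avg2 (c, d);
		    unsigned got = avg2 (e0, e1)
				 - (1 & ((a ^ b) | (c ^ d)) & (e0 ^ e1));
		    if (got != want) {
			printf ("mismatch at %u %u %u %u\n", a, b, c, d);
			return 1;
		    }
		}
    printf ("identity holds\n");
    return 0;
}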
+void MC_avg_o_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm, ref0, ref1, tmp, prev;
+
+    perm = vec_lvsl (0, ref);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_st (tmp, 0, dest);
+	tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	prev = vec_ld (2*stride, dest);
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    prev = vec_ld (stride, dest);
+    vec_st (tmp, 0, dest);
+    tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_avg_o_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0, perm1, tmp0, tmp1, ref0, ref1, prev;
+
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    tmp0 = vec_avg (prev, vec_perm (ref0, ref1, perm0));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_avg (prev, vec_perm (ref0, ref1, perm1));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_avg (prev, vec_perm (ref0, ref1, perm0));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    prev = vec_ld (stride, dest);
+    vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp1 = vec_avg (prev, vec_perm (ref0, ref1, perm1));
+    vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
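The MC_avg_* family blends the freshly interpolated block into what is already stored at dest with one more rounded average, as used e.g. when combining forward and backward predictions; prev is fetched one or two strides ahead of the store so the load and store streams stay pipelined. A scalar equivalent of the o case; the name is made up.

#include <stdint.h>

static void mc_avg_o_scalar (uint8_t * dest, const uint8_t * ref,
			     int stride, int width, int height)
{
    do {
	for (int i = 0; i < width; i++)
	    dest[i] = (uint8_t) ((dest[i] + ref[i] + 1) >> 1);
	ref += stride;
	dest += stride;
    } while (--height);
}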
+void MC_avg_x_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, tmp, prev;
+
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, vec_splat_u8 (1));
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    prev = vec_ld (0, dest);
+    ref += stride;
+    tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+				  vec_perm (ref0, ref1, permB)));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_st (tmp, 0, dest);
+	tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+				      vec_perm (ref0, ref1, permB)));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	prev = vec_ld (2*stride, dest);
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+				      vec_perm (ref0, ref1, permB)));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    prev = vec_ld (stride, dest);
+    vec_st (tmp, 0, dest);
+    tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+				  vec_perm (ref0, ref1, permB)));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_avg_x_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0A, perm0B, perm1A, perm1B, ones, tmp0, tmp1, ref0, ref1;
+    vector_u8_t prev;
+
+    ones = vec_splat_u8 (1);
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0A = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    perm0B = vec_add (perm0A, ones);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1A = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+    perm1B = vec_add (perm1A, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    prev = vec_ld (0, dest);
+    ref += stride;
+    tmp0 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm0A),
+				   vec_perm (ref0, ref1, perm0B)));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm1A),
+				       vec_perm (ref0, ref1, perm1B)));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm0A),
+				       vec_perm (ref0, ref1, perm0B)));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    prev = vec_ld (stride, dest);
+    vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp1 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm1A),
+				   vec_perm (ref0, ref1, perm1B)));
+    vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
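The 8-wide variants never issue a 16-byte vec_st, which would clobber the eight bytes beside the current block. Instead, the vec_mergeh/vec_pack preamble appears to build a permute map that repeats the eight useful bytes in both halves of the result vector, so the two vec_ste element stores write the correct four bytes whichever half of a quadword dest falls in; that reading of the perm setup is an interpretation, not stated in the patch. A scalar model of the partial store, assuming dest is 4-byte aligned as macroblock addressing guarantees:

#include <stdint.h>
#include <string.h>

static void store8 (uint8_t * dest, const uint8_t res[16])
{
    uint32_t w;

    memcpy (&w, res + 0, 4);	/* vec_ste (..., 0, dest) */
    memcpy (dest + 0, &w, 4);
    memcpy (&w, res + 4, 4);	/* vec_ste (..., 4, dest) */
    memcpy (dest + 4, &w, 4);	/* neighbouring bytes untouched */
}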
+void MC_avg_y_16_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm, ref0, ref1, tmp0, tmp1, tmp, prev;
+
+    perm = vec_lvsl (0, ref);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm);
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    tmp1 = vec_perm (ref0, ref1, perm);
+    tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_st (tmp, 0, dest);
+	tmp0 = vec_perm (ref0, ref1, perm);
+	tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (15, ref);
+	ref += stride;
+	prev = vec_ld (2*stride, dest);
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	tmp1 = vec_perm (ref0, ref1, perm);
+	tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (15, ref);
+    prev = vec_ld (stride, dest);
+    vec_st (tmp, 0, dest);
+    tmp0 = vec_perm (ref0, ref1, perm);
+    tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_avg_y_8_altivec (unsigned char * dest, const unsigned char * ref,
+			 const int stride, int height)
+{
+    vector_u8_t perm0, perm1, tmp0, tmp1, tmp, ref0, ref1, prev;
+
+    tmp0 = vec_lvsl (0, ref);
+    tmp0 = vec_mergeh (tmp0, tmp0);
+    perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+    tmp1 = vec_lvsl (stride, ref);
+    tmp1 = vec_mergeh (tmp1, tmp1);
+    perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    tmp1 = vec_perm (ref0, ref1, perm1);
+    tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp0 = vec_perm (ref0, ref1, perm0);
+	tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (7, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	tmp1 = vec_perm (ref0, ref1, perm1);
+	tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (7, ref);
+    prev = vec_ld (stride, dest);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+    dest += stride;
+    tmp0 = vec_perm (ref0, ref1, perm0);
+    tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
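Every function here converts height into (height >> 1) - 1 pair-iterations, peeling the first loads ahead of the loop and the last stores after it, so that inside the loop the loads for the next rows overlap the store of the current ones — classic software pipelining for in-order Alpha/PPC cores. The control shape in scalar form, simplified to one row per iteration and assuming height >= 2:

#include <stdint.h>
#include <string.h>

static void copy_rows_pipelined (uint8_t * dest, const uint8_t * ref,
				 int stride, int height)
{
    uint8_t cur[16], next[16];

    memcpy (cur, ref, 16);		/* peeled first load */
    ref += stride;
    height -= 1;
    do {
	memcpy (next, ref, 16);		/* load row n+1 ...  */
	ref += stride;
	memcpy (dest, cur, 16);		/* ... store row n   */
	dest += stride;
	memcpy (cur, next, 16);
    } while (--height);
    memcpy (dest, cur, 16);		/* peeled last store */
}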
+void MC_avg_xy_16_altivec (unsigned char * dest, const unsigned char * ref,
+			   const int stride, int height)
+{
+    vector_u8_t permA, permB, ref0, ref1, A, B, avg0, avg1, xor0, xor1, tmp;
+    vector_u8_t ones, prev;
+
+    ones = vec_splat_u8 (1);
+    permA = vec_lvsl (0, ref);
+    permB = vec_add (permA, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg1 = vec_avg (A, B);
+    xor1 = vec_xor (A, B);
+    tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+				  vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					   vec_xor (avg0, avg1))));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_st (tmp, 0, dest);
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	avg0 = vec_avg (A, B);
+	xor0 = vec_xor (A, B);
+	tmp = vec_avg (prev,
+		       vec_sub (vec_avg (avg0, avg1),
+				vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					 vec_xor (avg0, avg1))));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (16, ref);
+	ref += stride;
+	prev = vec_ld (2*stride, dest);
+	vec_st (tmp, stride, dest);
+	dest += 2*stride;
+	A = vec_perm (ref0, ref1, permA);
+	B = vec_perm (ref0, ref1, permB);
+	avg1 = vec_avg (A, B);
+	xor1 = vec_xor (A, B);
+	tmp = vec_avg (prev,
+		       vec_sub (vec_avg (avg0, avg1),
+				vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					 vec_xor (avg0, avg1))));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (16, ref);
+    prev = vec_ld (stride, dest);
+    vec_st (tmp, 0, dest);
+    A = vec_perm (ref0, ref1, permA);
+    B = vec_perm (ref0, ref1, permB);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+    tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+				  vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					   vec_xor (avg0, avg1))));
+    vec_st (tmp, stride, dest);
+}
+
+void MC_avg_xy_8_altivec (unsigned char * dest, const unsigned char * ref,
+			  const int stride, int height)
+{
+    vector_u8_t perm0A, perm0B, perm1A, perm1B, ref0, ref1, A, B;
+    vector_u8_t avg0, avg1, xor0, xor1, tmp, ones, prev;
+
+    ones = vec_splat_u8 (1);
+    perm0A = vec_lvsl (0, ref);
+    perm0A = vec_mergeh (perm0A, perm0A);
+    perm0A = vec_pack ((vector_u16_t)perm0A, (vector_u16_t)perm0A);
+    perm0B = vec_add (perm0A, ones);
+    perm1A = vec_lvsl (stride, ref);
+    perm1A = vec_mergeh (perm1A, perm1A);
+    perm1A = vec_pack ((vector_u16_t)perm1A, (vector_u16_t)perm1A);
+    perm1B = vec_add (perm1A, ones);
+
+    height = (height >> 1) - 1;
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    A = vec_perm (ref0, ref1, perm0A);
+    B = vec_perm (ref0, ref1, perm0B);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    ref += stride;
+    prev = vec_ld (0, dest);
+    A = vec_perm (ref0, ref1, perm1A);
+    B = vec_perm (ref0, ref1, perm1B);
+    avg1 = vec_avg (A, B);
+    xor1 = vec_xor (A, B);
+    tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+				  vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					   vec_xor (avg0, avg1))));
+
+    do {
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	A = vec_perm (ref0, ref1, perm0A);
+	B = vec_perm (ref0, ref1, perm0B);
+	avg0 = vec_avg (A, B);
+	xor0 = vec_xor (A, B);
+	tmp = vec_avg (prev,
+		       vec_sub (vec_avg (avg0, avg1),
+				vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					 vec_xor (avg0, avg1))));
+
+	ref0 = vec_ld (0, ref);
+	ref1 = vec_ld (8, ref);
+	ref += stride;
+	prev = vec_ld (stride, dest);
+	vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+	vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+	dest += stride;
+	A = vec_perm (ref0, ref1, perm1A);
+	B = vec_perm (ref0, ref1, perm1B);
+	avg1 = vec_avg (A, B);
+	xor1 = vec_xor (A, B);
+	tmp = vec_avg (prev,
+		       vec_sub (vec_avg (avg0, avg1),
+				vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					 vec_xor (avg0, avg1))));
+    } while (--height);
+
+    ref0 = vec_ld (0, ref);
+    ref1 = vec_ld (8, ref);
+    prev = vec_ld (stride, dest);
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+    dest += stride;
+    A = vec_perm (ref0, ref1, perm0A);
+    B = vec_perm (ref0, ref1, perm0B);
+    avg0 = vec_avg (A, B);
+    xor0 = vec_xor (A, B);
+    tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+				  vec_and (vec_and (ones, vec_or (xor0, xor1)),
+					   vec_xor (avg0, avg1))));
+    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
+#endif /* __ALTIVEC__ */
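Like mpeg2_mc_alpha earlier in this patch, these functions are meant to be gathered into an mpeg2_mc_t with eight put and eight avg entries, ordered o/x/y/xy at width 16 then o/x/y/xy at width 8, so the decoder can dispatch on the half-pel bits of the motion vector and the block width. A sketch of how such a table is consumed; the struct layout mirrors the tables above, but the type names and index arithmetic are illustrative, not libmpeg2's actual definitions.

#include <stdint.h>

typedef void mc_fn (uint8_t * dest, const uint8_t * ref,
		    const int stride, int height);

typedef struct {
    mc_fn * put[8];	/* o/x/y/xy at width 16, then at width 8 */
    mc_fn * avg[8];
} mc_table_t;

static void predict (const mc_table_t * mc, int second_prediction,
		     int width8, int dx, int dy,
		     uint8_t * dest, const uint8_t * ref,
		     int stride, int height)
{
    /* half-pel bits select o/x/y/xy; full-pel parts offset ref */
    int idx = (width8 ? 4 : 0) + ((dy & 1) << 1) + (dx & 1);
    mc_fn * const * tab = second_prediction ? mc->avg : mc->put;

    tab[idx] (dest, ref + (dy >> 1) * stride + (dx >> 1), stride, height);
}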