# HG changeset patch
# User arpi_esp
# Date 988763356 0
# Node ID 32697fe580552760779ea91316cd57a5530b3252
# Parent  5df429e875f1f88b556fd0f1e3510cb0775910d4
SSE+2.2.x+SIGILL bug fixed - SSE code disabled...

diff -r 5df429e875f1 -r 32697fe58055 libvo/fastmemcpy.h
--- a/libvo/fastmemcpy.h	Wed May 02 00:12:01 2001 +0000
+++ b/libvo/fastmemcpy.h	Wed May 02 00:29:16 2001 +0000
@@ -4,6 +4,13 @@
 #ifdef USE_FASTMEMCPY
 #include
 
+// Enable this code if the SSE version works (and is faster) for you!
+#if 0
+#ifdef HAVE_SSE
+#define FASTMEMCPY_SSE
+#endif
+#endif
+
 /*
  This part of code was taken by me from Linux-2.4.3 and slightly modified
 for MMX, MMX2, SSE instruction set. I have done it since linux uses page aligned
@@ -54,7 +61,7 @@
 #if defined( HAVE_MMX2 ) || defined( HAVE_3DNOW ) || defined( HAVE_MMX )
 
 #undef HAVE_MMX1
-#if defined(HAVE_MMX) && !defined(HAVE_MMX2) && !defined(HAVE_3DNOW) && !defined(HAVE_SSE)
+#if defined(HAVE_MMX) && !defined(HAVE_MMX2) && !defined(HAVE_3DNOW) && !defined(FASTMEMCPY_SSE)
 /* means: mmx v.1. Note: Since we added alignment of the destination, it speeds
    up memory copying on PentMMX, Celeron-1 and P2 by up to 12% versus the
    standard (non MMX-optimized) version. */
@@ -82,7 +89,7 @@
 : "memory");\
 }
 
-#ifdef HAVE_SSE
+#ifdef FASTMEMCPY_SSE
 #define MMREG_SIZE 16
 #else
 #define MMREG_SIZE 8
@@ -141,7 +148,7 @@
    perform reads and writes in multiples of the number of the processor's
    decoders, but that's not always possible.
 */
-#ifdef HAVE_SSE /* Only P3 (may be Cyrix3) */
+#ifdef FASTMEMCPY_SSE /* Only P3 (maybe Cyrix3) */
 	if(((unsigned long)from) & 15)
 	/* if SRC is misaligned */
 	for(; i>0; i--)
@@ -215,7 +222,7 @@
 	 * is needed to become ordered again. */
 	__asm__ __volatile__ ("sfence":::"memory");
 #endif
-#ifndef HAVE_SSE
+#ifndef FASTMEMCPY_SSE
 	/* enables FPU use again */
 	__asm__ __volatile__ (EMMS:::"memory");
 #endif
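
For context: the patch disables the SSE path at compile time (behind the new
FASTMEMCPY_SSE macro, default off) because Linux 2.2.x kernels do not enable
FXSR/SSE state saving, so SSE instructions raise SIGILL even on SSE-capable
CPUs. An alternative would be a runtime probe. Below is a minimal, hypothetical
sketch of such a probe, not part of this patch: it executes one harmless SSE
instruction under a temporary SIGILL handler; if the instruction traps, the
caller would fall back to the non-SSE copy. All names here are illustrative.

/* sse_probe.c - runtime check whether SSE instructions are usable
 * (CPU supports them AND the OS saves/restores SSE state).
 * Hypothetical sketch; not from the patched file. */
#include <signal.h>
#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf sse_probe_env;

/* SIGILL handler: jump back to the probe with a nonzero value. */
static void sse_sigill_handler(int sig)
{
    (void)sig;
    siglongjmp(sse_probe_env, 1);
}

/* Returns 1 if an SSE instruction executes without SIGILL, else 0. */
static int sse_usable(void)
{
#if defined(__i386__) || defined(__x86_64__)
    void (*prev)(int) = signal(SIGILL, sse_sigill_handler);
    int ok = 0;

    if (sigsetjmp(sse_probe_env, 1) == 0) {
        /* xorps is a harmless SSE instruction; it traps with SIGILL
         * when the CPU or the kernel does not support SSE. */
        __asm__ __volatile__ ("xorps %%xmm0, %%xmm0" ::: "memory");
        ok = 1;
    }
    signal(SIGILL, prev);
    return ok;
#else
    return 0; /* non-x86: no SSE */
#endif
}

int main(void)
{
    printf("SSE usable: %s\n", sse_usable() ? "yes" : "no");
    return 0;
}

A probe like this would let SSE-capable CPUs on newer kernels keep the 16-byte
MOVUPS/MOVNTPS fast path while 2.2.x systems silently fall back to the MMX/MMX2
code; the patch instead opts for the simpler compile-time #if 0 gate, which
keeps the header free of runtime state at the cost of disabling SSE for
everyone by default.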