changeset 685:32697fe58055

SSE+2.2.x+SIGILL bug fixed - SSE code disabled...
author arpi_esp
date Wed, 02 May 2001 00:29:16 +0000
parents 5df429e875f1
children 0b3b56e9b9fa
files libvo/fastmemcpy.h
diffstat 1 files changed, 11 insertions(+), 4 deletions(-)
--- a/libvo/fastmemcpy.h	Wed May 02 00:12:01 2001 +0000
+++ b/libvo/fastmemcpy.h	Wed May 02 00:29:16 2001 +0000
@@ -4,6 +4,13 @@
 #ifdef USE_FASTMEMCPY
 #include <stddef.h>
 
+// Enable this code if the SSE version works (and is faster) for you!
+#if 0
+#ifdef HAVE_SSE
+#define FASTMEMCPY_SSE
+#endif
+#endif
+
 /*
  This part of the code was taken from Linux-2.4.3 and slightly modified
 for the MMX, MMX2, SSE instruction sets. I have done it since linux uses page aligned
@@ -54,7 +61,7 @@
 #if defined( HAVE_MMX2 ) || defined( HAVE_3DNOW ) || defined( HAVE_MMX )
 
 #undef HAVE_MMX1
-#if defined(HAVE_MMX) && !defined(HAVE_MMX2) && !defined(HAVE_3DNOW) && !defined(HAVE_SSE)
+#if defined(HAVE_MMX) && !defined(HAVE_MMX2) && !defined(HAVE_3DNOW) && !defined(FASTMEMCPY_SSE)
 /*  means: mmx v.1. Note: Since we added alignment of the destination, it speeds
     up memory copying on PentMMX, Celeron-1 and P2 by up to 12% versus the
     standard (non MMX-optimized) version.
@@ -82,7 +89,7 @@
         : "memory");\
 }
 
-#ifdef HAVE_SSE
+#ifdef FASTMEMCPY_SSE
 #define MMREG_SIZE 16
 #else
 #define MMREG_SIZE 8
@@ -141,7 +148,7 @@
           perform reading and writing in multiples of the number of the
           processor's decoders, but that is not always possible.
         */
-#ifdef HAVE_SSE /* Only P3 (may be Cyrix3) */
+#ifdef FASTMEMCPY_SSE /* Only P3 (may be Cyrix3) */
          if(((unsigned long)from) & 15)
          /* if SRC is misaligned */
          for(; i>0; i--)
@@ -215,7 +222,7 @@
                  * is needed to become ordered again. */
                  __asm__ __volatile__ ("sfence":::"memory");
 #endif
-#ifndef HAVE_SSE
+#ifndef FASTMEMCPY_SSE
                 /* allow use of the FPU again */
                  __asm__ __volatile__ (EMMS:::"memory");
 #endif
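
For reference, the net effect of this changeset: the SSE copy path is now keyed off an
intermediate FASTMEMCPY_SSE macro instead of HAVE_SSE directly, and the block that would
define FASTMEMCPY_SSE sits inside "#if 0", so the SSE code is compiled out by default even
on HAVE_SSE builds. A minimal sketch of the resulting preprocessor logic (everything else
in fastmemcpy.h omitted):

/* Gate added by this changeset: FASTMEMCPY_SSE only gets defined when
 * the outer "#if 0" is flipped to "#if 1", so by default the SSE copy
 * loop is skipped even if the build was configured with HAVE_SSE. */
#if 0                       /* change to 1 to re-enable the SSE path */
#ifdef HAVE_SSE
#define FASTMEMCPY_SSE
#endif
#endif

#ifdef FASTMEMCPY_SSE
#define MMREG_SIZE 16       /* copy in 16-byte XMM-register chunks */
#else
#define MMREG_SIZE 8        /* copy in 8-byte MMX-register chunks */
#endif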
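
The hunk at -141/+148 shows why the SSE path cares about source alignment: when the
source pointer is not 16-byte aligned, it must fall back to unaligned loads. Below is a
hedged intrinsics sketch of that dispatch only; fastmemcpy.h itself uses inline assembly,
and sse_copy_block is a hypothetical helper, not code from this patch. It assumes the
destination is already 16-byte aligned and the length is a multiple of 16, which the real
code arranges before entering its main loop.

#include <stddef.h>
#include <xmmintrin.h>

/* Illustration (SSE1 intrinsics) of the alignment dispatch: a misaligned
 * source needs unaligned loads (movups); an aligned source can use the
 * cheaper aligned loads (movaps). Stores are non-temporal (movntps) to
 * bypass the cache, which is why an sfence is issued afterwards, just as
 * in the original code. */
static void sse_copy_block(void *to, const void *from, size_t bytes)
{
    float *dst = (float *)to;
    const float *src = (const float *)from;
    size_t i = bytes / 16;                 /* number of 16-byte chunks */

    if (((unsigned long)from) & 15) {
        /* SRC is misaligned */
        for (; i > 0; i--, src += 4, dst += 4)
            _mm_stream_ps(dst, _mm_loadu_ps(src));
    } else {
        /* SRC is 16-byte aligned */
        for (; i > 0; i--, src += 4, dst += 4)
            _mm_stream_ps(dst, _mm_load_ps(src));
    }
    _mm_sfence();                          /* order the streaming stores */
}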