changeset 13720:821f464b4d90

adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
author aurel
date Thu, 21 Oct 2004 11:55:20 +0000
parents 43ecd6a73ec0
children 0c2e5c9476df
files bswap.h configure cpudetect.c cpudetect.h libmpcodecs/pullup.c libmpcodecs/vf_decimate.c libmpcodecs/vf_divtc.c libmpcodecs/vf_eq.c libmpcodecs/vf_eq2.c libmpcodecs/vf_filmdint.c libmpcodecs/vf_halfpack.c libmpcodecs/vf_ilpack.c libmpcodecs/vf_ivtc.c libmpcodecs/vf_noise.c libmpcodecs/vf_spp.c libmpcodecs/vf_tfields.c libvo/aclib.c libvo/aclib_template.c libvo/osd.c libvo/osd_template.c postproc/rgb2rgb.c postproc/rgb2rgb_template.c postproc/swscale-example.c postproc/swscale.c postproc/swscale_template.c postproc/yuv2rgb.c postproc/yuv2rgb_template.c
diffstat 27 files changed, 1019 insertions(+), 947 deletions(-)
--- a/bswap.h	Thu Oct 21 11:36:20 2004 +0000
+++ b/bswap.h	Thu Oct 21 11:55:20 2004 +0000
@@ -7,17 +7,23 @@
 
 #include <inttypes.h>
 
-#ifdef ARCH_X86
-static inline unsigned short ByteSwap16(unsigned short x)
+#ifdef ARCH_X86_64
+#  define LEGACY_REGS "=Q"
+#else
+#  define LEGACY_REGS "=q"
+#endif
+
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
+static inline uint16_t ByteSwap16(uint16_t x)
 {
   __asm("xchgb %b0,%h0"	:
-        "=q" (x)	:
+        LEGACY_REGS (x)	:
         "0" (x));
     return x;
 }
 #define bswap_16(x) ByteSwap16(x)
 
-static inline unsigned int ByteSwap32(unsigned int x)
+static inline uint32_t ByteSwap32(uint32_t x)
 {
 #if __CPU__ > 386
  __asm("bswap	%0":
@@ -26,21 +32,28 @@
  __asm("xchgb	%b0,%h0\n"
       "	rorl	$16,%0\n"
       "	xchgb	%b0,%h0":
-      "=q" (x)		:
+      LEGACY_REGS (x)		:
 #endif
       "0" (x));
   return x;
 }
 #define bswap_32(x) ByteSwap32(x)
 
-static inline unsigned long long int ByteSwap64(unsigned long long int x)
+static inline uint64_t ByteSwap64(uint64_t x)
 {
+#ifdef ARCH_X86_64
+  __asm("bswap	%0":
+        "=r" (x)     :
+        "0" (x));
+  return x;
+#else
   register union { __extension__ uint64_t __ll;
           uint32_t __l[2]; } __x;
   asm("xchgl	%0,%1":
       "=r"(__x.__l[0]),"=r"(__x.__l[1]):
       "0"(bswap_32((unsigned long)x)),"1"(bswap_32((unsigned long)(x>>32))));
   return __x.__ll;
+#endif
 }
 #define bswap_64(x) ByteSwap64(x)
 
--- a/configure	Thu Oct 21 11:36:20 2004 +0000
+++ b/configure	Thu Oct 21 11:55:20 2004 +0000
@@ -456,7 +456,14 @@
       case "`( uname -m ) 2>&1`" in
       i[3-9]86*|x86|x86pc|k5|k6|k6_2|k6_3|k6-2|k6-3|pentium*|athlon*|i586_i686|i586-i686|BePC) host_arch=i386 ;;
       ia64) host_arch=ia64 ;;
-      x86_64|amd64) host_arch=x86_64 ;;
+      x86_64|amd64)
+        if [ "`$_cc -dumpmachine | grep x86_64 | cut -d- -f1`" = "x86_64" -a \
+             -z "`echo $CFLAGS | grep -- -m32`"  ]; then
+          host_arch=x86_64
+        else
+          host_arch=i386
+        fi
+      ;;
       macppc|ppc) host_arch=ppc ;;
       alpha) host_arch=alpha ;;
       sparc) host_arch=sparc ;;
@@ -672,17 +679,8 @@
   _cpuinfo="TOOLS/cpuinfo"
 fi
 
-case "$host_arch" in
-  i[3-9]86|x86|x86pc|k5|k6|k6-2|k6-3|pentium*|athlon*|i586-i686)
-  _def_arch="#define ARCH_X86 1"
-  _target_arch="TARGET_ARCH_X86 = yes"
-
-  pname=`$_cpuinfo | grep 'model name' | cut -d ':' -f 2 | head -1`
-  pvendor=`$_cpuinfo | grep 'vendor_id' | cut -d ':' -f 2  | cut -d ' ' -f 2 | head -1`
-  pfamily=`$_cpuinfo | grep 'cpu family' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
-  pmodel=`$_cpuinfo | grep -v 'model name' | grep 'model' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
-  pstepping=`$_cpuinfo | grep 'stepping' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
-
+x86_exts_check()
+{
   pparam=`$_cpuinfo | grep 'features' | cut -d ':' -f 2 | head -1`
   if test -z "$pparam" ; then
     pparam=`$_cpuinfo | grep 'flags' | cut -d ':' -f 2 | head -1`
@@ -707,6 +705,20 @@
     sse2)         _sse2=yes                ;;
     esac
   done
+}
+
+case "$host_arch" in
+  i[3-9]86|x86|x86pc|k5|k6|k6-2|k6-3|pentium*|athlon*|i586-i686)
+  _def_arch="#define ARCH_X86 1"
+  _target_arch="TARGET_ARCH_X86 = yes"
+
+  pname=`$_cpuinfo | grep 'model name' | cut -d ':' -f 2 | head -1`
+  pvendor=`$_cpuinfo | grep 'vendor_id' | cut -d ':' -f 2  | cut -d ' ' -f 2 | head -1`
+  pfamily=`$_cpuinfo | grep 'cpu family' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
+  pmodel=`$_cpuinfo | grep -v 'model name' | grep 'model' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
+  pstepping=`$_cpuinfo | grep 'stepping' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
+
+  x86_exts_check
 
   echocheck "CPU vendor"
   echores "$pvendor ($pfamily:$pmodel:$pstepping)"
@@ -904,6 +916,7 @@
     _march=''
     _mcpu=''
     _optimizing=''
+    x86_exts_check
     ;;
 
   sparc)
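The configure hunks do two things. First, the host_arch case stops trusting uname alone: on a biarch system the kernel can report x86_64 while $_cc emits 32-bit code (either by default target or because CFLAGS carries -m32), in which case host_arch must fall back to i386. Second, the cpuinfo flag parsing is hoisted into x86_exts_check() so the new x86_64 branch can reuse it instead of duplicating the loop. The difference configure is guarding against is visible from C as the width of long and of pointers, which the inline-asm changes below depend on. A sketch that makes it visible (compile once plain, once with -m32):

#include <stdio.h>

/* Prints 8/8 for a native x86_64 build and 4/4 when the same compiler
 * runs with -m32, the case the new configure test demotes to i386. */
int main(void)
{
    printf("sizeof(long) = %zu, sizeof(void *) = %zu\n",
           sizeof(long), sizeof(void *));
    return 0;
}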
--- a/cpudetect.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/cpudetect.c	Thu Oct 21 11:55:20 2004 +0000
@@ -9,7 +9,7 @@
 #endif
 #include <stdlib.h>
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 #include <stdio.h>
 #include <string.h>
@@ -47,25 +47,25 @@
 // return TRUE if cpuid supported
 static int has_cpuid()
 {
-	int a, c;
+	long a, c;
 
 // code from libavcodec:
     __asm__ __volatile__ (
                           /* See if CPUID instruction is supported ... */
                           /* ... Get copies of EFLAGS into eax and ecx */
                           "pushf\n\t"
-                          "popl %0\n\t"
-                          "movl %0, %1\n\t"
+                          "pop %0\n\t"
+                          "mov %0, %1\n\t"
                           
                           /* ... Toggle the ID bit in one copy and store */
                           /*     to the EFLAGS reg */
-                          "xorl $0x200000, %0\n\t"
+                          "xor $0x200000, %0\n\t"
                           "push %0\n\t"
                           "popf\n\t"
                           
                           /* ... Get the (hopefully modified) EFLAGS */
                           "pushf\n\t"
-                          "popl %0\n\t"
+                          "pop %0\n\t"
                           : "=a" (a), "=c" (c)
                           :
                           : "cc" 
@@ -87,9 +87,9 @@
 #else
 // code from libavcodec:
     __asm __volatile
-	("movl %%ebx, %%esi\n\t"
+	("mov %%"REG_b", %%"REG_S"\n\t"
          "cpuid\n\t"
-         "xchgl %%ebx, %%esi"
+         "xchg %%"REG_b", %%"REG_S
          : "=a" (p[0]), "=S" (p[1]), 
            "=c" (p[2]), "=d" (p[3])
          : "0" (ax));
@@ -456,7 +456,7 @@
    gCpuCaps.hasSSE=0;
 #endif /* __linux__ */
 }
-#else /* ARCH_X86 */
+#else /* ARCH_X86 || ARCH_X86_64 */
 
 #ifdef SYS_DARWIN
 #include <sys/sysctl.h>
@@ -536,10 +536,6 @@
 	mp_msg(MSGT_CPUDETECT,MSGL_INFO,"CPU: Intel Itanium\n");
 #endif
 
-#ifdef ARCH_X86_64
-	mp_msg(MSGT_CPUDETECT,MSGL_INFO,"CPU: Advanced Micro Devices 64-bit CPU\n");
-#endif
-
 #ifdef ARCH_SPARC
 	mp_msg(MSGT_CPUDETECT,MSGL_INFO,"CPU: Sun Sparc\n");
 #endif
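Two things happen in cpudetect.c. has_cpuid() drops the explicit popl/pushl suffixes and widens its locals to long, so the EFLAGS push/pop matches the natural word size on either target. do_cpuid() keeps the old trick of parking %ebx in %esi around cpuid (gcc may reserve ebx, or rbx on x86_64, as the PIC register), but now spells the registers through the new REG_b/REG_S macros so one template assembles on both targets. The hardcoded "Advanced Micro Devices 64-bit CPU" banner goes away because x86_64 now takes the real detection path instead of the non-x86 fallback. A standalone sketch of the cpuid wrapper (x86/x86_64 only; the macros mirror those added to cpudetect.h):

#include <stdint.h>
#include <stdio.h>

#ifdef __x86_64__
#  define REG_b "rbx"
#  define REG_S "rsi"
#else
#  define REG_b "ebx"
#  define REG_S "esi"
#endif

/* cpuid with ebx/rbx preserved across the call, for the case where gcc
 * has reserved it as the PIC register. */
static void do_cpuid(uint32_t ax, uint32_t p[4])
{
    __asm__ __volatile__(
        "mov %%" REG_b ", %%" REG_S "\n\t"
        "cpuid\n\t"
        "xchg %%" REG_b ", %%" REG_S
        : "=a"(p[0]), "=S"(p[1]), "=c"(p[2]), "=d"(p[3])
        : "0"(ax));
}

int main(void)
{
    uint32_t r[4];
    do_cpuid(0, r);                       /* leaf 0 */
    printf("max basic cpuid leaf: %u\n", r[0]);
    return 0;
}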
--- a/cpudetect.h	Thu Oct 21 11:36:20 2004 +0000
+++ b/cpudetect.h	Thu Oct 21 11:55:20 2004 +0000
@@ -6,6 +6,32 @@
 #define CPUTYPE_I586	5
 #define CPUTYPE_I686    6
 
+#ifdef ARCH_X86_64
+#  define REGa    rax
+#  define REGb    rbx
+#  define REGSP   rsp
+#  define REG_a  "rax"
+#  define REG_b  "rbx"
+#  define REG_c  "rcx"
+#  define REG_d  "rdx"
+#  define REG_S  "rsi"
+#  define REG_D  "rdi"
+#  define REG_SP "rsp"
+#  define REG_BP "rbp"
+#else
+#  define REGa    eax
+#  define REGb    ebx
+#  define REGSP   esp
+#  define REG_a  "eax"
+#  define REG_b  "ebx"
+#  define REG_c  "ecx"
+#  define REG_d  "edx"
+#  define REG_S  "esi"
+#  define REG_D  "edi"
+#  define REG_SP "esp"
+#  define REG_BP "ebp"
+#endif
+
 typedef struct cpucaps_s {
 	int cpuType;
 	int cpuStepping;
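This header block is the heart of the port: every asm template in the patch writes %%"REG_a" and friends, and the C preprocessor pastes the adjacent string literals into %%rax or %%eax at compile time, so a single source string serves both ABIs. The quoted REG_* forms are for asm template strings; the bare REGa/REGb/REGSP tokens are presumably for contexts that need an unquoted identifier. A self-contained sketch of the pasting:

#include <stdio.h>

#ifdef __x86_64__
#  define REG_a "rax"
#else
#  define REG_a "eax"
#endif

int main(void)
{
    long v;
    /* Adjacent string literals merge, so "xor %%" REG_a ", %%" REG_a
     * becomes one template: "xor %%rax, %%rax" on x86_64 and
     * "xor %%eax, %%eax" on i386. */
    __asm__("xor %%" REG_a ", %%" REG_a "\n\t"
            "mov %%" REG_a ", %0"
            : "=r"(v)
            :
            : "%" REG_a);
    printf("%ld\n", v);  /* 0 */
    return 0;
}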
--- a/libmpcodecs/pullup.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/pullup.c	Thu Oct 21 11:55:20 2004 +0000
@@ -8,6 +8,7 @@
 
 
 
+#ifdef ARCH_X86
 #ifdef HAVE_MMX
 static int diff_y_mmx(unsigned char *a, unsigned char *b, int s)
 {
@@ -147,6 +148,7 @@
 	return ret;
 }
 #endif
+#endif
 
 #define ABS(a) (((a)^((a)>>31))-((a)>>31))
 
@@ -682,12 +684,14 @@
 	case PULLUP_FMT_Y:
 		c->diff = diff_y;
 		c->comb = licomb_y;
+#ifdef ARCH_X86
 #ifdef HAVE_MMX
 		if (c->cpu & PULLUP_CPU_MMX) {
 			c->diff = diff_y_mmx;
 			c->comb = licomb_y_mmx;
 		}
 #endif
+#endif
 		/* c->comb = qpcomb_y; */
 		break;
 #if 0
--- a/libmpcodecs/vf_decimate.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_decimate.c	Thu Oct 21 11:55:20 2004 +0000
@@ -31,11 +31,11 @@
 		".balign 16 \n\t"
 		"1: \n\t"
 		
-		"movq (%%esi), %%mm0 \n\t"
-		"movq (%%esi), %%mm2 \n\t"
-		"addl %%eax, %%esi \n\t"
-		"movq (%%edi), %%mm1 \n\t"
-		"addl %%ebx, %%edi \n\t"
+		"movq (%%"REG_S"), %%mm0 \n\t"
+		"movq (%%"REG_S"), %%mm2 \n\t"
+		"add %%"REG_a", %%"REG_S" \n\t"
+		"movq (%%"REG_D"), %%mm1 \n\t"
+		"add %%"REG_b", %%"REG_D" \n\t"
 		"psubusb %%mm1, %%mm2 \n\t"
 		"psubusb %%mm0, %%mm1 \n\t"
 		"movq %%mm2, %%mm0 \n\t"
@@ -51,10 +51,10 @@
 		
 		"decl %%ecx \n\t"
 		"jnz 1b \n\t"
-		"movq %%mm4, (%%edx) \n\t"
+		"movq %%mm4, (%%"REG_d") \n\t"
 		"emms \n\t"
 		: 
-		: "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out)
+		: "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out)
 		: "memory"
 		);
 	return out[0]+out[1]+out[2]+out[3];
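Two mechanical changes start here and repeat through the filter files. First, mnemonics lose their "l" suffix (addl -> add, movl -> mov) wherever an operand is now a pointer-sized register: with no suffix the assembler derives the operand size from the register, so the same template assembles as 32-bit on i386 and 64-bit on x86_64. Second, int stride arguments bound to fixed registers and used in address arithmetic are cast to long; otherwise only the low 32 bits of the 64-bit register would be defined before it is added to a pointer. A sketch of the second point (advance() is a made-up helper, not from the patch):

#ifdef __x86_64__
#  define REG_a "rax"
#else
#  define REG_a "eax"
#endif

/* Advance a row pointer by a stride inside asm. Casting the "a" input
 * to long makes gcc sign-extend the stride into the whole register; a
 * plain int would leave the upper half of %rax undefined before the
 * 64-bit add below. */
static const unsigned char *advance(const unsigned char *p, int stride)
{
    __asm__("add %%" REG_a ", %0"
            : "+r"(p)
            : "a"((long)stride));
    return p;
}

int main(void)
{
    unsigned char row[64];
    return advance(row, 16) == row + 16 ? 0 : 1;
}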
--- a/libmpcodecs/vf_divtc.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_divtc.c	Thu Oct 21 11:55:20 2004 +0000
@@ -44,11 +44,11 @@
 	".balign 16 \n\t"
 	"1: \n\t"
 
-	"movq (%%esi), %%mm0 \n\t"
-	"movq (%%esi), %%mm2 \n\t"
-	"addl %%eax, %%esi \n\t"
-	"movq (%%edi), %%mm1 \n\t"
-	"addl %%ebx, %%edi \n\t"
+	"movq (%%"REG_S"), %%mm0 \n\t"
+	"movq (%%"REG_S"), %%mm2 \n\t"
+	"add %%"REG_a", %%"REG_S" \n\t"
+	"movq (%%"REG_D"), %%mm1 \n\t"
+	"add %%"REG_b", %%"REG_D" \n\t"
 	"psubusb %%mm1, %%mm2 \n\t"
 	"psubusb %%mm0, %%mm1 \n\t"
 	"movq %%mm2, %%mm0 \n\t"
@@ -64,10 +64,10 @@
 
 	"decl %%ecx \n\t"
 	"jnz 1b \n\t"
-	"movq %%mm4, (%%edx) \n\t"
+	"movq %%mm4, (%%"REG_d") \n\t"
 	"emms \n\t"
 	:
-	: "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out)
+	: "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out)
 	: "memory"
 	);
    return out[0]+out[1]+out[2]+out[3];
--- a/libmpcodecs/vf_eq.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_eq.c	Thu Oct 21 11:55:20 2004 +0000
@@ -64,9 +64,9 @@
 			"paddw %%mm3, %%mm1 \n\t"
 			"paddw %%mm3, %%mm2 \n\t"
 			"packuswb %%mm2, %%mm1 \n\t"
-			"addl $8, %0 \n\t"
+			"add $8, %0 \n\t"
 			"movq %%mm1, (%1) \n\t"
-			"addl $8, %1 \n\t"
+			"add $8, %1 \n\t"
 			"decl %%eax \n\t"
 			"jnz 1b \n\t"
 			: "=r" (src), "=r" (dest)
--- a/libmpcodecs/vf_eq2.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_eq2.c	Thu Oct 21 11:55:20 2004 +0000
@@ -152,9 +152,9 @@
       "paddw %%mm3, %%mm1 \n\t"
       "paddw %%mm3, %%mm2 \n\t"
       "packuswb %%mm2, %%mm1 \n\t"
-      "addl $8, %0 \n\t"
+      "add $8, %0 \n\t"
       "movq %%mm1, (%1) \n\t"
-      "addl $8, %1 \n\t"
+      "add $8, %1 \n\t"
       "decl %%eax \n\t"
       "jnz 1b \n\t"
       : "=r" (src), "=r" (dst)
--- a/libmpcodecs/vf_filmdint.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_filmdint.c	Thu Oct 21 11:55:20 2004 +0000
@@ -406,8 +406,8 @@
 	    "psllq $16, %%mm0\n\t"					     \
 	    "paddusw %%mm0, %%mm7\n\t"					     \
 	    "movq (%1), %%mm4\n\t"					     \
-	    "leal (%0,%2,2), %0\n\t"					     \
-	    "leal (%1,%3,2), %1\n\t"					     \
+	    "lea (%0,%2,2), %0\n\t"					     \
+	    "lea (%1,%3,2), %1\n\t"					     \
 	    "psubusb %4, %%mm4\n\t"					     \
 	    PAVGB(%%mm2, %%mm4)						     \
 	    PAVGB(%%mm2, %%mm4)    /* mm4 = qup odd */			     \
@@ -440,7 +440,7 @@
 	    "paddusw %%mm2, %%mm7\n\t"					     \
 	    "paddusw %%mm1, %%mm7\n\t"					     \
 	    : "=r" (a), "=r" (b)					     \
-	    : "r"(as), "r"(bs), "m" (ones), "0"(a), "1"(b), "X"(*a), "X"(*b) \
+	    : "r"((long)as), "r"((long)bs), "m" (ones), "0"(a), "1"(b), "X"(*a), "X"(*b) \
 	    );								     \
     } while (--lines);
 
@@ -650,7 +650,7 @@
 	    "por %%mm3, %%mm1 \n\t"     /* avg if >= threshold */
 	    "movq %%mm1, (%2,%4) \n\t"
 	    : /* no output */
-	    : "r" (a), "r" (bos), "r" (dst), "r" (ss), "r" (ds), "r" (cos)
+	    : "r" (a), "r" (bos), "r" (dst), "r" ((long)ss), "r" ((long)ds), "r" (cos)
 	    );
 	a += 8;
 	dst += 8;
--- a/libmpcodecs/vf_halfpack.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_halfpack.c	Thu Oct 21 11:55:20 2004 +0000
@@ -75,13 +75,13 @@
 			"por %%mm5, %%mm1 \n\t"
 			"por %%mm6, %%mm2 \n\t"
 
-			"addl $8, %0 \n\t"
-			"addl $8, %1 \n\t"
-			"addl $4, %2 \n\t"
-			"addl $4, %3 \n\t"
+			"add $8, %0 \n\t"
+			"add $8, %1 \n\t"
+			"add $4, %2 \n\t"
+			"add $4, %3 \n\t"
 			"movq %%mm1, (%8) \n\t"
 			"movq %%mm2, 8(%8) \n\t"
-			"addl $16, %8 \n\t"
+			"add $16, %8 \n\t"
 			"decl %9 \n\t"
 			"jnz 1b \n\t"
 			: "=r" (y1), "=r" (y2), "=r" (u), "=r" (v)
--- a/libmpcodecs/vf_ilpack.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_ilpack.c	Thu Oct 21 11:55:20 2004 +0000
@@ -76,12 +76,12 @@
 		"punpcklbw %%mm4, %%mm1 \n\t"
 		"punpckhbw %%mm4, %%mm2 \n\t"
 		
-		"addl $8, %0 \n\t"
-		"addl $4, %1 \n\t"
-		"addl $4, %2 \n\t"
+		"add $8, %0 \n\t"
+		"add $4, %1 \n\t"
+		"add $4, %2 \n\t"
 		"movq %%mm1, (%3) \n\t"
 		"movq %%mm2, 8(%3) \n\t"
-		"addl $16, %3 \n\t"
+		"add $16, %3 \n\t"
 		"decl %4 \n\t"
 		"jnz 1b \n\t"
 		"emms \n\t"
@@ -96,22 +96,26 @@
 	unsigned char *u, unsigned char *v, int w, int us, int vs)
 {
 	asm volatile (""
-		"pushl %%ebp \n\t"
-		"movl 4(%%edx), %%ebp \n\t"
-		"movl (%%edx), %%edx \n\t"
+		"push %%"REG_BP" \n\t"
+#ifdef ARCH_X86_64
+		"mov %6, %%"REG_BP" \n\t"
+#else
+		"movl 4(%%"REG_d"), %%"REG_BP" \n\t"
+		"movl (%%"REG_d"), %%"REG_d" \n\t"
+#endif
 		"pxor %%mm0, %%mm0 \n\t"
 		
 		".balign 16 \n\t"
 		".Lli0: \n\t"
-		"movq (%%esi), %%mm1 \n\t"
-		"movq (%%esi), %%mm2 \n\t"
+		"movq (%%"REG_S"), %%mm1 \n\t"
+		"movq (%%"REG_S"), %%mm2 \n\t"
 		
-		"movq (%%eax,%%edx,2), %%mm4 \n\t"
-		"movq (%%ebx,%%ebp,2), %%mm6 \n\t"
+		"movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+		"movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
 		"punpcklbw %%mm0, %%mm4 \n\t"
 		"punpcklbw %%mm0, %%mm6 \n\t"
-		"movq (%%eax), %%mm3 \n\t"
-		"movq (%%ebx), %%mm5 \n\t"
+		"movq (%%"REG_a"), %%mm3 \n\t"
+		"movq (%%"REG_b"), %%mm5 \n\t"
 		"punpcklbw %%mm0, %%mm3 \n\t"
 		"punpcklbw %%mm0, %%mm5 \n\t"
 		"paddw %%mm3, %%mm4 \n\t"
@@ -136,18 +140,18 @@
 		"punpcklbw %%mm4, %%mm1 \n\t"
 		"punpckhbw %%mm4, %%mm2 \n\t"
 		
-		"movq %%mm1, (%%edi) \n\t"
-		"movq %%mm2, 8(%%edi) \n\t"
+		"movq %%mm1, (%%"REG_D") \n\t"
+		"movq %%mm2, 8(%%"REG_D") \n\t"
 		
-		"movq 8(%%esi), %%mm1 \n\t"
-		"movq 8(%%esi), %%mm2 \n\t"
+		"movq 8(%%"REG_S"), %%mm1 \n\t"
+		"movq 8(%%"REG_S"), %%mm2 \n\t"
 		
-		"movq (%%eax,%%edx,2), %%mm4 \n\t"
-		"movq (%%ebx,%%ebp,2), %%mm6 \n\t"
+		"movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+		"movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
 		"punpckhbw %%mm0, %%mm4 \n\t"
 		"punpckhbw %%mm0, %%mm6 \n\t"
-		"movq (%%eax), %%mm3 \n\t"
-		"movq (%%ebx), %%mm5 \n\t"
+		"movq (%%"REG_a"), %%mm3 \n\t"
+		"movq (%%"REG_b"), %%mm5 \n\t"
 		"punpckhbw %%mm0, %%mm3 \n\t"
 		"punpckhbw %%mm0, %%mm5 \n\t"
 		"paddw %%mm3, %%mm4 \n\t"
@@ -172,20 +176,25 @@
 		"punpcklbw %%mm4, %%mm1 \n\t"
 		"punpckhbw %%mm4, %%mm2 \n\t"
 		
-		"addl $16, %%esi \n\t"
-		"addl $8, %%eax \n\t"
-		"addl $8, %%ebx \n\t"
+		"add $16, %%"REG_S" \n\t"
+		"add $8, %%"REG_a" \n\t"
+		"add $8, %%"REG_b" \n\t"
 		
-		"movq %%mm1, 16(%%edi) \n\t"
-		"movq %%mm2, 24(%%edi) \n\t"
-		"addl $32, %%edi \n\t"
+		"movq %%mm1, 16(%%"REG_D") \n\t"
+		"movq %%mm2, 24(%%"REG_D") \n\t"
+		"add $32, %%"REG_D" \n\t"
 		
 		"decl %%ecx \n\t"
 		"jnz .Lli0 \n\t"
 		"emms \n\t"
-		"popl %%ebp \n\t"
+		"pop %%"REG_BP" \n\t"
 		: 
-		: "S" (y), "D" (dst), "a" (u), "b" (v), "d" (&us), "c" (w/16)
+		: "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16),
+#ifdef ARCH_X86_64
+		"d" ((long)us), "r" ((long)vs)
+#else
+		"d" (&us)
+#endif
 		: "memory"
 		);
 	pack_li_0_C(dst, y, u, v, (w&15), us, vs);
@@ -195,22 +204,26 @@
 	unsigned char *u, unsigned char *v, int w, int us, int vs)
 {
 	asm volatile (""
-		"pushl %%ebp \n\t"
-		"movl 4(%%edx), %%ebp \n\t"
-		"movl (%%edx), %%edx \n\t"
+		"push %%"REG_BP" \n\t"
+#ifdef ARCH_X86_64
+		"mov %6, %%"REG_BP" \n\t"
+#else
+		"movl 4(%%"REG_d"), %%"REG_BP" \n\t"
+		"movl (%%"REG_d"), %%"REG_d" \n\t"
+#endif
 		"pxor %%mm0, %%mm0 \n\t"
 		
 		".balign 16 \n\t"
 		".Lli1: \n\t"
-		"movq (%%esi), %%mm1 \n\t"
-		"movq (%%esi), %%mm2 \n\t"
+		"movq (%%"REG_S"), %%mm1 \n\t"
+		"movq (%%"REG_S"), %%mm2 \n\t"
 		
-		"movq (%%eax,%%edx,2), %%mm4 \n\t"
-		"movq (%%ebx,%%ebp,2), %%mm6 \n\t"
+		"movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+		"movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
 		"punpcklbw %%mm0, %%mm4 \n\t"
 		"punpcklbw %%mm0, %%mm6 \n\t"
-		"movq (%%eax), %%mm3 \n\t"
-		"movq (%%ebx), %%mm5 \n\t"
+		"movq (%%"REG_a"), %%mm3 \n\t"
+		"movq (%%"REG_b"), %%mm5 \n\t"
 		"punpcklbw %%mm0, %%mm3 \n\t"
 		"punpcklbw %%mm0, %%mm5 \n\t"
 		"movq %%mm4, %%mm7 \n\t"
@@ -237,18 +250,18 @@
 		"punpcklbw %%mm4, %%mm1 \n\t"
 		"punpckhbw %%mm4, %%mm2 \n\t"
 		
-		"movq %%mm1, (%%edi) \n\t"
-		"movq %%mm2, 8(%%edi) \n\t"
+		"movq %%mm1, (%%"REG_D") \n\t"
+		"movq %%mm2, 8(%%"REG_D") \n\t"
 		
-		"movq 8(%%esi), %%mm1 \n\t"
-		"movq 8(%%esi), %%mm2 \n\t"
+		"movq 8(%%"REG_S"), %%mm1 \n\t"
+		"movq 8(%%"REG_S"), %%mm2 \n\t"
 		
-		"movq (%%eax,%%edx,2), %%mm4 \n\t"
-		"movq (%%ebx,%%ebp,2), %%mm6 \n\t"
+		"movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+		"movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
 		"punpckhbw %%mm0, %%mm4 \n\t"
 		"punpckhbw %%mm0, %%mm6 \n\t"
-		"movq (%%eax), %%mm3 \n\t"
-		"movq (%%ebx), %%mm5 \n\t"
+		"movq (%%"REG_a"), %%mm3 \n\t"
+		"movq (%%"REG_b"), %%mm5 \n\t"
 		"punpckhbw %%mm0, %%mm3 \n\t"
 		"punpckhbw %%mm0, %%mm5 \n\t"
 		"movq %%mm4, %%mm7 \n\t"
@@ -275,20 +288,25 @@
 		"punpcklbw %%mm4, %%mm1 \n\t"
 		"punpckhbw %%mm4, %%mm2 \n\t"
 		
-		"addl $16, %%esi \n\t"
-		"addl $8, %%eax \n\t"
-		"addl $8, %%ebx \n\t"
+		"add $16, %%"REG_S" \n\t"
+		"add $8, %%"REG_a" \n\t"
+		"add $8, %%"REG_b" \n\t"
 		
-		"movq %%mm1, 16(%%edi) \n\t"
-		"movq %%mm2, 24(%%edi) \n\t"
-		"addl $32, %%edi \n\t"
+		"movq %%mm1, 16(%%"REG_D") \n\t"
+		"movq %%mm2, 24(%%"REG_D") \n\t"
+		"add $32, %%"REG_D" \n\t"
 		
 		"decl %%ecx \n\t"
 		"jnz .Lli1 \n\t"
 		"emms \n\t"
-		"popl %%ebp \n\t"
+		"pop %%"REG_BP" \n\t"
 		: 
-		: "S" (y), "D" (dst), "a" (u), "b" (v), "d" (&us), "c" (w/16)
+		: "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16),
+#ifdef ARCH_X86_64
+		"d" ((long)us), "r" ((long)vs)
+#else
+		"d" (&us)
+#endif
 		: "memory"
 		);
 	pack_li_1_C(dst, y, u, v, (w&15), us, vs);
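pack_li_0/pack_li_1 hit the real limit of 32-bit register allocation: with S, D, a, b, c and d all occupied, no register is left for the second stride, so the i386 version passes &us in %edx and unpacks us and vs inside the asm, borrowing %ebp (hence the push/pop of REG_BP, kept on both targets). x86_64 has registers to spare, so vs simply becomes an extra "r" input (%6) loaded straight into REG_BP. The same dodge reduced to a sketch (sum_strides is a made-up name):

#include <stdio.h>

/* On i386 two ints travel through one register as a pointer and are
 * unpacked inside the asm; on x86_64 they are simply two inputs. */
static long sum_strides(int us, int vs)
{
    long r;
#ifdef __x86_64__
    __asm__("lea (%1,%2), %0"
            : "=r"(r)
            : "r"((long)us), "r"((long)vs));
#else
    int pair[2];
    pair[0] = us; pair[1] = vs;
    __asm__("movl (%1), %0\n\t"
            "addl 4(%1), %0"
            : "=r"(r)
            : "r"(pair)
            : "memory");
#endif
    return r;
}

int main(void)
{
    printf("%ld\n", sum_strides(320, 320));  /* 640 */
    return 0;
}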
--- a/libmpcodecs/vf_ivtc.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_ivtc.c	Thu Oct 21 11:55:20 2004 +0000
@@ -71,11 +71,11 @@
 		"1: \n\t"
 		
 		// Even difference
-		"movq (%%esi), %%mm0 \n\t"
-		"movq (%%esi), %%mm2 \n\t"
-		"addl %%eax, %%esi \n\t"
-		"movq (%%edi), %%mm1 \n\t"
-		"addl %%ebx, %%edi \n\t"
+		"movq (%%"REG_S"), %%mm0 \n\t"
+		"movq (%%"REG_S"), %%mm2 \n\t"
+		"add %%"REG_a", %%"REG_S" \n\t"
+		"movq (%%"REG_D"), %%mm1 \n\t"
+		"add %%"REG_b", %%"REG_D" \n\t"
 		"psubusb %%mm1, %%mm2 \n\t"
 		"psubusb %%mm0, %%mm1 \n\t"
 		"movq %%mm2, %%mm0 \n\t"
@@ -90,11 +90,11 @@
 		"paddw %%mm3, %%mm4 \n\t"
 		
 		// Odd difference
-		"movq (%%esi), %%mm0 \n\t"
-		"movq (%%esi), %%mm2 \n\t"
-		"addl %%eax, %%esi \n\t"
-		"movq (%%edi), %%mm1 \n\t"
-		"addl %%ebx, %%edi \n\t"
+		"movq (%%"REG_S"), %%mm0 \n\t"
+		"movq (%%"REG_S"), %%mm2 \n\t"
+		"add %%"REG_a", %%"REG_S" \n\t"
+		"movq (%%"REG_D"), %%mm1 \n\t"
+		"add %%"REG_b", %%"REG_D" \n\t"
 		"psubusb %%mm1, %%mm2 \n\t"
 		"psubusb %%mm0, %%mm1 \n\t"
 		"movq %%mm2, %%mm0 \n\t"
@@ -110,8 +110,8 @@
 			
 		"decl %%ecx \n\t"
 		"jnz 1b \n\t"
-		"movq %%mm4, (%%edx) \n\t"
-		"movq %%mm5, 8(%%edx) \n\t"
+		"movq %%mm4, (%%"REG_d") \n\t"
+		"movq %%mm5, 8(%%"REG_d") \n\t"
 		: 
 		: "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out)
 		: "memory"
@@ -130,14 +130,14 @@
 		".balign 16 \n\t"
 		"2: \n\t"
 		
-		"movq (%%esi), %%mm0 \n\t"
-		"movq (%%esi,%%eax), %%mm1 \n\t"
-		"addl %%eax, %%esi \n\t"
-		"addl %%eax, %%esi \n\t"
-		"movq (%%edi), %%mm2 \n\t"
-		"movq (%%edi,%%ebx), %%mm3 \n\t"
-		"addl %%ebx, %%edi \n\t"
-		"addl %%ebx, %%edi \n\t"
+		"movq (%%"REG_S"), %%mm0 \n\t"
+		"movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
+		"add %%"REG_a", %%"REG_S" \n\t"
+		"add %%"REG_a", %%"REG_S" \n\t"
+		"movq (%%"REG_D"), %%mm2 \n\t"
+		"movq (%%"REG_D",%%"REG_b"), %%mm3 \n\t"
+		"add %%"REG_b", %%"REG_D" \n\t"
+		"add %%"REG_b", %%"REG_D" \n\t"
 		"punpcklbw %%mm7, %%mm0 \n\t"
 		"punpcklbw %%mm7, %%mm1 \n\t"
 		"punpcklbw %%mm7, %%mm2 \n\t"
@@ -164,16 +164,16 @@
 		"psubw %%mm1, %%mm4 \n\t"
 		"psubw %%mm2, %%mm5 \n\t"
 		"psubw %%mm3, %%mm6 \n\t"
-		"movq %%mm4, (%%edx) \n\t"
-		"movq %%mm5, 16(%%edx) \n\t"
-		"movq %%mm6, 32(%%edx) \n\t"
+		"movq %%mm4, (%%"REG_d") \n\t"
+		"movq %%mm5, 16(%%"REG_d") \n\t"
+		"movq %%mm6, 32(%%"REG_d") \n\t"
 
-		"movl %%eax, %%ecx \n\t"
-		"shll $3, %%ecx \n\t"
-		"subl %%ecx, %%esi \n\t"
-		"movl %%ebx, %%ecx \n\t"
-		"shll $3, %%ecx \n\t"
-		"subl %%ecx, %%edi \n\t"
+		"mov %%"REG_a", %%"REG_c" \n\t"
+		"shl $3, %%"REG_c" \n\t"
+		"sub %%"REG_c", %%"REG_S" \n\t"
+		"mov %%"REG_b", %%"REG_c" \n\t"
+		"shl $3, %%"REG_c" \n\t"
+		"sub %%"REG_c", %%"REG_D" \n\t"
 
 		// Second loop for the last four columns
 		"movl $4, %%ecx \n\t"
@@ -184,14 +184,14 @@
 		".balign 16 \n\t"
 		"3: \n\t"
 		
-		"movq (%%esi), %%mm0 \n\t"
-		"movq (%%esi,%%eax), %%mm1 \n\t"
-		"addl %%eax, %%esi \n\t"
-		"addl %%eax, %%esi \n\t"
-		"movq (%%edi), %%mm2 \n\t"
-		"movq (%%edi,%%ebx), %%mm3 \n\t"
-		"addl %%ebx, %%edi \n\t"
-		"addl %%ebx, %%edi \n\t"
+		"movq (%%"REG_S"), %%mm0 \n\t"
+		"movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
+		"add %%"REG_a", %%"REG_S" \n\t"
+		"add %%"REG_a", %%"REG_S" \n\t"
+		"movq (%%"REG_D"), %%mm2 \n\t"
+		"movq (%%"REG_D",%%"REG_b"), %%mm3 \n\t"
+		"add %%"REG_b", %%"REG_D" \n\t"
+		"add %%"REG_b", %%"REG_D" \n\t"
 		"punpckhbw %%mm7, %%mm0 \n\t"
 		"punpckhbw %%mm7, %%mm1 \n\t"
 		"punpckhbw %%mm7, %%mm2 \n\t"
@@ -218,13 +218,13 @@
 		"psubw %%mm1, %%mm4 \n\t"
 		"psubw %%mm2, %%mm5 \n\t"
 		"psubw %%mm3, %%mm6 \n\t"
-		"movq %%mm4, 8(%%edx) \n\t"
-		"movq %%mm5, 24(%%edx) \n\t"
-		"movq %%mm6, 40(%%edx) \n\t"
+		"movq %%mm4, 8(%%"REG_d") \n\t"
+		"movq %%mm5, 24(%%"REG_d") \n\t"
+		"movq %%mm6, 40(%%"REG_d") \n\t"
 
 		"emms \n\t"
 		: 
-		: "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out)
+		: "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out)
 		: "memory"
 		);
 	m->p = m->t = m->s = 0;
--- a/libmpcodecs/vf_noise.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_noise.c	Thu Oct 21 11:55:20 2004 +0000
@@ -143,26 +143,26 @@
 
 #ifdef HAVE_MMX
 static inline void lineNoise_MMX(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){
-	int mmx_len= len&(~7);
+	long mmx_len= len&(~7);
 	noise+=shift;
 
 	asm volatile(
-		"movl %3, %%eax			\n\t"
+		"mov %3, %%"REG_a"		\n\t"
 		"pcmpeqb %%mm7, %%mm7		\n\t"
 		"psllw $15, %%mm7		\n\t"
 		"packsswb %%mm7, %%mm7		\n\t"
 		".balign 16			\n\t"
 		"1:				\n\t"
-		"movq (%0, %%eax), %%mm0	\n\t"
-		"movq (%1, %%eax), %%mm1	\n\t"
+		"movq (%0, %%"REG_a"), %%mm0	\n\t"
+		"movq (%1, %%"REG_a"), %%mm1	\n\t"
 		"pxor %%mm7, %%mm0		\n\t"
 		"paddsb %%mm1, %%mm0		\n\t"
 		"pxor %%mm7, %%mm0		\n\t"
-		"movq %%mm0, (%2, %%eax)	\n\t"
-		"addl $8, %%eax			\n\t"
+		"movq %%mm0, (%2, %%"REG_a")	\n\t"
+		"add $8, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
 		:: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
-		: "%eax"
+		: "%"REG_a
 	);
 	if(mmx_len!=len)
 		lineNoise_C(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
@@ -172,26 +172,26 @@
 //duplicate of previous except movntq
 #ifdef HAVE_MMX2
 static inline void lineNoise_MMX2(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){
-	int mmx_len= len&(~7);
+	long mmx_len= len&(~7);
 	noise+=shift;
 
 	asm volatile(
-		"movl %3, %%eax			\n\t"
+		"mov %3, %%"REG_a"		\n\t"
 		"pcmpeqb %%mm7, %%mm7		\n\t"
 		"psllw $15, %%mm7		\n\t"
 		"packsswb %%mm7, %%mm7		\n\t"
 		".balign 16			\n\t"
 		"1:				\n\t"
-		"movq (%0, %%eax), %%mm0	\n\t"
-		"movq (%1, %%eax), %%mm1	\n\t"
+		"movq (%0, %%"REG_a"), %%mm0	\n\t"
+		"movq (%1, %%"REG_a"), %%mm1	\n\t"
 		"pxor %%mm7, %%mm0		\n\t"
 		"paddsb %%mm1, %%mm0		\n\t"
 		"pxor %%mm7, %%mm0		\n\t"
-		"movntq %%mm0, (%2, %%eax)	\n\t"
-		"addl $8, %%eax			\n\t"
+		"movntq %%mm0, (%2, %%"REG_a")	\n\t"
+		"add $8, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
 		:: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
-		: "%eax"
+		: "%"REG_a
 	);
 	if(mmx_len!=len)
 		lineNoise_C(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
@@ -214,16 +214,16 @@
 
 #ifdef HAVE_MMX
 static inline void lineNoiseAvg_MMX(uint8_t *dst, uint8_t *src, int len, int8_t **shift){
-	int mmx_len= len&(~7);
+	long mmx_len= len&(~7);
 
 	asm volatile(
-		"movl %5, %%eax			\n\t"
+		"mov %5, %%"REG_a"		\n\t"
 		".balign 16			\n\t"
 		"1:				\n\t"
-		"movq (%1, %%eax), %%mm1	\n\t"
-		"movq (%0, %%eax), %%mm0	\n\t"
-		"paddb (%2, %%eax), %%mm1	\n\t"
-		"paddb (%3, %%eax), %%mm1	\n\t"
+		"movq (%1, %%"REG_a"), %%mm1	\n\t"
+		"movq (%0, %%"REG_a"), %%mm0	\n\t"
+		"paddb (%2, %%"REG_a"), %%mm1	\n\t"
+		"paddb (%3, %%"REG_a"), %%mm1	\n\t"
 		"movq %%mm0, %%mm2		\n\t"
 		"movq %%mm1, %%mm3		\n\t"
 		"punpcklbw %%mm0, %%mm0		\n\t"
@@ -239,12 +239,12 @@
 		"psrlw $8, %%mm1		\n\t"
 		"psrlw $8, %%mm3		\n\t"
                 "packuswb %%mm3, %%mm1		\n\t"
-		"movq %%mm1, (%4, %%eax)	\n\t"
-		"addl $8, %%eax			\n\t"
+		"movq %%mm1, (%4, %%"REG_a")	\n\t"
+		"add $8, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
 		:: "r" (src+mmx_len), "r" (shift[0]+mmx_len), "r" (shift[1]+mmx_len), "r" (shift[2]+mmx_len), 
                    "r" (dst+mmx_len), "g" (-mmx_len)
-		: "%eax"
+		: "%"REG_a
 	);
 
 	if(mmx_len!=len){
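The lineNoise loops run from the end of the buffers with a negative counter: the inputs are biased by mmx_len, %%REG_a counts from -mmx_len up toward zero, and a single "js 1b" closes the loop with no cmp. mmx_len widens to long because the counter register participates in 64-bit address formation, (%0, %%rax), and the clobber is spelled "%"REG_a for the same one-template-two-ABIs reason. The idiom in plain C (a sketch only; the MMX code does a saturating signed add, omitted here):

static void add_noise(unsigned char *dst, const unsigned char *src,
                      const signed char *noise, long len)
{
    long n = len & ~7L;    /* whole 8-byte groups, as the MMX loop handles */
    long i;
    src += n; noise += n; dst += n;    /* bias pointers past the end */
    for (i = -n; i < 0; i++)
        dst[i] = (unsigned char)(src[i] + noise[i]);
    /* a scalar tail (lineNoise_C in the patch) would finish len & 7 */
}

int main(void)
{
    unsigned char d[8], s[8] = {10,10,10,10,10,10,10,10};
    signed char nz[8] = {1,-1,2,-2,3,-3,4,-4};
    add_noise(d, s, nz, 8);
    return d[0] == 11 ? 0 : 1;
}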
--- a/libmpcodecs/vf_spp.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_spp.c	Thu Oct 21 11:55:20 2004 +0000
@@ -357,9 +357,9 @@
 			"psraw %%mm2, %%mm1	\n\t"
 			"packuswb %%mm1, %%mm0	\n\t"
 			"movq %%mm0, (%1) 	\n\t"
-			"addl $16, %0		\n\t"
-			"addl $8, %1		\n\t"
-			"cmpl %2, %1		\n\t"
+			"add $16, %0		\n\t"
+			"add $8, %1		\n\t"
+			"cmp %2, %1		\n\t"
 			" jb 1b			\n\t"
 			: "+r" (src1), "+r"(dst1)
 			: "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(6-log2_scale)
--- a/libmpcodecs/vf_tfields.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libmpcodecs/vf_tfields.c	Thu Oct 21 11:55:20 2004 +0000
@@ -61,7 +61,7 @@
 static void qpel_li_3DNOW(unsigned char *d, unsigned char *s, int w, int h, int ds, int ss, int up)
 {
 	int i, j, ssd=ss;
-	int crap1, crap2;
+	long crap1, crap2;
 	if (up) {
 		ssd = -ss;
 		memcpy(d, s, w);
@@ -71,17 +71,17 @@
 	for (i=h-1; i; i--) {
 		asm volatile(
 			"1: \n\t"
-			"movq (%%esi), %%mm0 \n\t"
-			"movq (%%esi,%%eax), %%mm1 \n\t"
+			"movq (%%"REG_S"), %%mm0 \n\t"
+			"movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
 			"pavgusb %%mm0, %%mm1 \n\t"
-			"addl $8, %%esi \n\t"
+			"add $8, %%"REG_S" \n\t"
 			"pavgusb %%mm0, %%mm1 \n\t"
-			"movq %%mm1, (%%edi) \n\t"
-			"addl $8, %%edi \n\t"
+			"movq %%mm1, (%%"REG_D") \n\t"
+			"add $8, %%"REG_D" \n\t"
 			"decl %%ecx \n\t"
 			"jnz 1b \n\t"
 			: "=S"(crap1), "=D"(crap2)
-			: "c"(w>>3), "S"(s), "D"(d), "a"(ssd)
+			: "c"(w>>3), "S"(s), "D"(d), "a"((long)ssd)
 		);
 		for (j=w-(w&7); j<w; j++)
 			d[j] = (s[j+ssd] + 3*s[j])>>2;
@@ -97,7 +97,7 @@
 static void qpel_li_MMX2(unsigned char *d, unsigned char *s, int w, int h, int ds, int ss, int up)
 {
 	int i, j, ssd=ss;
-	int crap1, crap2;
+	long crap1, crap2;
 	if (up) {
 		ssd = -ss;
 		memcpy(d, s, w);
@@ -108,17 +108,17 @@
 		asm volatile(
 			"pxor %%mm7, %%mm7 \n\t"
 			"2: \n\t"
-			"movq (%%esi), %%mm0 \n\t"
-			"movq (%%esi,%%eax), %%mm1 \n\t"
+			"movq (%%"REG_S"), %%mm0 \n\t"
+			"movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
 			"pavgb %%mm0, %%mm1 \n\t"
-			"addl $8, %%esi \n\t"
+			"add $8, %%"REG_S" \n\t"
 			"pavgb %%mm0, %%mm1 \n\t"
-			"movq %%mm1, (%%edi) \n\t"
-			"addl $8, %%edi \n\t"
+			"movq %%mm1, (%%"REG_D") \n\t"
+			"add $8, %%"REG_D" \n\t"
 			"decl %%ecx \n\t"
 			"jnz 2b \n\t"
 			: "=S"(crap1), "=D"(crap2)
-			: "c"(w>>3), "S"(s), "D"(d), "a"(ssd)
+			: "c"(w>>3), "S"(s), "D"(d), "a"((long)ssd)
 		);
 		for (j=w-(w&7); j<w; j++)
 			d[j] = (s[j+ssd] + 3*s[j])>>2;
@@ -145,11 +145,11 @@
 		asm volatile(
 			"pxor %%mm7, %%mm7 \n\t"
 			"3: \n\t"
-			"movq (%%esi), %%mm0 \n\t"
-			"movq (%%esi), %%mm1 \n\t"
-			"movq (%%esi,%%eax), %%mm2 \n\t"
-			"movq (%%esi,%%eax), %%mm3 \n\t"
-			"addl $8, %%esi \n\t"
+			"movq (%%"REG_S"), %%mm0 \n\t"
+			"movq (%%"REG_S"), %%mm1 \n\t"
+			"movq (%%"REG_S",%%"REG_a"), %%mm2 \n\t"
+			"movq (%%"REG_S",%%"REG_a"), %%mm3 \n\t"
+			"add $8, %%"REG_S" \n\t"
 			"punpcklbw %%mm7, %%mm0 \n\t"
 			"punpckhbw %%mm7, %%mm1 \n\t"
 			"punpcklbw %%mm7, %%mm2 \n\t"
@@ -163,12 +163,12 @@
 			"psrlw $2, %%mm2 \n\t"
 			"psrlw $2, %%mm3 \n\t"
 			"packsswb %%mm3, %%mm2 \n\t"
-			"movq %%mm2, (%%edi) \n\t"
-			"addl $8, %%edi \n\t"
+			"movq %%mm2, (%%"REG_D") \n\t"
+			"add $8, %%"REG_D" \n\t"
 			"decl %%ecx \n\t"
 			"jnz 3b \n\t"
 			: "=S"(crap1), "=D"(crap2)
-			: "c"(w>>3), "S"(s), "D"(d), "a"(ssd)
+			: "c"(w>>3), "S"(s), "D"(d), "a"((long)ssd)
 		);
 		for (j=w-(w&7); j<w; j++)
 			d[j] = (s[j+ssd] + 3*s[j])>>2;
@@ -198,15 +198,15 @@
 	for (i=h-3; i; i--) {
 		asm volatile(
 			"pxor %%mm0, %%mm0 \n\t"
-			"movq (%%edx), %%mm4 \n\t"
-			"movq 8(%%edx), %%mm5 \n\t"
-			"movq 16(%%edx), %%mm6 \n\t"
-			"movq 24(%%edx), %%mm7 \n\t"
+			"movq (%%"REG_d"), %%mm4 \n\t"
+			"movq 8(%%"REG_d"), %%mm5 \n\t"
+			"movq 16(%%"REG_d"), %%mm6 \n\t"
+			"movq 24(%%"REG_d"), %%mm7 \n\t"
 			"4: \n\t"
 
-			"movq (%%esi,%%eax), %%mm1 \n\t"
-			"movq (%%esi), %%mm2 \n\t"
-			"movq (%%esi,%%ebx), %%mm3 \n\t"
+			"movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
+			"movq (%%"REG_S"), %%mm2 \n\t"
+			"movq (%%"REG_S",%%"REG_b"), %%mm3 \n\t"
 			"punpcklbw %%mm0, %%mm1 \n\t"
 			"punpcklbw %%mm0, %%mm2 \n\t"
 			"pmullw %%mm4, %%mm1 \n\t"
@@ -214,38 +214,38 @@
 			"pmullw %%mm5, %%mm2 \n\t"
 			"paddusw %%mm2, %%mm1 \n\t"
 			"pmullw %%mm6, %%mm3 \n\t"
-			"movq (%%esi,%%eax,2), %%mm2 \n\t"
+			"movq (%%"REG_S",%%"REG_a",2), %%mm2 \n\t"
 			"psubusw %%mm3, %%mm1 \n\t"
 			"punpcklbw %%mm0, %%mm2 \n\t"	
 			"pmullw %%mm7, %%mm2 \n\t"
 			"psubusw %%mm2, %%mm1 \n\t"
 			"psrlw $7, %%mm1 \n\t"
 
-			"movq (%%esi,%%eax), %%mm2 \n\t"
-			"movq (%%esi), %%mm3 \n\t"
+			"movq (%%"REG_S",%%"REG_a"), %%mm2 \n\t"
+			"movq (%%"REG_S"), %%mm3 \n\t"
 			"punpckhbw %%mm0, %%mm2 \n\t"
 			"punpckhbw %%mm0, %%mm3 \n\t"
 			"pmullw %%mm4, %%mm2 \n\t"
 			"pmullw %%mm5, %%mm3 \n\t"
 			"paddusw %%mm3, %%mm2 \n\t"
-			"movq (%%esi,%%ebx), %%mm3 \n\t"
+			"movq (%%"REG_S",%%"REG_b"), %%mm3 \n\t"
 			"punpckhbw %%mm0, %%mm3 \n\t"
 			"pmullw %%mm6, %%mm3 \n\t"
 			"psubusw %%mm3, %%mm2 \n\t"
-			"movq (%%esi,%%eax,2), %%mm3 \n\t"
+			"movq (%%"REG_S",%%"REG_a",2), %%mm3 \n\t"
 			"punpckhbw %%mm0, %%mm3 \n\t"	
-			"addl $8, %%esi \n\t"
+			"add $8, %%"REG_S" \n\t"
 			"pmullw %%mm7, %%mm3 \n\t"
 			"psubusw %%mm3, %%mm2 \n\t"
 			"psrlw $7, %%mm2 \n\t"
 			
 			"packuswb %%mm2, %%mm1 \n\t"
-			"movq %%mm1, (%%edi) \n\t"
-			"addl $8, %%edi \n\t"
+			"movq %%mm1, (%%"REG_D") \n\t"
+			"add $8, %%"REG_D" \n\t"
 			"decl %%ecx \n\t"
 			"jnz 4b \n\t"
 			: "=S"(crap1), "=D"(crap2)
-			: "c"(w>>3), "S"(s), "D"(d), "a"(ssd), "b"(-ssd), "d"(filter)
+			: "c"(w>>3), "S"(s), "D"(d), "a"((long)ssd), "b"((long)-ssd), "d"(filter)
 		);
 		for (j=w-(w&7); j<w; j++)
 			d[j] = (-9*s[j-ssd] + 111*s[j] + 29*s[j+ssd] - 3*s[j+ssd+ssd])>>7;
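The qpel_li_* helpers show a subtler widening: crap1 and crap2 are dummy outputs whose only job is to tell gcc that %esi/%edi were advanced by the loop and no longer hold s and d. Since those registers now carry 64-bit pointers, the dummies must be long; an int output would describe only the low half of the register. Reduced to a sketch (skip8 is a made-up name):

/* Dummy output: after the asm, %esi/%rsi no longer holds the input
 * pointer, and gcc must know. On x86_64 the dummy has to be long, or
 * the recorded result would be a truncated 32-bit pointer. */
static const unsigned char *skip8(const unsigned char *s)
{
    long end;                                      /* was int before the port */
    __asm__("add $8, %0" : "=S"(end) : "0"((long)s));
    return (const unsigned char *)end;
}

int main(void)
{
    unsigned char buf[16];
    return skip8(buf) == buf + 8 ? 0 : 1;
}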
--- a/libvo/aclib.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libvo/aclib.c	Thu Oct 21 11:55:20 2004 +0000
@@ -17,7 +17,7 @@
 //Feel free to fine-tune the above 2, it might be possible to get some speedup with them :)
 
 //#define STATISTICS
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #define CAN_COMPILE_X86_ASM
 #endif
 
@@ -50,7 +50,6 @@
 #undef HAVE_3DNOW
 #undef HAVE_SSE
 #undef HAVE_SSE2
-#undef ARCH_X86
 /*
 #ifdef COMPILE_C
 #undef HAVE_MMX
@@ -69,7 +68,6 @@
 #undef HAVE_3DNOW
 #undef HAVE_SSE
 #undef HAVE_SSE2
-#define ARCH_X86
 #define RENAME(a) a ## _MMX
 #include "aclib_template.c"
 #endif
@@ -82,7 +80,6 @@
 #undef HAVE_3DNOW
 #undef HAVE_SSE
 #undef HAVE_SSE2
-#define ARCH_X86
 #define RENAME(a) a ## _MMX2
 #include "aclib_template.c"
 #endif
@@ -95,7 +92,6 @@
 #define HAVE_3DNOW
 #undef HAVE_SSE
 #undef HAVE_SSE2
-#define ARCH_X86
 #define RENAME(a) a ## _3DNow
 #include "aclib_template.c"
 #endif
@@ -108,7 +104,6 @@
 #undef HAVE_3DNOW
 #define HAVE_SSE
 #define HAVE_SSE2
-#define ARCH_X86
 #define RENAME(a) a ## _SSE
 #include "aclib_template.c"
 #endif
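aclib.c compiles aclib_template.c four times (MMX, MMX2, 3DNow, SSE) under different HAVE_* settings, renaming each instance through RENAME. The per-instance #define/#undef ARCH_X86 lines are dropped because the template, like everything else in this patch, now keys its x86 paths off the externally defined ARCH_X86/ARCH_X86_64 pair rather than a locally faked macro. The multi-compile pattern in miniature (names are illustrative; a macro stands in for the re-included file so the sketch fits one translation unit):

#include <stdio.h>

/* The same body is instantiated once per CPU variant under a RENAME()d
 * name and selected at runtime. */
#define DEFINE_SUM(suffix)                              \
    static int sum_##suffix(const int *p, int n)        \
    {                                                   \
        int s = 0, i;                                   \
        for (i = 0; i < n; i++)                         \
            s += p[i];                                  \
        return s;                                       \
    }

DEFINE_SUM(C)    /* plain C build of the body */
DEFINE_SUM(MMX)  /* stand-in for the SIMD build of the same body */

int main(void)
{
    const int v[4] = { 1, 2, 3, 4 };
    int has_mmx = 0;  /* cpudetect would fill this in at runtime */
    int (*sum)(const int *, int) = has_mmx ? sum_MMX : sum_C;
    printf("%d\n", sum(v, 4));  /* 10 */
    return 0;
}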
--- a/libvo/aclib_template.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libvo/aclib_template.c	Thu Oct 21 11:55:20 2004 +0000
@@ -257,62 +257,62 @@
 	// Pure Assembly cuz gcc is a bit unpredictable ;)
 	if(i>=BLOCK_SIZE/64)
 		asm volatile(
-			"xorl %%eax, %%eax	\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t"
 			".balign 16		\n\t"
 			"1:			\n\t"
-				"movl (%0, %%eax), %%ebx 	\n\t"
-				"movl 32(%0, %%eax), %%ebx 	\n\t"
-				"movl 64(%0, %%eax), %%ebx 	\n\t"
-				"movl 96(%0, %%eax), %%ebx 	\n\t"
-				"addl $128, %%eax		\n\t"
-				"cmpl %3, %%eax			\n\t"
+				"movl (%0, %%"REG_a"), %%ebx 	\n\t"
+				"movl 32(%0, %%"REG_a"), %%ebx 	\n\t"
+				"movl 64(%0, %%"REG_a"), %%ebx 	\n\t"
+				"movl 96(%0, %%"REG_a"), %%ebx 	\n\t"
+				"add $128, %%"REG_a"		\n\t"
+				"cmp %3, %%"REG_a"		\n\t"
 				" jb 1b				\n\t"
 
-			"xorl %%eax, %%eax	\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t"
 
 				".balign 16		\n\t"
 				"2:			\n\t"
-				"movq (%0, %%eax), %%mm0\n"
-				"movq 8(%0, %%eax), %%mm1\n"
-				"movq 16(%0, %%eax), %%mm2\n"
-				"movq 24(%0, %%eax), %%mm3\n"
-				"movq 32(%0, %%eax), %%mm4\n"
-				"movq 40(%0, %%eax), %%mm5\n"
-				"movq 48(%0, %%eax), %%mm6\n"
-				"movq 56(%0, %%eax), %%mm7\n"
-				MOVNTQ" %%mm0, (%1, %%eax)\n"
-				MOVNTQ" %%mm1, 8(%1, %%eax)\n"
-				MOVNTQ" %%mm2, 16(%1, %%eax)\n"
-				MOVNTQ" %%mm3, 24(%1, %%eax)\n"
-				MOVNTQ" %%mm4, 32(%1, %%eax)\n"
-				MOVNTQ" %%mm5, 40(%1, %%eax)\n"
-				MOVNTQ" %%mm6, 48(%1, %%eax)\n"
-				MOVNTQ" %%mm7, 56(%1, %%eax)\n"
-				"addl $64, %%eax		\n\t"
-				"cmpl %3, %%eax		\n\t"
+				"movq (%0, %%"REG_a"), %%mm0\n"
+				"movq 8(%0, %%"REG_a"), %%mm1\n"
+				"movq 16(%0, %%"REG_a"), %%mm2\n"
+				"movq 24(%0, %%"REG_a"), %%mm3\n"
+				"movq 32(%0, %%"REG_a"), %%mm4\n"
+				"movq 40(%0, %%"REG_a"), %%mm5\n"
+				"movq 48(%0, %%"REG_a"), %%mm6\n"
+				"movq 56(%0, %%"REG_a"), %%mm7\n"
+				MOVNTQ" %%mm0, (%1, %%"REG_a")\n"
+				MOVNTQ" %%mm1, 8(%1, %%"REG_a")\n"
+				MOVNTQ" %%mm2, 16(%1, %%"REG_a")\n"
+				MOVNTQ" %%mm3, 24(%1, %%"REG_a")\n"
+				MOVNTQ" %%mm4, 32(%1, %%"REG_a")\n"
+				MOVNTQ" %%mm5, 40(%1, %%"REG_a")\n"
+				MOVNTQ" %%mm6, 48(%1, %%"REG_a")\n"
+				MOVNTQ" %%mm7, 56(%1, %%"REG_a")\n"
+				"add $64, %%"REG_a"		\n\t"
+				"cmp %3, %%"REG_a"		\n\t"
 				"jb 2b				\n\t"
 
 #if CONFUSION_FACTOR > 0
 	// a few percent speedup on out of order executing CPUs
-			"movl %5, %%eax		\n\t"
+			"mov %5, %%"REG_a"		\n\t"
 				"2:			\n\t"
 				"movl (%0), %%ebx	\n\t"
 				"movl (%0), %%ebx	\n\t"
 				"movl (%0), %%ebx	\n\t"
 				"movl (%0), %%ebx	\n\t"
-				"decl %%eax		\n\t"
+				"dec %%"REG_a"		\n\t"
 				" jnz 2b		\n\t"
 #endif
 
-			"xorl %%eax, %%eax	\n\t"
-			"addl %3, %0		\n\t"
-			"addl %3, %1		\n\t"
-			"subl %4, %2		\n\t"
-			"cmpl %4, %2		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t"
+			"add %3, %0		\n\t"
+			"add %3, %1		\n\t"
+			"sub %4, %2		\n\t"
+			"cmp %4, %2		\n\t"
 			" jae 1b		\n\t"
 				: "+r" (from), "+r" (to), "+r" (i)
-				: "r" (BLOCK_SIZE), "i" (BLOCK_SIZE/64), "i" (CONFUSION_FACTOR)
-				: "%eax", "%ebx"
+				: "r" ((long)BLOCK_SIZE), "i" (BLOCK_SIZE/64), "i" ((long)CONFUSION_FACTOR)
+				: "%"REG_a, "%ebx"
 		);
 
 	for(; i>0; i--)
--- a/libvo/osd.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libvo/osd.c	Thu Oct 21 11:55:20 2004 +0000
@@ -14,7 +14,7 @@
 
 extern int verbose; // defined in mplayer.c
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #define CAN_COMPILE_X86_ASM
 #endif
 
@@ -48,18 +48,18 @@
 #undef HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#undef ARCH_X86
+
+#ifndef CAN_COMPILE_X86_ASM
 
 #ifdef COMPILE_C
 #undef HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#undef ARCH_X86
 #define RENAME(a) a ## _C
 #include "osd_template.c"
 #endif
 
-#ifdef CAN_COMPILE_X86_ASM
+#else
 
 //X86 noMMX versions
 #ifdef COMPILE_C
@@ -67,7 +67,6 @@
 #undef HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _X86
 #include "osd_template.c"
 #endif
@@ -78,7 +77,6 @@
 #define HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX
 #include "osd_template.c"
 #endif
@@ -89,7 +87,6 @@
 #define HAVE_MMX
 #define HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX2
 #include "osd_template.c"
 #endif
@@ -100,7 +97,6 @@
 #define HAVE_MMX
 #undef HAVE_MMX2
 #define HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _3DNow
 #include "osd_template.c"
 #endif
@@ -129,7 +125,7 @@
 		vo_draw_alpha_yv12_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
 		vo_draw_alpha_yv12_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
 		vo_draw_alpha_yv12_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
 		vo_draw_alpha_yv12_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -159,7 +155,7 @@
 		vo_draw_alpha_yuy2_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
 		vo_draw_alpha_yuy2_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
 		vo_draw_alpha_yuy2_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
 		vo_draw_alpha_yuy2_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -189,7 +185,7 @@
 		vo_draw_alpha_uyvy_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
 		vo_draw_alpha_uyvy_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
 		vo_draw_alpha_uyvy_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
 		vo_draw_alpha_uyvy_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -219,7 +215,7 @@
 		vo_draw_alpha_rgb24_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
 		vo_draw_alpha_rgb24_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
 		vo_draw_alpha_rgb24_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
 		vo_draw_alpha_rgb24_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -249,7 +245,7 @@
 		vo_draw_alpha_rgb32_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
 		vo_draw_alpha_rgb32_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
 		vo_draw_alpha_rgb32_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
 		vo_draw_alpha_rgb32_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -294,7 +290,7 @@
 			mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX (with tiny bit 3DNow) Optimized OnScreenDisplay\n");
 #elif defined (HAVE_MMX)
 			mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX Optimized OnScreenDisplay\n");
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
 			mp_msg(MSGT_OSD,MSGL_INFO,"Using X86 Optimized OnScreenDisplay\n");
 #else
 			mp_msg(MSGT_OSD,MSGL_INFO,"Using Unoptimized OnScreenDisplay\n");
--- a/libvo/osd_template.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/libvo/osd_template.c	Thu Oct 21 11:55:20 2004 +0000
@@ -189,7 +189,7 @@
     for(y=0;y<h;y++){
         register unsigned char *dst = dstbase;
         register int x;
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #ifdef HAVE_MMX
     asm volatile(
 	PREFETCHW" %0\n\t"
@@ -253,7 +253,7 @@
 		"addl %2, %%eax\n\t"
 		"movb %%ah, 2(%0)\n\t"
 		:
-		:"r" (dst),
+		:"D" (dst),
 		 "r" ((unsigned)srca[x]),
 		 "r" (((unsigned)src[x])<<8)
 		:"%eax", "%ecx"
@@ -293,7 +293,7 @@
 #endif
     for(y=0;y<h;y++){
         register int x;
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #ifdef HAVE_MMX
 #ifdef HAVE_3DNOW
     asm volatile(
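One constraint change in osd_template.c stands out: "r"(dst) becomes "D"(dst). The likely reason (an inference, not stated in the patch) is instruction encoding: that path stores through "movb %%ah, 2(%0)", and on x86_64 the high-byte registers %ah..%dh cannot be used in an instruction that requires a REX prefix. A plain "r" could hand the template one of r8..r15, which are REX-only, so the pointer is pinned to %rdi, which encodes without REX. A sketch of the constraint at work:

#include <stdio.h>

#ifdef __x86_64__
/* Pin the pointer to %rdi so the high-byte store needs no REX prefix. */
static void store_high_byte(unsigned char *dst, unsigned int v)
{
    __asm__("movb %%ah, (%0)" : : "D"(dst), "a"(v) : "memory");
}
#endif

int main(void)
{
    unsigned char b = 0;
#ifdef __x86_64__
    store_high_byte(&b, 0xBEEF);   /* %ah holds 0xBE */
#endif
    printf("%02x\n", b);           /* "be" on x86_64 */
    return 0;
}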
--- a/postproc/rgb2rgb.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/postproc/rgb2rgb.c	Thu Oct 21 11:55:20 2004 +0000
@@ -11,6 +11,7 @@
 #include "../config.h"
 #include "rgb2rgb.h"
 #include "swscale.h"
+#include "../cpudetect.h"
 #include "../mangle.h"
 #include "../bswap.h"
 #include "../libvo/fastmemcpy.h"
@@ -68,7 +69,7 @@
 			int srcStride1, int srcStride2,
 			int srcStride3, int dstStride);
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 static const uint64_t mmx_null  __attribute__((aligned(8))) = 0x0000000000000000ULL;
 static const uint64_t mmx_one   __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
 static const uint64_t mask32b  attribute_used __attribute__((aligned(8))) = 0x000000FF000000FFULL;
@@ -152,7 +153,7 @@
 #define RENAME(a) a ## _C
 #include "rgb2rgb_template.c"
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 //MMX versions
 #undef RENAME
@@ -181,7 +182,7 @@
 #define RENAME(a) a ## _3DNOW
 #include "rgb2rgb_template.c"
 
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
 
 /*
  rgb15->rgb16 Original by Strepto/Astral
@@ -191,7 +192,7 @@
 */
 
 void sws_rgb2rgb_init(int flags){
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 	if(flags & SWS_CPU_CAPS_MMX2){
 		rgb15to16= rgb15to16_MMX2;
 		rgb15to24= rgb15to24_MMX2;
--- a/postproc/rgb2rgb_template.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/postproc/rgb2rgb_template.c	Thu Oct 21 11:55:20 2004 +0000
@@ -349,9 +349,9 @@
 		"pslld $11, %%mm3		\n\t"
 		"por %%mm3, %%mm0		\n\t"
 		MOVNTQ"	%%mm0, (%0)		\n\t"
-		"addl $16, %1			\n\t"
-		"addl $8, %0			\n\t"
-		"cmpl %2, %1			\n\t"
+		"add $16, %1			\n\t"
+		"add $8, %0			\n\t"
+		"cmp %2, %1			\n\t"
 		" jb 1b				\n\t"
 		: "+r" (d), "+r"(s)
 		: "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
@@ -509,9 +509,9 @@
 		"pslld $10, %%mm3		\n\t"
 		"por %%mm3, %%mm0		\n\t"
 		MOVNTQ"	%%mm0, (%0)		\n\t"
-		"addl $16, %1			\n\t"
-		"addl $8, %0			\n\t"
-		"cmpl %2, %1			\n\t"
+		"add $16, %1			\n\t"
+		"add $8, %0			\n\t"
+		"cmp %2, %1			\n\t"
 		" jb 1b				\n\t"
 		: "+r" (d), "+r"(s)
 		: "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
@@ -1345,11 +1345,11 @@
 #ifdef HAVE_MMX
 /* TODO: unroll this loop */
 	asm volatile (
-		"xorl %%eax, %%eax		\n\t"
+		"xor %%"REG_a", %%"REG_a"	\n\t"
 		".balign 16			\n\t"
 		"1:				\n\t"
-		PREFETCH" 32(%0, %%eax)		\n\t"
-		"movq (%0, %%eax), %%mm0	\n\t"
+		PREFETCH" 32(%0, %%"REG_a")	\n\t"
+		"movq (%0, %%"REG_a"), %%mm0	\n\t"
 		"movq %%mm0, %%mm1		\n\t"
 		"movq %%mm0, %%mm2		\n\t"
 		"pslld $16, %%mm0		\n\t"
@@ -1359,12 +1359,12 @@
 		"pand "MANGLE(mask32b)", %%mm1	\n\t"
 		"por %%mm0, %%mm2		\n\t"
 		"por %%mm1, %%mm2		\n\t"
-		MOVNTQ" %%mm2, (%1, %%eax)	\n\t"
-		"addl $8, %%eax			\n\t"
-		"cmpl %2, %%eax			\n\t"
+		MOVNTQ" %%mm2, (%1, %%"REG_a")	\n\t"
+		"add $8, %%"REG_a"		\n\t"
+		"cmp %2, %%"REG_a"		\n\t"
 		" jb 1b				\n\t"
-		:: "r" (src), "r"(dst), "r" (src_size-7)
-		: "%eax"
+		:: "r" (src), "r"(dst), "r" ((long)src_size-7)
+		: "%"REG_a
 	);
 
 	__asm __volatile(SFENCE:::"memory");
@@ -1391,43 +1391,43 @@
 {
 	unsigned i;
 #ifdef HAVE_MMX
-	int mmx_size= 23 - src_size;
+	long mmx_size= 23 - src_size;
 	asm volatile (
 		"movq "MANGLE(mask24r)", %%mm5	\n\t"
 		"movq "MANGLE(mask24g)", %%mm6	\n\t"
 		"movq "MANGLE(mask24b)", %%mm7	\n\t"
 		".balign 16			\n\t"
 		"1:				\n\t"
-		PREFETCH" 32(%1, %%eax)		\n\t"
-		"movq   (%1, %%eax), %%mm0	\n\t" // BGR BGR BG
-		"movq   (%1, %%eax), %%mm1	\n\t" // BGR BGR BG
-		"movq  2(%1, %%eax), %%mm2	\n\t" // R BGR BGR B
+		PREFETCH" 32(%1, %%"REG_a")	\n\t"
+		"movq   (%1, %%"REG_a"), %%mm0	\n\t" // BGR BGR BG
+		"movq   (%1, %%"REG_a"), %%mm1	\n\t" // BGR BGR BG
+		"movq  2(%1, %%"REG_a"), %%mm2	\n\t" // R BGR BGR B
 		"psllq $16, %%mm0		\n\t" // 00 BGR BGR
 		"pand %%mm5, %%mm0		\n\t"
 		"pand %%mm6, %%mm1		\n\t"
 		"pand %%mm7, %%mm2		\n\t"
 		"por %%mm0, %%mm1		\n\t"
 		"por %%mm2, %%mm1		\n\t"                
-		"movq  6(%1, %%eax), %%mm0	\n\t" // BGR BGR BG
-		MOVNTQ" %%mm1,   (%2, %%eax)	\n\t" // RGB RGB RG
-		"movq  8(%1, %%eax), %%mm1	\n\t" // R BGR BGR B
-		"movq 10(%1, %%eax), %%mm2	\n\t" // GR BGR BGR
+		"movq  6(%1, %%"REG_a"), %%mm0	\n\t" // BGR BGR BG
+		MOVNTQ" %%mm1,   (%2, %%"REG_a")\n\t" // RGB RGB RG
+		"movq  8(%1, %%"REG_a"), %%mm1	\n\t" // R BGR BGR B
+		"movq 10(%1, %%"REG_a"), %%mm2	\n\t" // GR BGR BGR
 		"pand %%mm7, %%mm0		\n\t"
 		"pand %%mm5, %%mm1		\n\t"
 		"pand %%mm6, %%mm2		\n\t"
 		"por %%mm0, %%mm1		\n\t"
 		"por %%mm2, %%mm1		\n\t"                
-		"movq 14(%1, %%eax), %%mm0	\n\t" // R BGR BGR B
-		MOVNTQ" %%mm1,  8(%2, %%eax)	\n\t" // B RGB RGB R
-		"movq 16(%1, %%eax), %%mm1	\n\t" // GR BGR BGR
-		"movq 18(%1, %%eax), %%mm2	\n\t" // BGR BGR BG
+		"movq 14(%1, %%"REG_a"), %%mm0	\n\t" // R BGR BGR B
+		MOVNTQ" %%mm1,  8(%2, %%"REG_a")\n\t" // B RGB RGB R
+		"movq 16(%1, %%"REG_a"), %%mm1	\n\t" // GR BGR BGR
+		"movq 18(%1, %%"REG_a"), %%mm2	\n\t" // BGR BGR BG
 		"pand %%mm6, %%mm0		\n\t"
 		"pand %%mm7, %%mm1		\n\t"
 		"pand %%mm5, %%mm2		\n\t"
 		"por %%mm0, %%mm1		\n\t"
 		"por %%mm2, %%mm1		\n\t"                
-		MOVNTQ" %%mm1, 16(%2, %%eax)	\n\t"
-		"addl $24, %%eax		\n\t"
+		MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t"
+		"add $24, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
 		: "+a" (mmx_size)
 		: "r" (src-mmx_size), "r"(dst-mmx_size)
@@ -1465,20 +1465,20 @@
 #ifdef HAVE_MMX
 //FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
 		asm volatile(
-			"xorl %%eax, %%eax		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t"
 			".balign 16			\n\t"
 			"1:				\n\t"
-			PREFETCH" 32(%1, %%eax, 2)	\n\t"
-			PREFETCH" 32(%2, %%eax)		\n\t"
-			PREFETCH" 32(%3, %%eax)		\n\t"
-			"movq (%2, %%eax), %%mm0	\n\t" // U(0)
+			PREFETCH" 32(%1, %%"REG_a", 2)	\n\t"
+			PREFETCH" 32(%2, %%"REG_a")	\n\t"
+			PREFETCH" 32(%3, %%"REG_a")	\n\t"
+			"movq (%2, %%"REG_a"), %%mm0	\n\t" // U(0)
 			"movq %%mm0, %%mm2		\n\t" // U(0)
-			"movq (%3, %%eax), %%mm1	\n\t" // V(0)
+			"movq (%3, %%"REG_a"), %%mm1	\n\t" // V(0)
 			"punpcklbw %%mm1, %%mm0		\n\t" // UVUV UVUV(0)
 			"punpckhbw %%mm1, %%mm2		\n\t" // UVUV UVUV(8)
 
-			"movq (%1, %%eax,2), %%mm3	\n\t" // Y(0)
-			"movq 8(%1, %%eax,2), %%mm5	\n\t" // Y(8)
+			"movq (%1, %%"REG_a",2), %%mm3	\n\t" // Y(0)
+			"movq 8(%1, %%"REG_a",2), %%mm5	\n\t" // Y(8)
 			"movq %%mm3, %%mm4		\n\t" // Y(0)
 			"movq %%mm5, %%mm6		\n\t" // Y(8)
 			"punpcklbw %%mm0, %%mm3		\n\t" // YUYV YUYV(0)
@@ -1486,16 +1486,16 @@
 			"punpcklbw %%mm2, %%mm5		\n\t" // YUYV YUYV(8)
 			"punpckhbw %%mm2, %%mm6		\n\t" // YUYV YUYV(12)
 
-			MOVNTQ" %%mm3, (%0, %%eax, 4)	\n\t"
-			MOVNTQ" %%mm4, 8(%0, %%eax, 4)	\n\t"
-			MOVNTQ" %%mm5, 16(%0, %%eax, 4)	\n\t"
-			MOVNTQ" %%mm6, 24(%0, %%eax, 4)	\n\t"
+			MOVNTQ" %%mm3, (%0, %%"REG_a", 4)\n\t"
+			MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
+			MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4)\n\t"
+			MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
 
-			"addl $8, %%eax			\n\t"
-			"cmpl %4, %%eax			\n\t"
+			"add $8, %%"REG_a"		\n\t"
+			"cmp %4, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
-			::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
-			: "%eax"
+			::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" ((long)chromWidth)
+			: "%"REG_a
 		);
 #else
 
@@ -1618,20 +1618,20 @@
 #ifdef HAVE_MMX
 //FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
 		asm volatile(
-			"xorl %%eax, %%eax		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t"
 			".balign 16			\n\t"
 			"1:				\n\t"
-			PREFETCH" 32(%1, %%eax, 2)	\n\t"
-			PREFETCH" 32(%2, %%eax)		\n\t"
-			PREFETCH" 32(%3, %%eax)		\n\t"
-			"movq (%2, %%eax), %%mm0	\n\t" // U(0)
+			PREFETCH" 32(%1, %%"REG_a", 2)	\n\t"
+			PREFETCH" 32(%2, %%"REG_a")	\n\t"
+			PREFETCH" 32(%3, %%"REG_a")	\n\t"
+			"movq (%2, %%"REG_a"), %%mm0	\n\t" // U(0)
 			"movq %%mm0, %%mm2		\n\t" // U(0)
-			"movq (%3, %%eax), %%mm1	\n\t" // V(0)
+			"movq (%3, %%"REG_a"), %%mm1	\n\t" // V(0)
 			"punpcklbw %%mm1, %%mm0		\n\t" // UVUV UVUV(0)
 			"punpckhbw %%mm1, %%mm2		\n\t" // UVUV UVUV(8)
 
-			"movq (%1, %%eax,2), %%mm3	\n\t" // Y(0)
-			"movq 8(%1, %%eax,2), %%mm5	\n\t" // Y(8)
+			"movq (%1, %%"REG_a",2), %%mm3	\n\t" // Y(0)
+			"movq 8(%1, %%"REG_a",2), %%mm5	\n\t" // Y(8)
 			"movq %%mm0, %%mm4		\n\t" // Y(0)
 			"movq %%mm2, %%mm6		\n\t" // Y(8)
 			"punpcklbw %%mm3, %%mm0		\n\t" // YUYV YUYV(0)
@@ -1639,16 +1639,16 @@
 			"punpcklbw %%mm5, %%mm2		\n\t" // YUYV YUYV(8)
 			"punpckhbw %%mm5, %%mm6		\n\t" // YUYV YUYV(12)
 
-			MOVNTQ" %%mm0, (%0, %%eax, 4)	\n\t"
-			MOVNTQ" %%mm4, 8(%0, %%eax, 4)	\n\t"
-			MOVNTQ" %%mm2, 16(%0, %%eax, 4)	\n\t"
-			MOVNTQ" %%mm6, 24(%0, %%eax, 4)	\n\t"
+			MOVNTQ" %%mm0, (%0, %%"REG_a", 4)\n\t"
+			MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
+			MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4)\n\t"
+			MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
 
-			"addl $8, %%eax			\n\t"
-			"cmpl %4, %%eax			\n\t"
+			"add $8, %%"REG_a"		\n\t"
+			"cmp %4, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
-			::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
-			: "%eax"
+			::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" ((long)chromWidth)
+			: "%"REG_a
 		);
 #else
 //FIXME adapt the alpha asm code from yv12->yuy2
@@ -1740,14 +1740,14 @@
 	{
 #ifdef HAVE_MMX
 		asm volatile(
-			"xorl %%eax, %%eax		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t"
 			"pcmpeqw %%mm7, %%mm7		\n\t"
 			"psrlw $8, %%mm7		\n\t" // FF,00,FF,00...
 			".balign 16			\n\t"
 			"1:				\n\t"
-			PREFETCH" 64(%0, %%eax, 4)	\n\t"
-			"movq (%0, %%eax, 4), %%mm0	\n\t" // YUYV YUYV(0)
-			"movq 8(%0, %%eax, 4), %%mm1	\n\t" // YUYV YUYV(4)
+			PREFETCH" 64(%0, %%"REG_a", 4)	\n\t"
+			"movq (%0, %%"REG_a", 4), %%mm0	\n\t" // YUYV YUYV(0)
+			"movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
 			"movq %%mm0, %%mm2		\n\t" // YUYV YUYV(0)
 			"movq %%mm1, %%mm3		\n\t" // YUYV YUYV(4)
 			"psrlw $8, %%mm0		\n\t" // U0V0 U0V0(0)
@@ -1757,10 +1757,10 @@
 			"packuswb %%mm1, %%mm0		\n\t" // UVUV UVUV(0)
 			"packuswb %%mm3, %%mm2		\n\t" // YYYY YYYY(0)
 
-			MOVNTQ" %%mm2, (%1, %%eax, 2)	\n\t"
+			MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t"
 
-			"movq 16(%0, %%eax, 4), %%mm1	\n\t" // YUYV YUYV(8)
-			"movq 24(%0, %%eax, 4), %%mm2	\n\t" // YUYV YUYV(12)
+			"movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(8)
+			"movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(12)
 			"movq %%mm1, %%mm3		\n\t" // YUYV YUYV(8)
 			"movq %%mm2, %%mm4		\n\t" // YUYV YUYV(12)
 			"psrlw $8, %%mm1		\n\t" // U0V0 U0V0(8)
@@ -1770,7 +1770,7 @@
 			"packuswb %%mm2, %%mm1		\n\t" // UVUV UVUV(8)
 			"packuswb %%mm4, %%mm3		\n\t" // YYYY YYYY(8)
 
-			MOVNTQ" %%mm3, 8(%1, %%eax, 2)	\n\t"
+			MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t"
 
 			"movq %%mm0, %%mm2		\n\t" // UVUV UVUV(0)
 			"movq %%mm1, %%mm3		\n\t" // UVUV UVUV(8)
@@ -1781,28 +1781,28 @@
 			"packuswb %%mm1, %%mm0		\n\t" // VVVV VVVV(0)
 			"packuswb %%mm3, %%mm2		\n\t" // UUUU UUUU(0)
 
-			MOVNTQ" %%mm0, (%3, %%eax)	\n\t"
-			MOVNTQ" %%mm2, (%2, %%eax)	\n\t"
+			MOVNTQ" %%mm0, (%3, %%"REG_a")	\n\t"
+			MOVNTQ" %%mm2, (%2, %%"REG_a")	\n\t"
 
-			"addl $8, %%eax			\n\t"
-			"cmpl %4, %%eax			\n\t"
+			"add $8, %%"REG_a"		\n\t"
+			"cmp %4, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
-			::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
-			: "memory", "%eax"
+			::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" ((long)chromWidth)
+			: "memory", "%"REG_a
 		);
 
 		ydst += lumStride;
 		src  += srcStride;
 
 		asm volatile(
-			"xorl %%eax, %%eax		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t"
 			".balign 16			\n\t"
 			"1:				\n\t"
-			PREFETCH" 64(%0, %%eax, 4)	\n\t"
-			"movq (%0, %%eax, 4), %%mm0	\n\t" // YUYV YUYV(0)
-			"movq 8(%0, %%eax, 4), %%mm1	\n\t" // YUYV YUYV(4)
-			"movq 16(%0, %%eax, 4), %%mm2	\n\t" // YUYV YUYV(8)
-			"movq 24(%0, %%eax, 4), %%mm3	\n\t" // YUYV YUYV(12)
+			PREFETCH" 64(%0, %%"REG_a", 4)	\n\t"
+			"movq (%0, %%"REG_a", 4), %%mm0	\n\t" // YUYV YUYV(0)
+			"movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
+			"movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(8)
+			"movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // YUYV YUYV(12)
 			"pand %%mm7, %%mm0		\n\t" // Y0Y0 Y0Y0(0)
 			"pand %%mm7, %%mm1		\n\t" // Y0Y0 Y0Y0(4)
 			"pand %%mm7, %%mm2		\n\t" // Y0Y0 Y0Y0(8)
@@ -1810,15 +1810,15 @@
 			"packuswb %%mm1, %%mm0		\n\t" // YYYY YYYY(0)
 			"packuswb %%mm3, %%mm2		\n\t" // YYYY YYYY(8)
 
-			MOVNTQ" %%mm0, (%1, %%eax, 2)	\n\t"
-			MOVNTQ" %%mm2, 8(%1, %%eax, 2)	\n\t"
+			MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t"
+			MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t"
 
-			"addl $8, %%eax			\n\t"
-			"cmpl %4, %%eax			\n\t"
+			"add $8, %%"REG_a"		\n\t"
+			"cmp %4, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
 
-			::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
-			: "memory", "%eax"
+			::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" ((long)chromWidth)
+			: "memory", "%"REG_a
 		);
 #else
 		unsigned i;
@@ -1877,16 +1877,16 @@
 
 	for(y=1; y<srcHeight; y++){
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-		const int mmxSize= srcWidth&~15;
+		const long mmxSize= srcWidth&~15;
 		asm volatile(
-			"movl %4, %%eax			\n\t"
+			"mov %4, %%"REG_a"		\n\t"
 			"1:				\n\t"
-			"movq (%0, %%eax), %%mm0	\n\t"
-			"movq (%1, %%eax), %%mm1	\n\t"
-			"movq 1(%0, %%eax), %%mm2	\n\t"
-			"movq 1(%1, %%eax), %%mm3	\n\t"
-			"movq -1(%0, %%eax), %%mm4	\n\t"
-			"movq -1(%1, %%eax), %%mm5	\n\t"
+			"movq (%0, %%"REG_a"), %%mm0	\n\t"
+			"movq (%1, %%"REG_a"), %%mm1	\n\t"
+			"movq 1(%0, %%"REG_a"), %%mm2	\n\t"
+			"movq 1(%1, %%"REG_a"), %%mm3	\n\t"
+			"movq -1(%0, %%"REG_a"), %%mm4	\n\t"
+			"movq -1(%1, %%"REG_a"), %%mm5	\n\t"
 			PAVGB" %%mm0, %%mm5		\n\t"
 			PAVGB" %%mm0, %%mm3		\n\t"
 			PAVGB" %%mm0, %%mm5		\n\t"
@@ -1902,22 +1902,22 @@
 			"punpcklbw %%mm2, %%mm4		\n\t"
 			"punpckhbw %%mm2, %%mm6		\n\t"
 #if 1
-			MOVNTQ" %%mm5, (%2, %%eax, 2)	\n\t"
-			MOVNTQ" %%mm7, 8(%2, %%eax, 2)	\n\t"
-			MOVNTQ" %%mm4, (%3, %%eax, 2)	\n\t"
-			MOVNTQ" %%mm6, 8(%3, %%eax, 2)	\n\t"
+			MOVNTQ" %%mm5, (%2, %%"REG_a", 2)\n\t"
+			MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2)\n\t"
+			MOVNTQ" %%mm4, (%3, %%"REG_a", 2)\n\t"
+			MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2)\n\t"
 #else
-			"movq %%mm5, (%2, %%eax, 2)	\n\t"
-			"movq %%mm7, 8(%2, %%eax, 2)	\n\t"
-			"movq %%mm4, (%3, %%eax, 2)	\n\t"
-			"movq %%mm6, 8(%3, %%eax, 2)	\n\t"
+			"movq %%mm5, (%2, %%"REG_a", 2)	\n\t"
+			"movq %%mm7, 8(%2, %%"REG_a", 2)\n\t"
+			"movq %%mm4, (%3, %%"REG_a", 2)	\n\t"
+			"movq %%mm6, 8(%3, %%"REG_a", 2)\n\t"
 #endif
-			"addl $8, %%eax			\n\t"
+			"add $8, %%"REG_a"		\n\t"
 			" js 1b				\n\t"
 			:: "r" (src + mmxSize  ), "r" (src + srcStride + mmxSize  ),
 			   "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
 			   "g" (-mmxSize)
-			: "%eax"
+			: "%"REG_a
 
 		);
 #else
@@ -2107,20 +2107,20 @@
 		for(i=0; i<2; i++)
 		{
 			asm volatile(
-				"movl %2, %%eax			\n\t"
+				"mov %2, %%"REG_a"		\n\t"
 				"movq "MANGLE(bgr2YCoeff)", %%mm6		\n\t"
 				"movq "MANGLE(w1111)", %%mm5		\n\t"
 				"pxor %%mm7, %%mm7		\n\t"
-				"leal (%%eax, %%eax, 2), %%ebx	\n\t"
+				"lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
 				".balign 16			\n\t"
 				"1:				\n\t"
-				PREFETCH" 64(%0, %%ebx)		\n\t"
-				"movd (%0, %%ebx), %%mm0	\n\t"
-				"movd 3(%0, %%ebx), %%mm1	\n\t"
+				PREFETCH" 64(%0, %%"REG_b")	\n\t"
+				"movd (%0, %%"REG_b"), %%mm0	\n\t"
+				"movd 3(%0, %%"REG_b"), %%mm1	\n\t"
 				"punpcklbw %%mm7, %%mm0		\n\t"
 				"punpcklbw %%mm7, %%mm1		\n\t"
-				"movd 6(%0, %%ebx), %%mm2	\n\t"
-				"movd 9(%0, %%ebx), %%mm3	\n\t"
+				"movd 6(%0, %%"REG_b"), %%mm2	\n\t"
+				"movd 9(%0, %%"REG_b"), %%mm3	\n\t"
 				"punpcklbw %%mm7, %%mm2		\n\t"
 				"punpcklbw %%mm7, %%mm3		\n\t"
 				"pmaddwd %%mm6, %%mm0		\n\t"
@@ -2140,12 +2140,12 @@
 				"packssdw %%mm2, %%mm0		\n\t"
 				"psraw $7, %%mm0		\n\t"
 
-				"movd 12(%0, %%ebx), %%mm4	\n\t"
-				"movd 15(%0, %%ebx), %%mm1	\n\t"
+				"movd 12(%0, %%"REG_b"), %%mm4	\n\t"
+				"movd 15(%0, %%"REG_b"), %%mm1	\n\t"
 				"punpcklbw %%mm7, %%mm4		\n\t"
 				"punpcklbw %%mm7, %%mm1		\n\t"
-				"movd 18(%0, %%ebx), %%mm2	\n\t"
-				"movd 21(%0, %%ebx), %%mm3	\n\t"
+				"movd 18(%0, %%"REG_b"), %%mm2	\n\t"
+				"movd 21(%0, %%"REG_b"), %%mm3	\n\t"
 				"punpcklbw %%mm7, %%mm2		\n\t"
 				"punpcklbw %%mm7, %%mm3		\n\t"
 				"pmaddwd %%mm6, %%mm4		\n\t"
@@ -2162,39 +2162,39 @@
 				"packssdw %%mm3, %%mm2		\n\t"
 				"pmaddwd %%mm5, %%mm4		\n\t"
 				"pmaddwd %%mm5, %%mm2		\n\t"
-				"addl $24, %%ebx		\n\t"
+				"add $24, %%"REG_b"		\n\t"
 				"packssdw %%mm2, %%mm4		\n\t"
 				"psraw $7, %%mm4		\n\t"
 
 				"packuswb %%mm4, %%mm0		\n\t"
 				"paddusb "MANGLE(bgr2YOffset)", %%mm0	\n\t"
 
-				MOVNTQ" %%mm0, (%1, %%eax)	\n\t"
-				"addl $8, %%eax			\n\t"
+				MOVNTQ" %%mm0, (%1, %%"REG_a")	\n\t"
+				"add $8, %%"REG_a"		\n\t"
 				" js 1b				\n\t"
-				: : "r" (src+width*3), "r" (ydst+width), "g" (-width)
-				: "%eax", "%ebx"
+				: : "r" (src+width*3), "r" (ydst+width), "g" ((long)-width)
+				: "%"REG_a, "%"REG_b
 			);
 			ydst += lumStride;
 			src  += srcStride;
 		}
 		src -= srcStride*2;
 		asm volatile(
-			"movl %4, %%eax			\n\t"
+			"mov %4, %%"REG_a"		\n\t"
 			"movq "MANGLE(w1111)", %%mm5		\n\t"
 			"movq "MANGLE(bgr2UCoeff)", %%mm6		\n\t"
 			"pxor %%mm7, %%mm7		\n\t"
-			"leal (%%eax, %%eax, 2), %%ebx	\n\t"
-			"addl %%ebx, %%ebx		\n\t"
+			"lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
+			"add %%"REG_b", %%"REG_b"	\n\t"
 			".balign 16			\n\t"
 			"1:				\n\t"
-			PREFETCH" 64(%0, %%ebx)		\n\t"
-			PREFETCH" 64(%1, %%ebx)		\n\t"
+			PREFETCH" 64(%0, %%"REG_b")	\n\t"
+			PREFETCH" 64(%1, %%"REG_b")	\n\t"
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-			"movq (%0, %%ebx), %%mm0	\n\t"
-			"movq (%1, %%ebx), %%mm1	\n\t"
-			"movq 6(%0, %%ebx), %%mm2	\n\t"
-			"movq 6(%1, %%ebx), %%mm3	\n\t"
+			"movq (%0, %%"REG_b"), %%mm0	\n\t"
+			"movq (%1, %%"REG_b"), %%mm1	\n\t"
+			"movq 6(%0, %%"REG_b"), %%mm2	\n\t"
+			"movq 6(%1, %%"REG_b"), %%mm3	\n\t"
 			PAVGB" %%mm1, %%mm0		\n\t"
 			PAVGB" %%mm3, %%mm2		\n\t"
 			"movq %%mm0, %%mm1		\n\t"
@@ -2206,10 +2206,10 @@
 			"punpcklbw %%mm7, %%mm0		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
 #else
-			"movd (%0, %%ebx), %%mm0	\n\t"
-			"movd (%1, %%ebx), %%mm1	\n\t"
-			"movd 3(%0, %%ebx), %%mm2	\n\t"
-			"movd 3(%1, %%ebx), %%mm3	\n\t"
+			"movd (%0, %%"REG_b"), %%mm0	\n\t"
+			"movd (%1, %%"REG_b"), %%mm1	\n\t"
+			"movd 3(%0, %%"REG_b"), %%mm2	\n\t"
+			"movd 3(%1, %%"REG_b"), %%mm3	\n\t"
 			"punpcklbw %%mm7, %%mm0		\n\t"
 			"punpcklbw %%mm7, %%mm1		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
@@ -2217,10 +2217,10 @@
 			"paddw %%mm1, %%mm0		\n\t"
 			"paddw %%mm3, %%mm2		\n\t"
 			"paddw %%mm2, %%mm0		\n\t"
-			"movd 6(%0, %%ebx), %%mm4	\n\t"
-			"movd 6(%1, %%ebx), %%mm1	\n\t"
-			"movd 9(%0, %%ebx), %%mm2	\n\t"
-			"movd 9(%1, %%ebx), %%mm3	\n\t"
+			"movd 6(%0, %%"REG_b"), %%mm4	\n\t"
+			"movd 6(%1, %%"REG_b"), %%mm1	\n\t"
+			"movd 9(%0, %%"REG_b"), %%mm2	\n\t"
+			"movd 9(%1, %%"REG_b"), %%mm3	\n\t"
 			"punpcklbw %%mm7, %%mm4		\n\t"
 			"punpcklbw %%mm7, %%mm1		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
@@ -2252,10 +2252,10 @@
 			"psraw $7, %%mm0		\n\t"
 
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-			"movq 12(%0, %%ebx), %%mm4	\n\t"
-			"movq 12(%1, %%ebx), %%mm1	\n\t"
-			"movq 18(%0, %%ebx), %%mm2	\n\t"
-			"movq 18(%1, %%ebx), %%mm3	\n\t"
+			"movq 12(%0, %%"REG_b"), %%mm4	\n\t"
+			"movq 12(%1, %%"REG_b"), %%mm1	\n\t"
+			"movq 18(%0, %%"REG_b"), %%mm2	\n\t"
+			"movq 18(%1, %%"REG_b"), %%mm3	\n\t"
 			PAVGB" %%mm1, %%mm4		\n\t"
 			PAVGB" %%mm3, %%mm2		\n\t"
 			"movq %%mm4, %%mm1		\n\t"
@@ -2267,10 +2267,10 @@
 			"punpcklbw %%mm7, %%mm4		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
 #else
-			"movd 12(%0, %%ebx), %%mm4	\n\t"
-			"movd 12(%1, %%ebx), %%mm1	\n\t"
-			"movd 15(%0, %%ebx), %%mm2	\n\t"
-			"movd 15(%1, %%ebx), %%mm3	\n\t"
+			"movd 12(%0, %%"REG_b"), %%mm4	\n\t"
+			"movd 12(%1, %%"REG_b"), %%mm1	\n\t"
+			"movd 15(%0, %%"REG_b"), %%mm2	\n\t"
+			"movd 15(%1, %%"REG_b"), %%mm3	\n\t"
 			"punpcklbw %%mm7, %%mm4		\n\t"
 			"punpcklbw %%mm7, %%mm1		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
@@ -2278,10 +2278,10 @@
 			"paddw %%mm1, %%mm4		\n\t"
 			"paddw %%mm3, %%mm2		\n\t"
 			"paddw %%mm2, %%mm4		\n\t"
-			"movd 18(%0, %%ebx), %%mm5	\n\t"
-			"movd 18(%1, %%ebx), %%mm1	\n\t"
-			"movd 21(%0, %%ebx), %%mm2	\n\t"
-			"movd 21(%1, %%ebx), %%mm3	\n\t"
+			"movd 18(%0, %%"REG_b"), %%mm5	\n\t"
+			"movd 18(%1, %%"REG_b"), %%mm1	\n\t"
+			"movd 21(%0, %%"REG_b"), %%mm2	\n\t"
+			"movd 21(%1, %%"REG_b"), %%mm3	\n\t"
 			"punpcklbw %%mm7, %%mm5		\n\t"
 			"punpcklbw %%mm7, %%mm1		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
@@ -2310,7 +2310,7 @@
 			"packssdw %%mm3, %%mm1		\n\t"
 			"pmaddwd %%mm5, %%mm4		\n\t"
 			"pmaddwd %%mm5, %%mm1		\n\t"
-			"addl $24, %%ebx		\n\t"
+			"add $24, %%"REG_b"		\n\t"
 			"packssdw %%mm1, %%mm4		\n\t" // V3 V2 U3 U2
 			"psraw $7, %%mm4		\n\t"
 
@@ -2319,14 +2319,13 @@
 			"punpckhdq %%mm4, %%mm1		\n\t"
 			"packsswb %%mm1, %%mm0		\n\t"
 			"paddb "MANGLE(bgr2UVOffset)", %%mm0	\n\t"
-
-			"movd %%mm0, (%2, %%eax)	\n\t"
+			"movd %%mm0, (%2, %%"REG_a")	\n\t"
 			"punpckhdq %%mm0, %%mm0		\n\t"
-			"movd %%mm0, (%3, %%eax)	\n\t"
-			"addl $4, %%eax			\n\t"
+			"movd %%mm0, (%3, %%"REG_a")	\n\t"
+			"add $4, %%"REG_a"		\n\t"
 			" js 1b				\n\t"
-			: : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
-			: "%eax", "%ebx"
+			: : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" ((long)-chromWidth)
+			: "%"REG_a, "%"REG_b
 		);
 
 		udst += chromStride;
@@ -2403,48 +2402,48 @@
 #ifdef HAVE_MMX
 #ifdef HAVE_SSE2
 		asm(
-			"xorl %%eax, %%eax		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t"
 			"1:				\n\t"
-			PREFETCH" 64(%1, %%eax)		\n\t"
-			PREFETCH" 64(%2, %%eax)		\n\t"
-			"movdqa (%1, %%eax), %%xmm0	\n\t"
-			"movdqa (%1, %%eax), %%xmm1	\n\t"
-			"movdqa (%2, %%eax), %%xmm2	\n\t"
+			PREFETCH" 64(%1, %%"REG_a")	\n\t"
+			PREFETCH" 64(%2, %%"REG_a")	\n\t"
+			"movdqa (%1, %%"REG_a"), %%xmm0	\n\t"
+			"movdqa (%1, %%"REG_a"), %%xmm1	\n\t"
+			"movdqa (%2, %%"REG_a"), %%xmm2	\n\t"
 			"punpcklbw %%xmm2, %%xmm0	\n\t"
 			"punpckhbw %%xmm2, %%xmm1	\n\t"
-			"movntdq %%xmm0, (%0, %%eax, 2)	\n\t"
-			"movntdq %%xmm1, 16(%0, %%eax, 2)\n\t"
-			"addl $16, %%eax			\n\t"
-			"cmpl %3, %%eax			\n\t"
+			"movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t"
+			"movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t"
+			"add $16, %%"REG_a"		\n\t"
+			"cmp %3, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
-			::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
-			: "memory", "%eax"
+			::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15)
+			: "memory", "%"REG_a""
 		);
 #else
 		asm(
-			"xorl %%eax, %%eax		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t"
 			"1:				\n\t"
-			PREFETCH" 64(%1, %%eax)		\n\t"
-			PREFETCH" 64(%2, %%eax)		\n\t"
-			"movq (%1, %%eax), %%mm0	\n\t"
-			"movq 8(%1, %%eax), %%mm2	\n\t"
+			PREFETCH" 64(%1, %%"REG_a")	\n\t"
+			PREFETCH" 64(%2, %%"REG_a")	\n\t"
+			"movq (%1, %%"REG_a"), %%mm0	\n\t"
+			"movq 8(%1, %%"REG_a"), %%mm2	\n\t"
 			"movq %%mm0, %%mm1		\n\t"
 			"movq %%mm2, %%mm3		\n\t"
-			"movq (%2, %%eax), %%mm4	\n\t"
-			"movq 8(%2, %%eax), %%mm5	\n\t"
+			"movq (%2, %%"REG_a"), %%mm4	\n\t"
+			"movq 8(%2, %%"REG_a"), %%mm5	\n\t"
 			"punpcklbw %%mm4, %%mm0		\n\t"
 			"punpckhbw %%mm4, %%mm1		\n\t"
 			"punpcklbw %%mm5, %%mm2		\n\t"
 			"punpckhbw %%mm5, %%mm3		\n\t"
-			MOVNTQ" %%mm0, (%0, %%eax, 2)	\n\t"
-			MOVNTQ" %%mm1, 8(%0, %%eax, 2)	\n\t"
-			MOVNTQ" %%mm2, 16(%0, %%eax, 2)	\n\t"
-			MOVNTQ" %%mm3, 24(%0, %%eax, 2)	\n\t"
-			"addl $16, %%eax			\n\t"
-			"cmpl %3, %%eax			\n\t"
+			MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t"
+			MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t"
+			MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t"
+			MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t"
+			"add $16, %%"REG_a"		\n\t"
+			"cmp %3, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
-			::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
-			: "memory", "%eax"
+			::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15)
+			: "memory", "%"REG_a
 		);
 #endif
 		for(w= (width&(~15)); w < width; w++)
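
The interleave loops above also show the third recurring change: explicit operand-size suffixes are dropped (addl/cmpl become add/cmp) so the assembler infers the width from the register operand itself. MMX/SSE instructions such as movq, movntdq and punpcklbw are untouched, since their operand width is fixed by the register class. A sketch, assuming the REG_a macros:

    long width = 64;                        /* hypothetical loop bound     */
    asm volatile(
        "xor %%"REG_a", %%"REG_a"   \n\t"   /* xorl on x86, xorq on x86_64 */
        "1:                         \n\t"
        "add $16, %%"REG_a"         \n\t"   /* sized by the register name  */
        "cmp %0, %%"REG_a"          \n\t"
        " jb 1b                     \n\t"
        :: "r" (width - 15)                 /* widened comparison bound    */
        : "%"REG_a);
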
@@ -2582,7 +2581,7 @@
 			int srcStride1, int srcStride2,
 			int srcStride3, int dstStride)
 {
-    unsigned y,x,w,h;
+    unsigned long y,x,w,h;
     w=width/2; h=height;
     for(y=0;y<h;y++){
 	const uint8_t* yp=src1+srcStride1*y;
--- a/postproc/swscale-example.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/postproc/swscale-example.c	Thu Oct 21 11:55:20 2004 +0000
@@ -104,7 +104,7 @@
 	sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
 	sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 	asm volatile ("emms\n\t");
 #endif
 	     
@@ -199,14 +199,14 @@
 			rgb_data[ x + y*4*W]= random();
 		}
 	}
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 	sws_rgb2rgb_init(SWS_CPU_CAPS_MMX*0);
 #else
 	sws_rgb2rgb_init(0);
 #endif
 	sws_scale(sws, rgb_src, rgb_stride, 0, H   , src, stride);
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 	asm volatile ("emms\n\t");
 #endif
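
The swscale-example.c hunks are the mechanical part of the port: every #ifdef ARCH_X86 that merely gates x86 inline asm or CPU-caps handling is widened to cover both ABIs. The emms case, wrapped as a helper for illustration (helper name hypothetical):

    static inline void leave_mmx_state(void)
    {
    #if defined(ARCH_X86) || defined(ARCH_X86_64)
        /* MMX registers alias the x87 stack, so emms must run before
           any floating-point code, on 32- and 64-bit builds alike. */
        asm volatile ("emms\n\t");
    #endif
    }
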
 
--- a/postproc/swscale.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/postproc/swscale.c	Thu Oct 21 11:55:20 2004 +0000
@@ -145,7 +145,7 @@
 #define MIN(a,b) ((a) > (b) ? (b) : (a))
 #define MAX(a,b) ((a) < (b) ? (b) : (a))
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 static uint64_t attribute_used __attribute__((aligned(8))) bF8=       0xF8F8F8F8F8F8F8F8LL;
 static uint64_t attribute_used __attribute__((aligned(8))) bFC=       0xFCFCFCFCFCFCFCFCLL;
 static uint64_t __attribute__((aligned(8))) w10=       0x0010001000100010LL;
@@ -204,7 +204,7 @@
 extern const uint8_t dither_8x8_73[8][8];
 extern const uint8_t dither_8x8_220[8][8];
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 void in_asm_used_var_warning_killer()
 {
  volatile int i= bF8+bFC+w10+
@@ -679,7 +679,7 @@
 #endif //HAVE_ALTIVEC
 #endif //ARCH_POWERPC
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 #if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_MMX
@@ -692,7 +692,7 @@
 #if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_3DNOW
 #endif
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
 
 #undef HAVE_MMX
 #undef HAVE_MMX2
@@ -716,7 +716,7 @@
 #endif
 #endif //ARCH_POWERPC
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 //X86 versions
 /*
@@ -758,7 +758,7 @@
 #include "swscale_template.c"
 #endif
 
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
 
 // minor note: the HAVE_xyz is messed up after that line so don't use it
 
@@ -783,7 +783,7 @@
 	int minFilterSize;
 	double *filter=NULL;
 	double *filter2=NULL;
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 	if(flags & SWS_CPU_CAPS_MMX)
 		asm volatile("emms\n\t"::: "memory"); //FIXME this shouldnt be required but it IS (even for non mmx versions)
 #endif
@@ -1142,17 +1142,17 @@
 	free(filter);
 }
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits)
 {
 	uint8_t *fragmentA;
-	int imm8OfPShufW1A;
-	int imm8OfPShufW2A;
-	int fragmentLengthA;
+	long imm8OfPShufW1A;
+	long imm8OfPShufW2A;
+	long fragmentLengthA;
 	uint8_t *fragmentB;
-	int imm8OfPShufW1B;
-	int imm8OfPShufW2B;
-	int fragmentLengthB;
+	long imm8OfPShufW1B;
+	long imm8OfPShufW2B;
+	long fragmentLengthB;
 	int fragmentPos;
 
 	int xpos, i;
@@ -1165,9 +1165,9 @@
 		"jmp 9f				\n\t"
 	// Begin
 		"0:				\n\t"
-		"movq (%%edx, %%eax), %%mm3	\n\t" 
-		"movd (%%ecx, %%esi), %%mm0	\n\t" 
-		"movd 1(%%ecx, %%esi), %%mm1	\n\t"
+		"movq (%%"REG_d", %%"REG_a"), %%mm3\n\t" 
+		"movd (%%"REG_c", %%"REG_S"), %%mm0\n\t" 
+		"movd 1(%%"REG_c", %%"REG_S"), %%mm1\n\t"
 		"punpcklbw %%mm7, %%mm1		\n\t"
 		"punpcklbw %%mm7, %%mm0		\n\t"
 		"pshufw $0xFF, %%mm1, %%mm1	\n\t"
@@ -1175,26 +1175,26 @@
 		"pshufw $0xFF, %%mm0, %%mm0	\n\t"
 		"2:				\n\t"
 		"psubw %%mm1, %%mm0		\n\t"
-		"movl 8(%%ebx, %%eax), %%esi	\n\t"
+		"mov 8(%%"REG_b", %%"REG_a"), %%"REG_S"\n\t"
 		"pmullw %%mm3, %%mm0		\n\t"
 		"psllw $7, %%mm1		\n\t"
 		"paddw %%mm1, %%mm0		\n\t"
 
-		"movq %%mm0, (%%edi, %%eax)	\n\t"
+		"movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
 
-		"addl $8, %%eax			\n\t"
+		"add $8, %%"REG_a"		\n\t"
 	// End
 		"9:				\n\t"
 //		"int $3\n\t"
-		"leal 0b, %0			\n\t"
-		"leal 1b, %1			\n\t"
-		"leal 2b, %2			\n\t"
-		"decl %1			\n\t"
-		"decl %2			\n\t"
-		"subl %0, %1			\n\t"
-		"subl %0, %2			\n\t"
-		"leal 9b, %3			\n\t"
-		"subl %0, %3			\n\t"
+		"lea 0b, %0			\n\t"
+		"lea 1b, %1			\n\t"
+		"lea 2b, %2			\n\t"
+		"dec %1				\n\t"
+		"dec %2				\n\t"
+		"sub %0, %1			\n\t"
+		"sub %0, %2			\n\t"
+		"lea 9b, %3			\n\t"
+		"sub %0, %3			\n\t"
 
 
 		:"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
@@ -1205,34 +1205,34 @@
 		"jmp 9f				\n\t"
 	// Begin
 		"0:				\n\t"
-		"movq (%%edx, %%eax), %%mm3	\n\t" 
-		"movd (%%ecx, %%esi), %%mm0	\n\t" 
+		"movq (%%"REG_d", %%"REG_a"), %%mm3\n\t" 
+		"movd (%%"REG_c", %%"REG_S"), %%mm0\n\t" 
 		"punpcklbw %%mm7, %%mm0		\n\t"
 		"pshufw $0xFF, %%mm0, %%mm1	\n\t"
 		"1:				\n\t"
 		"pshufw $0xFF, %%mm0, %%mm0	\n\t"
 		"2:				\n\t"
 		"psubw %%mm1, %%mm0		\n\t"
-		"movl 8(%%ebx, %%eax), %%esi	\n\t"
+		"mov 8(%%"REG_b", %%"REG_a"), %%"REG_S"\n\t"
 		"pmullw %%mm3, %%mm0		\n\t"
 		"psllw $7, %%mm1		\n\t"
 		"paddw %%mm1, %%mm0		\n\t"
 
-		"movq %%mm0, (%%edi, %%eax)	\n\t"
+		"movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
 
-		"addl $8, %%eax			\n\t"
+		"add $8, %%"REG_a"		\n\t"
 	// End
 		"9:				\n\t"
 //		"int $3\n\t"
-		"leal 0b, %0			\n\t"
-		"leal 1b, %1			\n\t"
-		"leal 2b, %2			\n\t"
-		"decl %1			\n\t"
-		"decl %2			\n\t"
-		"subl %0, %1			\n\t"
-		"subl %0, %2			\n\t"
-		"leal 9b, %3			\n\t"
-		"subl %0, %3			\n\t"
+		"lea 0b, %0			\n\t"
+		"lea 1b, %1			\n\t"
+		"lea 2b, %2			\n\t"
+		"dec %1				\n\t"
+		"dec %2				\n\t"
+		"sub %0, %1			\n\t"
+		"sub %0, %2			\n\t"
+		"lea 9b, %3			\n\t"
+		"sub %0, %3			\n\t"
 
 
 		:"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
@@ -1313,7 +1313,7 @@
 	}
 	filterPos[i/2]= xpos>>16; // needed to jump to the next part
 }
-#endif // ARCH_X86
+#endif // ARCH_X86 || ARCH_X86_64
 
 static void globalInit(){
     // generating tables:
@@ -1327,7 +1327,7 @@
 static SwsFunc getSwsFunc(int flags){
     
 #ifdef RUNTIME_CPUDETECT
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 	// ordered by speed, fastest first
 	if(flags & SWS_CPU_CAPS_MMX2)
 		return swScale_MMX2;
@@ -1755,7 +1755,7 @@
 	int unscaled, needsDither;
 	int srcFormat, dstFormat;
 	SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 	if(flags & SWS_CPU_CAPS_MMX)
 		asm volatile("emms\n\t"::: "memory");
 #endif
@@ -1995,7 +1995,7 @@
 				 (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
 				 srcFilter->chrH, dstFilter->chrH, c->param);
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 // can't downscale !!!
 		if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
 		{
@@ -2136,7 +2136,7 @@
 		}
 		else
 		{
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 			MSG_V("SwScaler: using X86-Asm scaler for horizontal scaling\n");
 #else
 			if(flags & SWS_FAST_BILINEAR)
--- a/postproc/swscale_template.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/postproc/swscale_template.c	Thu Oct 21 11:55:20 2004 +0000
@@ -16,6 +16,7 @@
     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
+#undef REAL_MOVNTQ
 #undef MOVNTQ
 #undef PAVGB
 #undef PREFETCH
@@ -54,29 +55,30 @@
 #endif
 
 #ifdef HAVE_MMX2
-#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
+#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
 #else
-#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
+#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
 #endif
+#define MOVNTQ(a,b)  REAL_MOVNTQ(a,b)
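
The REAL_ indirection introduced here is the classic double-expansion idiom: a macro argument used with the stringizing # operator is not macro-expanded first, so routing every call through a plain forwarding macro buys one extra expansion pass. That is what lets call sites pass the unquoted REGa token (see the register-macro sketch earlier) and still get the concrete register into the string:

    #define REGa rax                                  /* x86_64 case, assumed */
    #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
    #define MOVNTQ(a,b)  REAL_MOVNTQ(a,b)             /* expands a,b first    */

    /* MOVNTQ(%%mm0, (%1, %%REGa))
        -> REAL_MOVNTQ(%%mm0, (%1, %%rax))
        -> "movntq %%mm0, (%1, %%rax) \n\t"           */
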
 
 #ifdef HAVE_ALTIVEC
 #include "swscale_altivec_template.c"
 #endif
 
 #define YSCALEYUV2YV12X(x, offset) \
-			"xorl %%eax, %%eax		\n\t"\
+			"xor %%"REG_a", %%"REG_a"	\n\t"\
 			"movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
 			"movq %%mm3, %%mm4		\n\t"\
-			"leal " offset "(%0), %%edx	\n\t"\
-			"movl (%%edx), %%esi		\n\t"\
+			"lea " offset "(%0), %%"REG_d"	\n\t"\
+			"mov (%%"REG_d"), %%"REG_S"	\n\t"\
 			".balign 16			\n\t" /* FIXME Unroll? */\
 			"1:				\n\t"\
-			"movq 8(%%edx), %%mm0		\n\t" /* filterCoeff */\
-			"movq " #x "(%%esi, %%eax, 2), %%mm2	\n\t" /* srcData */\
-			"movq 8+" #x "(%%esi, %%eax, 2), %%mm5	\n\t" /* srcData */\
-			"addl $16, %%edx		\n\t"\
-			"movl (%%edx), %%esi		\n\t"\
-			"testl %%esi, %%esi		\n\t"\
+			"movq 8(%%"REG_d"), %%mm0	\n\t" /* filterCoeff */\
+			"movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
+			"movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
+			"add $16, %%"REG_d"		\n\t"\
+			"mov (%%"REG_d"), %%"REG_S"	\n\t"\
+			"test %%"REG_S", %%"REG_S"	\n\t"\
 			"pmulhw %%mm0, %%mm2		\n\t"\
 			"pmulhw %%mm0, %%mm5		\n\t"\
 			"paddw %%mm2, %%mm3		\n\t"\
@@ -85,26 +87,26 @@
 			"psraw $3, %%mm3		\n\t"\
 			"psraw $3, %%mm4		\n\t"\
 			"packuswb %%mm4, %%mm3		\n\t"\
-			MOVNTQ(%%mm3, (%1, %%eax))\
-			"addl $8, %%eax			\n\t"\
-			"cmpl %2, %%eax			\n\t"\
+			MOVNTQ(%%mm3, (%1, %%REGa))\
+			"add $8, %%"REG_a"		\n\t"\
+			"cmp %2, %%"REG_a"		\n\t"\
 			"movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
 			"movq %%mm3, %%mm4		\n\t"\
-			"leal " offset "(%0), %%edx	\n\t"\
-			"movl (%%edx), %%esi		\n\t"\
+			"lea " offset "(%0), %%"REG_d"	\n\t"\
+			"mov (%%"REG_d"), %%"REG_S"	\n\t"\
 			"jb 1b				\n\t"
 
 #define YSCALEYUV2YV121 \
-			"movl %2, %%eax			\n\t"\
+			"mov %2, %%"REG_a"		\n\t"\
 			".balign 16			\n\t" /* FIXME Unroll? */\
 			"1:				\n\t"\
-			"movq (%0, %%eax, 2), %%mm0	\n\t"\
-			"movq 8(%0, %%eax, 2), %%mm1	\n\t"\
+			"movq (%0, %%"REG_a", 2), %%mm0	\n\t"\
+			"movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
 			"psraw $7, %%mm0		\n\t"\
 			"psraw $7, %%mm1		\n\t"\
 			"packuswb %%mm1, %%mm0		\n\t"\
-			MOVNTQ(%%mm0, (%1, %%eax))\
-			"addl $8, %%eax			\n\t"\
+			MOVNTQ(%%mm0, (%1, %%REGa))\
+			"add $8, %%"REG_a"		\n\t"\
 			"jnc 1b				\n\t"
 
 /*
@@ -115,44 +117,44 @@
 			: "%eax", "%ebx", "%ecx", "%edx", "%esi"
 */
 #define YSCALEYUV2PACKEDX \
-		"xorl %%eax, %%eax		\n\t"\
+		"xor %%"REG_a", %%"REG_a"	\n\t"\
 		".balign 16			\n\t"\
 		"nop				\n\t"\
 		"1:				\n\t"\
-		"leal "CHR_MMX_FILTER_OFFSET"(%0), %%edx	\n\t"\
-		"movl (%%edx), %%esi		\n\t"\
+		"lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+		"mov (%%"REG_d"), %%"REG_S"	\n\t"\
 		"movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
 		"movq %%mm3, %%mm4		\n\t"\
 		".balign 16			\n\t"\
 		"2:				\n\t"\
-		"movq 8(%%edx), %%mm0		\n\t" /* filterCoeff */\
-		"movq (%%esi, %%eax), %%mm2	\n\t" /* UsrcData */\
-		"movq 4096(%%esi, %%eax), %%mm5	\n\t" /* VsrcData */\
-		"addl $16, %%edx		\n\t"\
-		"movl (%%edx), %%esi		\n\t"\
+		"movq 8(%%"REG_d"), %%mm0	\n\t" /* filterCoeff */\
+		"movq (%%"REG_S", %%"REG_a"), %%mm2	\n\t" /* UsrcData */\
+		"movq 4096(%%"REG_S", %%"REG_a"), %%mm5	\n\t" /* VsrcData */\
+		"add $16, %%"REG_d"		\n\t"\
+		"mov (%%"REG_d"), %%"REG_S"	\n\t"\
 		"pmulhw %%mm0, %%mm2		\n\t"\
 		"pmulhw %%mm0, %%mm5		\n\t"\
 		"paddw %%mm2, %%mm3		\n\t"\
 		"paddw %%mm5, %%mm4		\n\t"\
-		"testl %%esi, %%esi		\n\t"\
+		"test %%"REG_S", %%"REG_S"	\n\t"\
 		" jnz 2b			\n\t"\
 \
-		"leal "LUM_MMX_FILTER_OFFSET"(%0), %%edx	\n\t"\
-		"movl (%%edx), %%esi		\n\t"\
+		"lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+		"mov (%%"REG_d"), %%"REG_S"	\n\t"\
 		"movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
 		"movq %%mm1, %%mm7		\n\t"\
 		".balign 16			\n\t"\
 		"2:				\n\t"\
-		"movq 8(%%edx), %%mm0		\n\t" /* filterCoeff */\
-		"movq (%%esi, %%eax, 2), %%mm2	\n\t" /* Y1srcData */\
-		"movq 8(%%esi, %%eax, 2), %%mm5	\n\t" /* Y2srcData */\
-		"addl $16, %%edx		\n\t"\
-		"movl (%%edx), %%esi		\n\t"\
+		"movq 8(%%"REG_d"), %%mm0	\n\t" /* filterCoeff */\
+		"movq (%%"REG_S", %%"REG_a", 2), %%mm2	\n\t" /* Y1srcData */\
+		"movq 8(%%"REG_S", %%"REG_a", 2), %%mm5	\n\t" /* Y2srcData */\
+		"add $16, %%"REG_d"		\n\t"\
+		"mov (%%"REG_d"), %%"REG_S"	\n\t"\
 		"pmulhw %%mm0, %%mm2		\n\t"\
 		"pmulhw %%mm0, %%mm5		\n\t"\
 		"paddw %%mm2, %%mm1		\n\t"\
 		"paddw %%mm5, %%mm7		\n\t"\
-		"testl %%esi, %%esi		\n\t"\
+		"test %%"REG_S", %%"REG_S"	\n\t"\
 		" jnz 2b			\n\t"\
 
 
@@ -202,22 +204,22 @@
 		"movd %7, %%mm5			\n\t" /*uvalpha1*/\
 		"punpcklwd %%mm5, %%mm5		\n\t"\
 		"punpcklwd %%mm5, %%mm5		\n\t"\
-		"xorl %%eax, %%eax		\n\t"\
+		"xor %%"REG_a", %%"REG_a"		\n\t"\
 		".balign 16			\n\t"\
 		"1:				\n\t"\
-		"movq (%0, %%eax, 2), %%mm0	\n\t" /*buf0[eax]*/\
-		"movq (%1, %%eax, 2), %%mm1	\n\t" /*buf1[eax]*/\
-		"movq (%2, %%eax,2), %%mm2	\n\t" /* uvbuf0[eax]*/\
-		"movq (%3, %%eax,2), %%mm3	\n\t" /* uvbuf1[eax]*/\
+		"movq (%0, %%"REG_a", 2), %%mm0	\n\t" /*buf0[eax]*/\
+		"movq (%1, %%"REG_a", 2), %%mm1	\n\t" /*buf1[eax]*/\
+		"movq (%2, %%"REG_a",2), %%mm2	\n\t" /* uvbuf0[eax]*/\
+		"movq (%3, %%"REG_a",2), %%mm3	\n\t" /* uvbuf1[eax]*/\
 		"psubw %%mm1, %%mm0		\n\t" /* buf0[eax] - buf1[eax]*/\
 		"psubw %%mm3, %%mm2		\n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
 		"pmulhw %%mm6, %%mm0		\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
 		"pmulhw %%mm5, %%mm2		\n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
 		"psraw $4, %%mm1		\n\t" /* buf0[eax] - buf1[eax] >>4*/\
-		"movq 4096(%2, %%eax,2), %%mm4	\n\t" /* uvbuf0[eax+2048]*/\
+		"movq 4096(%2, %%"REG_a",2), %%mm4	\n\t" /* uvbuf0[eax+2048]*/\
 		"psraw $4, %%mm3		\n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
 		"paddw %%mm0, %%mm1		\n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
-		"movq 4096(%3, %%eax,2), %%mm0	\n\t" /* uvbuf1[eax+2048]*/\
+		"movq 4096(%3, %%"REG_a",2), %%mm0	\n\t" /* uvbuf1[eax+2048]*/\
 		"paddw %%mm2, %%mm3		\n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
 		"psubw %%mm0, %%mm4		\n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
 		"psubw "MANGLE(w80)", %%mm1	\n\t" /* 8(Y-16)*/\
@@ -248,14 +250,14 @@
 		"packuswb %%mm1, %%mm1		\n\t"
 #endif
 
-#define YSCALEYUV2PACKED(index, c) \
+#define REAL_YSCALEYUV2PACKED(index, c) \
 		"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
 		"movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
 		"psraw $3, %%mm0		\n\t"\
 		"psraw $3, %%mm1		\n\t"\
 		"movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
 		"movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
-		"xorl "#index", "#index"		\n\t"\
+		"xor "#index", "#index"		\n\t"\
 		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, "#index"), %%mm2	\n\t" /* uvbuf0[eax]*/\
@@ -284,8 +286,10 @@
 		"paddw %%mm0, %%mm1		\n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
 		"paddw %%mm6, %%mm7		\n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                 
-#define YSCALEYUV2RGB(index, c) \
-		"xorl "#index", "#index"	\n\t"\
+#define YSCALEYUV2PACKED(index, c)  REAL_YSCALEYUV2PACKED(index, c)
+                
+#define REAL_YSCALEYUV2RGB(index, c) \
+		"xor "#index", "#index"	\n\t"\
 		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, "#index"), %%mm2	\n\t" /* uvbuf0[eax]*/\
@@ -348,9 +352,10 @@
 		"packuswb %%mm6, %%mm5		\n\t"\
 		"packuswb %%mm3, %%mm4		\n\t"\
 		"pxor %%mm7, %%mm7		\n\t"
+#define YSCALEYUV2RGB(index, c)  REAL_YSCALEYUV2RGB(index, c)
                 
-#define YSCALEYUV2PACKED1(index, c) \
-		"xorl "#index", "#index"		\n\t"\
+#define REAL_YSCALEYUV2PACKED1(index, c) \
+		"xor "#index", "#index"		\n\t"\
 		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, "#index"), %%mm3	\n\t" /* uvbuf0[eax]*/\
@@ -362,8 +367,10 @@
 		"psraw $7, %%mm1		\n\t" \
 		"psraw $7, %%mm7		\n\t" \
                 
-#define YSCALEYUV2RGB1(index, c) \
-		"xorl "#index", "#index"	\n\t"\
+#define YSCALEYUV2PACKED1(index, c)  REAL_YSCALEYUV2PACKED1(index, c)
+                
+#define REAL_YSCALEYUV2RGB1(index, c) \
+		"xor "#index", "#index"	\n\t"\
 		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, "#index"), %%mm3	\n\t" /* uvbuf0[eax]*/\
@@ -409,9 +416,10 @@
 		"packuswb %%mm6, %%mm5		\n\t"\
 		"packuswb %%mm3, %%mm4		\n\t"\
 		"pxor %%mm7, %%mm7		\n\t"
+#define YSCALEYUV2RGB1(index, c)  REAL_YSCALEYUV2RGB1(index, c)
 
-#define YSCALEYUV2PACKED1b(index, c) \
-		"xorl "#index", "#index"		\n\t"\
+#define REAL_YSCALEYUV2PACKED1b(index, c) \
+		"xor "#index", "#index"		\n\t"\
 		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, "#index"), %%mm2	\n\t" /* uvbuf0[eax]*/\
@@ -426,10 +434,11 @@
 		"movq 8(%0, "#index", 2), %%mm7	\n\t" /*buf0[eax]*/\
 		"psraw $7, %%mm1		\n\t" \
 		"psraw $7, %%mm7		\n\t" 
+#define YSCALEYUV2PACKED1b(index, c)  REAL_YSCALEYUV2PACKED1b(index, c)
                 
 // do vertical chrominance interpolation
-#define YSCALEYUV2RGB1b(index, c) \
-		"xorl "#index", "#index"		\n\t"\
+#define REAL_YSCALEYUV2RGB1b(index, c) \
+		"xor "#index", "#index"		\n\t"\
 		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, "#index"), %%mm2	\n\t" /* uvbuf0[eax]*/\
@@ -479,8 +488,9 @@
 		"packuswb %%mm6, %%mm5		\n\t"\
 		"packuswb %%mm3, %%mm4		\n\t"\
 		"pxor %%mm7, %%mm7		\n\t"
+#define YSCALEYUV2RGB1b(index, c)  REAL_YSCALEYUV2RGB1b(index, c)
 
-#define WRITEBGR32(dst, dstw, index) \
+#define REAL_WRITEBGR32(dst, dstw, index) \
 		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
 			"movq %%mm2, %%mm1		\n\t" /* B */\
 			"movq %%mm5, %%mm6		\n\t" /* R */\
@@ -500,11 +510,12 @@
 			MOVNTQ(%%mm1, 16(dst, index, 4))\
 			MOVNTQ(%%mm3, 24(dst, index, 4))\
 \
-			"addl $8, "#index"		\n\t"\
-			"cmpl "#dstw", "#index"		\n\t"\
+			"add $8, "#index"		\n\t"\
+			"cmp "#dstw", "#index"		\n\t"\
 			" jb 1b				\n\t"
+#define WRITEBGR32(dst, dstw, index)  REAL_WRITEBGR32(dst, dstw, index)
 
-#define WRITEBGR16(dst, dstw, index) \
+#define REAL_WRITEBGR16(dst, dstw, index) \
 			"pand "MANGLE(bF8)", %%mm2	\n\t" /* B */\
 			"pand "MANGLE(bFC)", %%mm4	\n\t" /* G */\
 			"pand "MANGLE(bF8)", %%mm5	\n\t" /* R */\
@@ -527,11 +538,12 @@
 			MOVNTQ(%%mm2, (dst, index, 2))\
 			MOVNTQ(%%mm1, 8(dst, index, 2))\
 \
-			"addl $8, "#index"		\n\t"\
-			"cmpl "#dstw", "#index"		\n\t"\
+			"add $8, "#index"		\n\t"\
+			"cmp "#dstw", "#index"		\n\t"\
 			" jb 1b				\n\t"
+#define WRITEBGR16(dst, dstw, index)  REAL_WRITEBGR16(dst, dstw, index)
 
-#define WRITEBGR15(dst, dstw, index) \
+#define REAL_WRITEBGR15(dst, dstw, index) \
 			"pand "MANGLE(bF8)", %%mm2	\n\t" /* B */\
 			"pand "MANGLE(bF8)", %%mm4	\n\t" /* G */\
 			"pand "MANGLE(bF8)", %%mm5	\n\t" /* R */\
@@ -555,9 +567,10 @@
 			MOVNTQ(%%mm2, (dst, index, 2))\
 			MOVNTQ(%%mm1, 8(dst, index, 2))\
 \
-			"addl $8, "#index"		\n\t"\
-			"cmpl "#dstw", "#index"		\n\t"\
+			"add $8, "#index"		\n\t"\
+			"cmp "#dstw", "#index"		\n\t"\
 			" jb 1b				\n\t"
+#define WRITEBGR15(dst, dstw, index)  REAL_WRITEBGR15(dst, dstw, index)
 
 #define WRITEBGR24OLD(dst, dstw, index) \
 		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
@@ -609,10 +622,10 @@
 			MOVNTQ(%%mm0, (dst))\
 			MOVNTQ(%%mm2, 8(dst))\
 			MOVNTQ(%%mm3, 16(dst))\
-			"addl $24, "#dst"		\n\t"\
+			"add $24, "#dst"		\n\t"\
 \
-			"addl $8, "#index"		\n\t"\
-			"cmpl "#dstw", "#index"		\n\t"\
+			"add $8, "#index"		\n\t"\
+			"cmp "#dstw", "#index"		\n\t"\
 			" jb 1b				\n\t"
 
 #define WRITEBGR24MMX(dst, dstw, index) \
@@ -662,10 +675,10 @@
 			"por %%mm3, %%mm5		\n\t" /* RGBRGBRG 2 */\
 			MOVNTQ(%%mm5, 16(dst))\
 \
-			"addl $24, "#dst"		\n\t"\
+			"add $24, "#dst"		\n\t"\
 \
-			"addl $8, "#index"			\n\t"\
-			"cmpl "#dstw", "#index"			\n\t"\
+			"add $8, "#index"			\n\t"\
+			"cmp "#dstw", "#index"			\n\t"\
 			" jb 1b				\n\t"
 
 #define WRITEBGR24MMX2(dst, dstw, index) \
@@ -710,21 +723,21 @@
 			"por %%mm3, %%mm6		\n\t"\
 			MOVNTQ(%%mm6, 16(dst))\
 \
-			"addl $24, "#dst"		\n\t"\
+			"add $24, "#dst"		\n\t"\
 \
-			"addl $8, "#index"		\n\t"\
-			"cmpl "#dstw", "#index"		\n\t"\
+			"add $8, "#index"		\n\t"\
+			"cmp "#dstw", "#index"		\n\t"\
 			" jb 1b				\n\t"
 
 #ifdef HAVE_MMX2
 #undef WRITEBGR24
-#define WRITEBGR24 WRITEBGR24MMX2
+#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
 #else
 #undef WRITEBGR24
-#define WRITEBGR24 WRITEBGR24MMX
+#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
 #endif
 
-#define WRITEYUY2(dst, dstw, index) \
+#define REAL_WRITEYUY2(dst, dstw, index) \
 			"packuswb %%mm3, %%mm3		\n\t"\
 			"packuswb %%mm4, %%mm4		\n\t"\
 			"packuswb %%mm7, %%mm1		\n\t"\
@@ -736,9 +749,10 @@
 			MOVNTQ(%%mm1, (dst, index, 2))\
 			MOVNTQ(%%mm7, 8(dst, index, 2))\
 \
-			"addl $8, "#index"		\n\t"\
-			"cmpl "#dstw", "#index"		\n\t"\
+			"add $8, "#index"		\n\t"\
+			"cmp "#dstw", "#index"		\n\t"\
 			" jb 1b				\n\t"
+#define WRITEYUY2(dst, dstw, index)  REAL_WRITEYUY2(dst, dstw, index)
 
 
 static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
@@ -751,23 +765,23 @@
 		asm volatile(
 				YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
 				:: "r" (&c->redDither),
-				"r" (uDest), "m" (chrDstW)
-				: "%eax", "%edx", "%esi"
+				"r" (uDest), "m" ((long)chrDstW)
+				: "%"REG_a, "%"REG_d, "%"REG_S
 			);
 
 		asm volatile(
 				YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
 				:: "r" (&c->redDither),
-				"r" (vDest), "m" (chrDstW)
-				: "%eax", "%edx", "%esi"
+				"r" (vDest), "m" ((long)chrDstW)
+				: "%"REG_a, "%"REG_d, "%"REG_S
 			);
 	}
 
 	asm volatile(
 			YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
 			:: "r" (&c->redDither),
-			   "r" (dest), "m" (dstW)
-			: "%eax", "%edx", "%esi"
+			   "r" (dest), "m" ((long)dstW)
+			: "%"REG_a, "%"REG_d, "%"REG_S
 		);
 #else
 #ifdef HAVE_ALTIVEC
@@ -791,23 +805,23 @@
 		asm volatile(
 				YSCALEYUV2YV121
 				:: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
-				"g" (-chrDstW)
-				: "%eax"
+				"g" ((long)-chrDstW)
+				: "%"REG_a
 			);
 
 		asm volatile(
 				YSCALEYUV2YV121
 				:: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
-				"g" (-chrDstW)
-				: "%eax"
+				"g" ((long)-chrDstW)
+				: "%"REG_a
 			);
 	}
 
 	asm volatile(
 		YSCALEYUV2YV121
 		:: "r" (lumSrc + dstW), "r" (dest + dstW),
-		"g" (-dstW)
-		: "%eax"
+		"g" ((long)-dstW)
+		: "%"REG_a
 	);
 #else
 	int i;
@@ -858,12 +872,12 @@
 		{
 			asm volatile(
 				YSCALEYUV2RGBX
-				WRITEBGR32(%4, %5, %%eax)
+				WRITEBGR32(%4, %5, %%REGa)
 
 			:: "r" (&c->redDither), 
 			   "m" (dummy), "m" (dummy), "m" (dummy),
 			   "r" (dest), "m" (dstW)
-			: "%eax", "%edx", "%esi"
+			: "%"REG_a, "%"REG_d, "%"REG_S
 			);
 		}
 		break;
@@ -871,14 +885,14 @@
 		{
 			asm volatile(
 				YSCALEYUV2RGBX
-				"leal (%%eax, %%eax, 2), %%ebx	\n\t" //FIXME optimize
-				"addl %4, %%ebx			\n\t"
-				WRITEBGR24(%%ebx, %5, %%eax)
+				"lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
+				"add %4, %%"REG_b"			\n\t"
+				WRITEBGR24(%%REGb, %5, %%REGa)
 
 			:: "r" (&c->redDither), 
 			   "m" (dummy), "m" (dummy), "m" (dummy),
 			   "r" (dest), "m" (dstW)
-			: "%eax", "%ebx", "%edx", "%esi" //FIXME ebx
+			: "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
 			);
 		}
 		break;
@@ -893,12 +907,12 @@
 				"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-				WRITEBGR15(%4, %5, %%eax)
+				WRITEBGR15(%4, %5, %%REGa)
 
 			:: "r" (&c->redDither), 
 			   "m" (dummy), "m" (dummy), "m" (dummy),
 			   "r" (dest), "m" (dstW)
-			: "%eax", "%edx", "%esi"
+			: "%"REG_a, "%"REG_d, "%"REG_S
 			);
 		}
 		break;
@@ -913,12 +927,12 @@
 				"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-				WRITEBGR16(%4, %5, %%eax)
+				WRITEBGR16(%4, %5, %%REGa)
 
 			:: "r" (&c->redDither), 
 			   "m" (dummy), "m" (dummy), "m" (dummy),
 			   "r" (dest), "m" (dstW)
-			: "%eax", "%edx", "%esi"
+			: "%"REG_a, "%"REG_d, "%"REG_S
 			);
 		}
 		break;
@@ -932,12 +946,12 @@
 				"psraw $3, %%mm4		\n\t"
 				"psraw $3, %%mm1		\n\t"
 				"psraw $3, %%mm7		\n\t"
-				WRITEYUY2(%4, %5, %%eax)
+				WRITEYUY2(%4, %5, %%REGa)
 
 			:: "r" (&c->redDither), 
 			   "m" (dummy), "m" (dummy), "m" (dummy),
 			   "r" (dest), "m" (dstW)
-			: "%eax", "%edx", "%esi"
+			: "%"REG_a, "%"REG_d, "%"REG_S
 			);
 		}
 		break;
@@ -984,17 +998,17 @@
 			"punpcklwd %%mm0, %%mm3		\n\t" // BGR0BGR0
 			"punpckhwd %%mm0, %%mm1		\n\t" // BGR0BGR0
 
-			MOVNTQ(%%mm3, (%4, %%eax, 4))
-			MOVNTQ(%%mm1, 8(%4, %%eax, 4))
+			MOVNTQ(%%mm3, (%4, %%REGa, 4))
+			MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
 
-			"addl $4, %%eax			\n\t"
-			"cmpl %5, %%eax			\n\t"
+			"add $4, %%"REG_a"		\n\t"
+			"cmp %5, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
 
 
-			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
+			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
 			"m" (yalpha1), "m" (uvalpha1)
-			: "%eax"
+			: "%"REG_a
 			);
 			break;
 		case IMGFMT_BGR24:
@@ -1024,26 +1038,26 @@
 			"psrlq $24, %%mm1		\n\t" // 0BGR0000
 			"por %%mm2, %%mm1		\n\t" // RBGRR000
 
-			"movl %4, %%ebx			\n\t"
-			"addl %%eax, %%ebx		\n\t"
+			"mov %4, %%"REG_b"		\n\t"
+			"add %%"REG_a", %%"REG_b"	\n\t"
 
 #ifdef HAVE_MMX2
 			//FIXME Alignment
-			"movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
-			"movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
+			"movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
+			"movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
 #else
-			"movd %%mm3, (%%ebx, %%eax, 2)	\n\t"
+			"movd %%mm3, (%%"REG_b", %%"REG_a", 2)	\n\t"
 			"psrlq $32, %%mm3		\n\t"
-			"movd %%mm3, 4(%%ebx, %%eax, 2)	\n\t"
-			"movd %%mm1, 8(%%ebx, %%eax, 2)	\n\t"
+			"movd %%mm3, 4(%%"REG_b", %%"REG_a", 2)	\n\t"
+			"movd %%mm1, 8(%%"REG_b", %%"REG_a", 2)	\n\t"
 #endif
-			"addl $4, %%eax			\n\t"
-			"cmpl %5, %%eax			\n\t"
+			"add $4, %%"REG_a"		\n\t"
+			"cmp %5, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
 			"m" (yalpha1), "m" (uvalpha1)
-			: "%eax", "%ebx"
+			: "%"REG_a, "%"REG_b
 			);
 			break;
 		case IMGFMT_BGR15:
@@ -1068,15 +1082,15 @@
 			"por %%mm3, %%mm1		\n\t"
 			"por %%mm1, %%mm0		\n\t"
 
-			MOVNTQ(%%mm0, (%4, %%eax, 2))
+			MOVNTQ(%%mm0, (%4, %%REGa, 2))
 
-			"addl $4, %%eax			\n\t"
-			"cmpl %5, %%eax			\n\t"
+			"add $4, %%"REG_a"		\n\t"
+			"cmp %5, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
 			"m" (yalpha1), "m" (uvalpha1)
-			: "%eax"
+			: "%"REG_a
 			);
 			break;
 		case IMGFMT_BGR16:
@@ -1101,15 +1115,15 @@
 			"por %%mm3, %%mm1		\n\t"
 			"por %%mm1, %%mm0		\n\t"
 
-			MOVNTQ(%%mm0, (%4, %%eax, 2))
+			MOVNTQ(%%mm0, (%4, %%REGa, 2))
 
-			"addl $4, %%eax			\n\t"
-			"cmpl %5, %%eax			\n\t"
+			"add $4, %%"REG_a"		\n\t"
+			"cmp %5, %%"REG_a"		\n\t"
 			" jb 1b				\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
 			"m" (yalpha1), "m" (uvalpha1)
-			: "%eax"
+			: "%"REG_a
 			);
 		break;
 #endif
@@ -1188,34 +1202,34 @@
 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
 	case IMGFMT_BGR32:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB(%%eax, %5)
-				WRITEBGR32(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB(%%REGa, %5)
+				WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 	case IMGFMT_BGR24:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp			\n\t"
-				YSCALEYUV2RGB(%%eax, %5)
-				WRITEBGR24(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB(%%REGa, %5)
+				WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 	case IMGFMT_BGR15:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB(%%eax, %5)
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB(%%REGa, %5)
 		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
 				"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1223,19 +1237,19 @@
 				"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-				WRITEBGR15(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 	case IMGFMT_BGR16:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB(%%eax, %5)
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB(%%REGa, %5)
 		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
 				"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1243,23 +1257,23 @@
 				"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-				WRITEBGR16(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 	case IMGFMT_YUY2:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2PACKED(%%eax, %5)
-				WRITEYUY2(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2PACKED(%%REGa, %5)
+				WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 	default: break;
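
These two-tap cases are where even seven general registers are not enough, so the code stashes the stack pointer in the SwsContext and borrows it as a data pointer for the duration of the block; the port merely performs the save/restore at native width via REG_SP. The shape of the trick (ESP_OFFSET is a string offset macro defined elsewhere in swscale, and c/dest come from the surrounding function):

    asm volatile(
        "mov %%"REG_SP", "ESP_OFFSET"(%0)  \n\t"  /* stash esp/rsp in ctx   */
        "mov %1, %%"REG_SP"                \n\t"  /* borrow it as a pointer */
        /* ... no push/pop, calls or faults allowed in this window ...      */
        "mov "ESP_OFFSET"(%0), %%"REG_SP"  \n\t"  /* restore                */
        :: "r" (c), "m" (dest));
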
@@ -1293,54 +1307,54 @@
 		{
 		case IMGFMT_BGR32:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB1(%%eax, %5)
-				WRITEBGR32(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB1(%%REGa, %5)
+				WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		case IMGFMT_BGR24:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB1(%%eax, %5)
-				WRITEBGR24(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB1(%%REGa, %5)
+				WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		case IMGFMT_BGR15:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB1(%%eax, %5)
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB1(%%REGa, %5)
 		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
 				"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
 				"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
 				"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
-				WRITEBGR15(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		case IMGFMT_BGR16:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB1(%%eax, %5)
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB1(%%REGa, %5)
 		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
 				"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1348,25 +1362,25 @@
 				"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-				WRITEBGR16(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		case IMGFMT_YUY2:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2PACKED1(%%eax, %5)
-				WRITEYUY2(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2PACKED1(%%REGa, %5)
+				WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		}
@@ -1377,54 +1391,54 @@
 		{
 		case IMGFMT_BGR32:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB1b(%%eax, %5)
-				WRITEBGR32(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB1b(%%REGa, %5)
+				WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		case IMGFMT_BGR24:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB1b(%%eax, %5)
-				WRITEBGR24(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB1b(%%REGa, %5)
+				WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		case IMGFMT_BGR15:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB1b(%%eax, %5)
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB1b(%%REGa, %5)
 		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
 				"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
 				"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
 				"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
-				WRITEBGR15(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		case IMGFMT_BGR16:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2RGB1b(%%eax, %5)
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2RGB1b(%%REGa, %5)
 		/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
 				"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1432,25 +1446,25 @@
 				"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-				WRITEBGR16(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		case IMGFMT_YUY2:
 			asm volatile(
-				"movl %%esp, "ESP_OFFSET"(%5)		\n\t"
-				"movl %4, %%esp				\n\t"
-				YSCALEYUV2PACKED1b(%%eax, %5)
-				WRITEYUY2(%%esp, 8280(%5), %%eax)
-				"movl "ESP_OFFSET"(%5), %%esp		\n\t"
+				"mov %%"REG_SP", "ESP_OFFSET"(%5)	\n\t"
+				"mov %4, %%"REG_SP"			\n\t"
+				YSCALEYUV2PACKED1b(%%REGa, %5)
+				WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
+				"mov "ESP_OFFSET"(%5), %%"REG_SP"	\n\t"
 
 			:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
 			"r" (&c->redDither)
-			: "%eax"
+			: "%"REG_a
 			);
 			return;
 		}
@@ -1471,18 +1485,18 @@
 #ifdef HAVE_MMX
 	asm volatile(
 		"movq "MANGLE(bm01010101)", %%mm2\n\t"
-		"movl %0, %%eax			\n\t"
+		"mov %0, %%"REG_a"		\n\t"
 		"1:				\n\t"
-		"movq (%1, %%eax,2), %%mm0	\n\t"
-		"movq 8(%1, %%eax,2), %%mm1	\n\t"
+		"movq (%1, %%"REG_a",2), %%mm0	\n\t"
+		"movq 8(%1, %%"REG_a",2), %%mm1	\n\t"
 		"pand %%mm2, %%mm0		\n\t"
 		"pand %%mm2, %%mm1		\n\t"
 		"packuswb %%mm1, %%mm0		\n\t"
-		"movq %%mm0, (%2, %%eax)	\n\t"
-		"addl $8, %%eax			\n\t"
+		"movq %%mm0, (%2, %%"REG_a")	\n\t"
+		"add $8, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
-		: : "g" (-width), "r" (src+width*2), "r" (dst+width)
-		: "%eax"
+		: : "g" ((long)-width), "r" (src+width*2), "r" (dst+width)
+		: "%"REG_a
 	);
 #else
 	int i;
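
All of these packed-to-planar converters share one loop idiom: the data pointers are biased past the end by width, and a single register runs from -width up to 0, acting as both counter and addressing offset ("add $8; js 1b"). That register is exactly what the (long) casts widen. A C model of the yuy2ToY case:

    #include <stdint.h>

    static void yuy2_to_y(uint8_t *dst, const uint8_t *src, long width)
    {
        const uint8_t *s = src + width*2;   /* end-biased pointers          */
        uint8_t *d = dst + width;
        long i = -width;                    /* pointer-sized loop counter   */
        do {
            d[i] = s[2*i];                  /* keep every second byte (Y)   */
        } while (++i);                      /* js 1b: stop when i reaches 0 */
    }
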
@@ -1496,12 +1510,12 @@
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
 	asm volatile(
 		"movq "MANGLE(bm01010101)", %%mm4\n\t"
-		"movl %0, %%eax			\n\t"
+		"mov %0, %%"REG_a"		\n\t"
 		"1:				\n\t"
-		"movq (%1, %%eax,4), %%mm0	\n\t"
-		"movq 8(%1, %%eax,4), %%mm1	\n\t"
-		"movq (%2, %%eax,4), %%mm2	\n\t"
-		"movq 8(%2, %%eax,4), %%mm3	\n\t"
+		"movq (%1, %%"REG_a",4), %%mm0	\n\t"
+		"movq 8(%1, %%"REG_a",4), %%mm1	\n\t"
+		"movq (%2, %%"REG_a",4), %%mm2	\n\t"
+		"movq 8(%2, %%"REG_a",4), %%mm3	\n\t"
 		PAVGB(%%mm2, %%mm0)
 		PAVGB(%%mm3, %%mm1)
 		"psrlw $8, %%mm0		\n\t"
@@ -1512,12 +1526,12 @@
 		"pand %%mm4, %%mm1		\n\t"
 		"packuswb %%mm0, %%mm0		\n\t"
 		"packuswb %%mm1, %%mm1		\n\t"
-		"movd %%mm0, (%4, %%eax)	\n\t"
-		"movd %%mm1, (%3, %%eax)	\n\t"
-		"addl $4, %%eax			\n\t"
+		"movd %%mm0, (%4, %%"REG_a")	\n\t"
+		"movd %%mm1, (%3, %%"REG_a")	\n\t"
+		"add $4, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
-		: : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
-		: "%eax"
+		: : "g" ((long)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
+		: "%"REG_a
 	);
 #else
 	int i;
@@ -1534,18 +1548,18 @@
 {
 #ifdef HAVE_MMX
 	asm volatile(
-		"movl %0, %%eax			\n\t"
+		"mov %0, %%"REG_a"		\n\t"
 		"1:				\n\t"
-		"movq (%1, %%eax,2), %%mm0	\n\t"
-		"movq 8(%1, %%eax,2), %%mm1	\n\t"
+		"movq (%1, %%"REG_a",2), %%mm0	\n\t"
+		"movq 8(%1, %%"REG_a",2), %%mm1	\n\t"
 		"psrlw $8, %%mm0		\n\t"
 		"psrlw $8, %%mm1		\n\t"
 		"packuswb %%mm1, %%mm0		\n\t"
-		"movq %%mm0, (%2, %%eax)	\n\t"
-		"addl $8, %%eax			\n\t"
+		"movq %%mm0, (%2, %%"REG_a")	\n\t"
+		"add $8, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
-		: : "g" (-width), "r" (src+width*2), "r" (dst+width)
-		: "%eax"
+		: : "g" ((long)-width), "r" (src+width*2), "r" (dst+width)
+		: "%"REG_a
 	);
 #else
 	int i;
@@ -1559,12 +1573,12 @@
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
 	asm volatile(
 		"movq "MANGLE(bm01010101)", %%mm4\n\t"
-		"movl %0, %%eax			\n\t"
+		"mov %0, %%"REG_a"		\n\t"
 		"1:				\n\t"
-		"movq (%1, %%eax,4), %%mm0	\n\t"
-		"movq 8(%1, %%eax,4), %%mm1	\n\t"
-		"movq (%2, %%eax,4), %%mm2	\n\t"
-		"movq 8(%2, %%eax,4), %%mm3	\n\t"
+		"movq (%1, %%"REG_a",4), %%mm0	\n\t"
+		"movq 8(%1, %%"REG_a",4), %%mm1	\n\t"
+		"movq (%2, %%"REG_a",4), %%mm2	\n\t"
+		"movq 8(%2, %%"REG_a",4), %%mm3	\n\t"
 		PAVGB(%%mm2, %%mm0)
 		PAVGB(%%mm3, %%mm1)
 		"pand %%mm4, %%mm0		\n\t"
@@ -1575,12 +1589,12 @@
 		"pand %%mm4, %%mm1		\n\t"
 		"packuswb %%mm0, %%mm0		\n\t"
 		"packuswb %%mm1, %%mm1		\n\t"
-		"movd %%mm0, (%4, %%eax)	\n\t"
-		"movd %%mm1, (%3, %%eax)	\n\t"
-		"addl $4, %%eax			\n\t"
+		"movd %%mm0, (%4, %%"REG_a")	\n\t"
+		"movd %%mm1, (%3, %%"REG_a")	\n\t"
+		"add $4, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
-		: : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
-		: "%eax"
+		: : "g" ((long)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
+		: "%"REG_a
 	);
 #else
 	int i;
@@ -1635,20 +1649,20 @@
 {
 #ifdef HAVE_MMX
 	asm volatile(
-		"movl %2, %%eax			\n\t"
+		"mov %2, %%"REG_a"		\n\t"
 		"movq "MANGLE(bgr2YCoeff)", %%mm6		\n\t"
 		"movq "MANGLE(w1111)", %%mm5		\n\t"
 		"pxor %%mm7, %%mm7		\n\t"
-		"leal (%%eax, %%eax, 2), %%ebx	\n\t"
+		"lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
 		".balign 16			\n\t"
 		"1:				\n\t"
-		PREFETCH" 64(%0, %%ebx)		\n\t"
-		"movd (%0, %%ebx), %%mm0	\n\t"
-		"movd 3(%0, %%ebx), %%mm1	\n\t"
+		PREFETCH" 64(%0, %%"REG_b")	\n\t"
+		"movd (%0, %%"REG_b"), %%mm0	\n\t"
+		"movd 3(%0, %%"REG_b"), %%mm1	\n\t"
 		"punpcklbw %%mm7, %%mm0		\n\t"
 		"punpcklbw %%mm7, %%mm1		\n\t"
-		"movd 6(%0, %%ebx), %%mm2	\n\t"
-		"movd 9(%0, %%ebx), %%mm3	\n\t"
+		"movd 6(%0, %%"REG_b"), %%mm2	\n\t"
+		"movd 9(%0, %%"REG_b"), %%mm3	\n\t"
 		"punpcklbw %%mm7, %%mm2		\n\t"
 		"punpcklbw %%mm7, %%mm3		\n\t"
 		"pmaddwd %%mm6, %%mm0		\n\t"
@@ -1668,12 +1682,12 @@
 		"packssdw %%mm2, %%mm0		\n\t"
 		"psraw $7, %%mm0		\n\t"
 
-		"movd 12(%0, %%ebx), %%mm4	\n\t"
-		"movd 15(%0, %%ebx), %%mm1	\n\t"
+		"movd 12(%0, %%"REG_b"), %%mm4	\n\t"
+		"movd 15(%0, %%"REG_b"), %%mm1	\n\t"
 		"punpcklbw %%mm7, %%mm4		\n\t"
 		"punpcklbw %%mm7, %%mm1		\n\t"
-		"movd 18(%0, %%ebx), %%mm2	\n\t"
-		"movd 21(%0, %%ebx), %%mm3	\n\t"
+		"movd 18(%0, %%"REG_b"), %%mm2	\n\t"
+		"movd 21(%0, %%"REG_b"), %%mm3	\n\t"
 		"punpcklbw %%mm7, %%mm2		\n\t"
 		"punpcklbw %%mm7, %%mm3		\n\t"
 		"pmaddwd %%mm6, %%mm4		\n\t"
@@ -1690,18 +1704,18 @@
 		"packssdw %%mm3, %%mm2		\n\t"
 		"pmaddwd %%mm5, %%mm4		\n\t"
 		"pmaddwd %%mm5, %%mm2		\n\t"
-		"addl $24, %%ebx		\n\t"
+		"add $24, %%"REG_b"		\n\t"
 		"packssdw %%mm2, %%mm4		\n\t"
 		"psraw $7, %%mm4		\n\t"
 
 		"packuswb %%mm4, %%mm0		\n\t"
 		"paddusb "MANGLE(bgr2YOffset)", %%mm0	\n\t"
 
-		"movq %%mm0, (%1, %%eax)	\n\t"
-		"addl $8, %%eax			\n\t"
+		"movq %%mm0, (%1, %%"REG_a")	\n\t"
+		"add $8, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
-		: : "r" (src+width*3), "r" (dst+width), "g" (-width)
-		: "%eax", "%ebx"
+		: : "r" (src+width*3), "r" (dst+width), "g" ((long)-width)
+		: "%"REG_a, "%"REG_b
 	);
 #else
 	int i;
@@ -1720,21 +1734,21 @@
 {
 #ifdef HAVE_MMX
 	asm volatile(
-		"movl %4, %%eax			\n\t"
+		"mov %4, %%"REG_a"		\n\t"
 		"movq "MANGLE(w1111)", %%mm5		\n\t"
 		"movq "MANGLE(bgr2UCoeff)", %%mm6		\n\t"
 		"pxor %%mm7, %%mm7		\n\t"
-		"leal (%%eax, %%eax, 2), %%ebx	\n\t"
-		"addl %%ebx, %%ebx		\n\t"
+		"lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"	\n\t"
+		"add %%"REG_b", %%"REG_b"	\n\t"
 		".balign 16			\n\t"
 		"1:				\n\t"
-		PREFETCH" 64(%0, %%ebx)		\n\t"
-		PREFETCH" 64(%1, %%ebx)		\n\t"
+		PREFETCH" 64(%0, %%"REG_b")	\n\t"
+		PREFETCH" 64(%1, %%"REG_b")	\n\t"
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-		"movq (%0, %%ebx), %%mm0	\n\t"
-		"movq (%1, %%ebx), %%mm1	\n\t"
-		"movq 6(%0, %%ebx), %%mm2	\n\t"
-		"movq 6(%1, %%ebx), %%mm3	\n\t"
+		"movq (%0, %%"REG_b"), %%mm0	\n\t"
+		"movq (%1, %%"REG_b"), %%mm1	\n\t"
+		"movq 6(%0, %%"REG_b"), %%mm2	\n\t"
+		"movq 6(%1, %%"REG_b"), %%mm3	\n\t"
 		PAVGB(%%mm1, %%mm0)
 		PAVGB(%%mm3, %%mm2)
 		"movq %%mm0, %%mm1		\n\t"
@@ -1746,10 +1760,10 @@
 		"punpcklbw %%mm7, %%mm0		\n\t"
 		"punpcklbw %%mm7, %%mm2		\n\t"
 #else
-		"movd (%0, %%ebx), %%mm0	\n\t"
-		"movd (%1, %%ebx), %%mm1	\n\t"
-		"movd 3(%0, %%ebx), %%mm2	\n\t"
-		"movd 3(%1, %%ebx), %%mm3	\n\t"
+		"movd (%0, %%"REG_b"), %%mm0	\n\t"
+		"movd (%1, %%"REG_b"), %%mm1	\n\t"
+		"movd 3(%0, %%"REG_b"), %%mm2	\n\t"
+		"movd 3(%1, %%"REG_b"), %%mm3	\n\t"
 		"punpcklbw %%mm7, %%mm0		\n\t"
 		"punpcklbw %%mm7, %%mm1		\n\t"
 		"punpcklbw %%mm7, %%mm2		\n\t"
@@ -1757,10 +1771,10 @@
 		"paddw %%mm1, %%mm0		\n\t"
 		"paddw %%mm3, %%mm2		\n\t"
 		"paddw %%mm2, %%mm0		\n\t"
-		"movd 6(%0, %%ebx), %%mm4	\n\t"
-		"movd 6(%1, %%ebx), %%mm1	\n\t"
-		"movd 9(%0, %%ebx), %%mm2	\n\t"
-		"movd 9(%1, %%ebx), %%mm3	\n\t"
+		"movd 6(%0, %%"REG_b"), %%mm4	\n\t"
+		"movd 6(%1, %%"REG_b"), %%mm1	\n\t"
+		"movd 9(%0, %%"REG_b"), %%mm2	\n\t"
+		"movd 9(%1, %%"REG_b"), %%mm3	\n\t"
 		"punpcklbw %%mm7, %%mm4		\n\t"
 		"punpcklbw %%mm7, %%mm1		\n\t"
 		"punpcklbw %%mm7, %%mm2		\n\t"
@@ -1792,10 +1806,10 @@
 		"psraw $7, %%mm0		\n\t"
 
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-		"movq 12(%0, %%ebx), %%mm4	\n\t"
-		"movq 12(%1, %%ebx), %%mm1	\n\t"
-		"movq 18(%0, %%ebx), %%mm2	\n\t"
-		"movq 18(%1, %%ebx), %%mm3	\n\t"
+		"movq 12(%0, %%"REG_b"), %%mm4	\n\t"
+		"movq 12(%1, %%"REG_b"), %%mm1	\n\t"
+		"movq 18(%0, %%"REG_b"), %%mm2	\n\t"
+		"movq 18(%1, %%"REG_b"), %%mm3	\n\t"
 		PAVGB(%%mm1, %%mm4)
 		PAVGB(%%mm3, %%mm2)
 		"movq %%mm4, %%mm1		\n\t"
@@ -1807,10 +1821,10 @@
 		"punpcklbw %%mm7, %%mm4		\n\t"
 		"punpcklbw %%mm7, %%mm2		\n\t"
 #else
-		"movd 12(%0, %%ebx), %%mm4	\n\t"
-		"movd 12(%1, %%ebx), %%mm1	\n\t"
-		"movd 15(%0, %%ebx), %%mm2	\n\t"
-		"movd 15(%1, %%ebx), %%mm3	\n\t"
+		"movd 12(%0, %%"REG_b"), %%mm4	\n\t"
+		"movd 12(%1, %%"REG_b"), %%mm1	\n\t"
+		"movd 15(%0, %%"REG_b"), %%mm2	\n\t"
+		"movd 15(%1, %%"REG_b"), %%mm3	\n\t"
 		"punpcklbw %%mm7, %%mm4		\n\t"
 		"punpcklbw %%mm7, %%mm1		\n\t"
 		"punpcklbw %%mm7, %%mm2		\n\t"
@@ -1818,10 +1832,10 @@
 		"paddw %%mm1, %%mm4		\n\t"
 		"paddw %%mm3, %%mm2		\n\t"
 		"paddw %%mm2, %%mm4		\n\t"
-		"movd 18(%0, %%ebx), %%mm5	\n\t"
-		"movd 18(%1, %%ebx), %%mm1	\n\t"
-		"movd 21(%0, %%ebx), %%mm2	\n\t"
-		"movd 21(%1, %%ebx), %%mm3	\n\t"
+		"movd 18(%0, %%"REG_b"), %%mm5	\n\t"
+		"movd 18(%1, %%"REG_b"), %%mm1	\n\t"
+		"movd 21(%0, %%"REG_b"), %%mm2	\n\t"
+		"movd 21(%1, %%"REG_b"), %%mm3	\n\t"
 		"punpcklbw %%mm7, %%mm5		\n\t"
 		"punpcklbw %%mm7, %%mm1		\n\t"
 		"punpcklbw %%mm7, %%mm2		\n\t"
@@ -1850,7 +1864,7 @@
 		"packssdw %%mm3, %%mm1		\n\t"
 		"pmaddwd %%mm5, %%mm4		\n\t"
 		"pmaddwd %%mm5, %%mm1		\n\t"
-		"addl $24, %%ebx		\n\t"
+		"add $24, %%"REG_b"		\n\t"
 		"packssdw %%mm1, %%mm4		\n\t" // V3 V2 U3 U2
 		"psraw $7, %%mm4		\n\t"
 		
@@ -1860,13 +1874,13 @@
 		"packsswb %%mm1, %%mm0		\n\t"
 		"paddb "MANGLE(bgr2UVOffset)", %%mm0	\n\t"
 
-		"movd %%mm0, (%2, %%eax)	\n\t"
+		"movd %%mm0, (%2, %%"REG_a")	\n\t"
 		"punpckhdq %%mm0, %%mm0		\n\t"
-		"movd %%mm0, (%3, %%eax)	\n\t"
-		"addl $4, %%eax			\n\t"
+		"movd %%mm0, (%3, %%"REG_a")	\n\t"
+		"add $4, %%"REG_a"		\n\t"
 		" js 1b				\n\t"
-		: : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
-		: "%eax", "%ebx"
+		: : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" ((long)-width)
+		: "%"REG_a, "%"REG_b
 	);
 #else
 	int i;
@@ -2024,23 +2038,23 @@
 	assert(filterSize % 4 == 0 && filterSize>0);
 	if(filterSize==4) // always true for upscaling, sometimes for downscaling too
 	{
-		int counter= -2*dstW;
+		long counter= -2*dstW;
 		filter-= counter*2;
 		filterPos-= counter/2;
 		dst-= counter/2;
 		asm volatile(
 			"pxor %%mm7, %%mm7		\n\t"
 			"movq "MANGLE(w02)", %%mm6	\n\t"
-			"pushl %%ebp			\n\t" // we use 7 regs here ...
-			"movl %%eax, %%ebp		\n\t"
+			"push %%"REG_BP"		\n\t" // we use 7 regs here ...
+			"mov %%"REG_a", %%"REG_BP"	\n\t"
 			".balign 16			\n\t"
 			"1:				\n\t"
-			"movzwl (%2, %%ebp), %%eax	\n\t"
-			"movzwl 2(%2, %%ebp), %%ebx	\n\t"
-			"movq (%1, %%ebp, 4), %%mm1	\n\t"
-			"movq 8(%1, %%ebp, 4), %%mm3	\n\t"
-			"movd (%3, %%eax), %%mm0	\n\t"
-			"movd (%3, %%ebx), %%mm2	\n\t"
+			"movzxw (%2, %%"REG_BP"), %%"REG_a"\n\t"
+			"movzxw 2(%2, %%"REG_BP"), %%"REG_b"\n\t"
+			"movq (%1, %%"REG_BP", 4), %%mm1\n\t"
+			"movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
+			"movd (%3, %%"REG_a"), %%mm0	\n\t"
+			"movd (%3, %%"REG_b"), %%mm2	\n\t"
 			"punpcklbw %%mm7, %%mm0		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
 			"pmaddwd %%mm1, %%mm0		\n\t"
@@ -2050,44 +2064,44 @@
 			"packssdw %%mm3, %%mm0		\n\t"
 			"pmaddwd %%mm6, %%mm0		\n\t"
 			"packssdw %%mm0, %%mm0		\n\t"
-			"movd %%mm0, (%4, %%ebp)	\n\t"
-			"addl $4, %%ebp			\n\t"
+			"movd %%mm0, (%4, %%"REG_BP")	\n\t"
+			"add $4, %%"REG_BP"		\n\t"
 			" jnc 1b			\n\t"
 
-			"popl %%ebp			\n\t"
+			"pop %%"REG_BP"			\n\t"
 			: "+a" (counter)
 			: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
-			: "%ebx"
+			: "%"REG_b
 		);
 	}
 	else if(filterSize==8)
 	{
-		int counter= -2*dstW;
+		long counter= -2*dstW;
 		filter-= counter*4;
 		filterPos-= counter/2;
 		dst-= counter/2;
 		asm volatile(
 			"pxor %%mm7, %%mm7		\n\t"
 			"movq "MANGLE(w02)", %%mm6	\n\t"
-			"pushl %%ebp			\n\t" // we use 7 regs here ...
-			"movl %%eax, %%ebp		\n\t"
+			"push %%"REG_BP"		\n\t" // we use 7 regs here ...
+			"mov %%"REG_a", %%"REG_BP"	\n\t"
 			".balign 16			\n\t"
 			"1:				\n\t"
-			"movzwl (%2, %%ebp), %%eax	\n\t"
-			"movzwl 2(%2, %%ebp), %%ebx	\n\t"
-			"movq (%1, %%ebp, 8), %%mm1	\n\t"
-			"movq 16(%1, %%ebp, 8), %%mm3	\n\t"
-			"movd (%3, %%eax), %%mm0	\n\t"
-			"movd (%3, %%ebx), %%mm2	\n\t"
+			"movzxw (%2, %%"REG_BP"), %%"REG_a"\n\t"
+			"movzxw 2(%2, %%"REG_BP"), %%"REG_b"\n\t"
+			"movq (%1, %%"REG_BP", 8), %%mm1\n\t"
+			"movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
+			"movd (%3, %%"REG_a"), %%mm0	\n\t"
+			"movd (%3, %%"REG_b"), %%mm2	\n\t"
 			"punpcklbw %%mm7, %%mm0		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
 			"pmaddwd %%mm1, %%mm0		\n\t"
 			"pmaddwd %%mm2, %%mm3		\n\t"
 
-			"movq 8(%1, %%ebp, 8), %%mm1	\n\t"
-			"movq 24(%1, %%ebp, 8), %%mm5	\n\t"
-			"movd 4(%3, %%eax), %%mm4	\n\t"
-			"movd 4(%3, %%ebx), %%mm2	\n\t"
+			"movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
+			"movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
+			"movd 4(%3, %%"REG_a"), %%mm4	\n\t"
+			"movd 4(%3, %%"REG_b"), %%mm2	\n\t"
 			"punpcklbw %%mm7, %%mm4		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
 			"pmaddwd %%mm1, %%mm4		\n\t"
@@ -2100,19 +2114,19 @@
 			"packssdw %%mm3, %%mm0		\n\t"
 			"pmaddwd %%mm6, %%mm0		\n\t"
 			"packssdw %%mm0, %%mm0		\n\t"
-			"movd %%mm0, (%4, %%ebp)	\n\t"
-			"addl $4, %%ebp			\n\t"
+			"movd %%mm0, (%4, %%"REG_BP")	\n\t"
+			"add $4, %%"REG_BP"		\n\t"
 			" jnc 1b			\n\t"
 
-			"popl %%ebp			\n\t"
+			"pop %%"REG_BP"			\n\t"
 			: "+a" (counter)
 			: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
-			: "%ebx"
+			: "%"REG_b
 		);
 	}
 	else
 	{
-		int counter= -2*dstW;
+		long counter= -2*dstW;
 //		filter-= counter*filterSize/2;
 		filterPos-= counter/2;
 		dst-= counter/2;
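
(Annotation: the int -> long changes on the counters are the other half of the pattern: counter is bound straight to a register ("+a" (counter) above, "+r" here) and then used in 64-bit address arithmetic such as (%1, %%"REG_BP", 8), so it has to occupy the full register. A 32-bit store into the register zeroes the upper half, turning a negative counter into a huge positive index. Illustrative numbers, assuming dstW == 256:

    /* -2*dstW == -512 == 0x...fffffe00 */
    int  c32 = -512;  /* materialised through %eax:
                         %rax = 0x00000000fffffe00 -> enormous positive
                         index, wild memory access in (%1, %%rbp, 8)   */
    long c64 = -512;  /* materialised through %rax:
                         %rax = 0xfffffffffffffe00 -> proper negative
                         offset, and "jnc 1b" terminates as intended   */
)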
@@ -2121,42 +2135,42 @@
 			"movq "MANGLE(w02)", %%mm6	\n\t"
 			".balign 16			\n\t"
 			"1:				\n\t"
-			"movl %2, %%ecx			\n\t"
-			"movzwl (%%ecx, %0), %%eax	\n\t"
-			"movzwl 2(%%ecx, %0), %%ebx	\n\t"
-			"movl %5, %%ecx			\n\t"
+			"mov %2, %%"REG_c"		\n\t"
+			"movzxw (%%"REG_c", %0), %%"REG_a"\n\t"
+			"movzxw 2(%%"REG_c", %0), %%"REG_b"\n\t"
+			"mov %5, %%"REG_c"		\n\t"
 			"pxor %%mm4, %%mm4		\n\t"
 			"pxor %%mm5, %%mm5		\n\t"
 			"2:				\n\t"
 			"movq (%1), %%mm1		\n\t"
 			"movq (%1, %6), %%mm3		\n\t"
-			"movd (%%ecx, %%eax), %%mm0	\n\t"
-			"movd (%%ecx, %%ebx), %%mm2	\n\t"
+			"movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
+			"movd (%%"REG_c", %%"REG_b"), %%mm2\n\t"
 			"punpcklbw %%mm7, %%mm0		\n\t"
 			"punpcklbw %%mm7, %%mm2		\n\t"
 			"pmaddwd %%mm1, %%mm0		\n\t"
 			"pmaddwd %%mm2, %%mm3		\n\t"
 			"paddd %%mm3, %%mm5		\n\t"
 			"paddd %%mm0, %%mm4		\n\t"
-			"addl $8, %1			\n\t"
-			"addl $4, %%ecx			\n\t"
-			"cmpl %4, %%ecx			\n\t"
+			"add $8, %1			\n\t"
+			"add $4, %%"REG_c"		\n\t"
+			"cmp %4, %%"REG_c"		\n\t"
 			" jb 2b				\n\t"
-			"addl %6, %1			\n\t"
+			"add %6, %1			\n\t"
 			"psrad $8, %%mm4		\n\t"
 			"psrad $8, %%mm5		\n\t"
 			"packssdw %%mm5, %%mm4		\n\t"
 			"pmaddwd %%mm6, %%mm4		\n\t"
 			"packssdw %%mm4, %%mm4		\n\t"
-			"movl %3, %%eax			\n\t"
-			"movd %%mm4, (%%eax, %0)	\n\t"
-			"addl $4, %0			\n\t"
+			"mov %3, %%"REG_a"		\n\t"
+			"movd %%mm4, (%%"REG_a", %0)	\n\t"
+			"add $4, %0			\n\t"
 			" jnc 1b			\n\t"
 
 			: "+r" (counter), "+r" (filter)
 			: "m" (filterPos), "m" (dst), "m"(src+filterSize),
-			  "m" (src), "r" (filterSize*2)
-			: "%ebx", "%eax", "%ecx"
+			  "m" (src), "r" ((long)filterSize*2)
+			: "%"REG_b, "%"REG_a, "%"REG_c
 		);
 	}
 #else
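
(Annotation: the casts inside the operand lists follow the same width rule. "r" ((long)filterSize*2) guarantees a full-width register for the 64-bit "add %6, %1"; the (long) casts on "m" operands matter because a memory operand has no size of its own -- the instruction takes it from the register -- so a 64-bit op against the stack slot of a plain int would read four bytes of neighbouring junk. A reduced, x86_64-only illustration with hypothetical variables:

    /* Hypothetical: memory-operand access width follows the register. */
    static long width_demo(void)
    {
        long n64 = 100;
        long r;
        __asm__("mov %1, %0" : "=r"(r) : "m"(n64)); /* 8-byte load: OK */
        /* With "m" of a plain int the same mov would still load 8
         * bytes, 4 of them garbage -- hence "m" ((long)dstWidth)
         * later in this file.                                        */
        return r;
    }
)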
@@ -2241,28 +2255,28 @@
     }
     else // Fast Bilinear upscale / crap downscale
     {
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #ifdef HAVE_MMX2
 	int i;
 	if(canMMX2BeUsed)
 	{
 		asm volatile(
 			"pxor %%mm7, %%mm7		\n\t"
-			"movl %0, %%ecx			\n\t"
-			"movl %1, %%edi			\n\t"
-			"movl %2, %%edx			\n\t"
-			"movl %3, %%ebx			\n\t"
-			"xorl %%eax, %%eax		\n\t" // i
-			PREFETCH" (%%ecx)		\n\t"
-			PREFETCH" 32(%%ecx)		\n\t"
-			PREFETCH" 64(%%ecx)		\n\t"
+			"mov %0, %%"REG_c"		\n\t"
+			"mov %1, %%"REG_D"		\n\t"
+			"mov %2, %%"REG_d"		\n\t"
+			"mov %3, %%"REG_b"		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t" // i
+			PREFETCH" (%%"REG_c")		\n\t"
+			PREFETCH" 32(%%"REG_c")		\n\t"
+			PREFETCH" 64(%%"REG_c")		\n\t"
 
 #define FUNNY_Y_CODE \
-			"movl (%%ebx), %%esi		\n\t"\
+			"mov (%%"REG_b"), %%"REG_S"	\n\t"\
 			"call *%4			\n\t"\
-			"addl (%%ebx, %%eax), %%ecx	\n\t"\
-			"addl %%eax, %%edi		\n\t"\
-			"xorl %%eax, %%eax		\n\t"\
+			"addl (%%"REG_b", %%"REG_a"), %%ecx\n\t"\
+			"add %%"REG_a", %%"REG_d"	\n\t"\
+			"xor %%"REG_a", %%"REG_a"	\n\t"\
 
 FUNNY_Y_CODE
 FUNNY_Y_CODE
@@ -2275,7 +2289,7 @@
 
 			:: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
 			"m" (funnyYCode)
-			: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
+			: "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_d
 		);
 		for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
 	}
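
(Annotation: the clobber lists have to track the conversion too. They are matched by register, and gcc treats %ebx and %rbx as the same hard register, so either spelling is accepted -- but spelling them through the same REG_* macros as the code keeps the declaration in step with what the template actually tramples, and avoids the copy/paste slips where one register is listed twice and another not at all. The pattern, using the macros sketched earlier:

    /* Illustrative: clobbers pasted from the same macros as the code. */
    __asm__ volatile(
        "xor %%"REG_a", %%"REG_a"	\n\t"
        "add %%"REG_a", %%"REG_b"	\n\t"
        : /* no outputs */
        : /* no inputs  */
        : "%"REG_a, "%"REG_b);
)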
@@ -2284,43 +2298,43 @@
 #endif
 	//NO MMX just normal asm ...
 	asm volatile(
-		"xorl %%eax, %%eax		\n\t" // i
-		"xorl %%ebx, %%ebx		\n\t" // xx
+		"xor %%"REG_a", %%"REG_a"	\n\t" // i
+		"xor %%"REG_b", %%"REG_b"	\n\t" // xx
 		"xorl %%ecx, %%ecx		\n\t" // 2*xalpha
 		".balign 16			\n\t"
 		"1:				\n\t"
-		"movzbl  (%0, %%ebx), %%edi	\n\t" //src[xx]
-		"movzbl 1(%0, %%ebx), %%esi	\n\t" //src[xx+1]
+		"movzbl  (%0, %%"REG_b"), %%edi	\n\t" //src[xx]
+		"movzbl 1(%0, %%"REG_b"), %%esi	\n\t" //src[xx+1]
 		"subl %%edi, %%esi		\n\t" //src[xx+1] - src[xx]
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $16, %%edi		\n\t"
 		"addl %%edi, %%esi		\n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
-		"movl %1, %%edi			\n\t"
+		"mov %1, %%"REG_D"		\n\t"
 		"shrl $9, %%esi			\n\t"
-		"movw %%si, (%%edi, %%eax, 2)	\n\t"
+		"movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
 		"addw %4, %%cx			\n\t" //2*xalpha += xInc&0xFF
-		"adcl %3, %%ebx			\n\t" //xx+= xInc>>8 + carry
+		"adc %3, %%"REG_b"		\n\t" //xx+= xInc>>8 + carry
 
-		"movzbl (%0, %%ebx), %%edi	\n\t" //src[xx]
-		"movzbl 1(%0, %%ebx), %%esi	\n\t" //src[xx+1]
+		"movzbl (%0, %%"REG_b"), %%edi	\n\t" //src[xx]
+		"movzbl 1(%0, %%"REG_b"), %%esi	\n\t" //src[xx+1]
 		"subl %%edi, %%esi		\n\t" //src[xx+1] - src[xx]
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $16, %%edi		\n\t"
 		"addl %%edi, %%esi		\n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
-		"movl %1, %%edi			\n\t"
+		"mov %1, %%"REG_D"		\n\t"
 		"shrl $9, %%esi			\n\t"
-		"movw %%si, 2(%%edi, %%eax, 2)	\n\t"
+		"movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
 		"addw %4, %%cx			\n\t" //2*xalpha += xInc&0xFF
-		"adcl %3, %%ebx			\n\t" //xx+= xInc>>8 + carry
+		"adc %3, %%"REG_b"		\n\t" //xx+= xInc>>8 + carry
 
 
-		"addl $2, %%eax			\n\t"
-		"cmpl %2, %%eax			\n\t"
+		"add $2, %%"REG_a"		\n\t"
+		"cmp %2, %%"REG_a"		\n\t"
 		" jb 1b				\n\t"
 
 
 		:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
-		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
+		: "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
 		);
 #ifdef HAVE_MMX2
 	} //if MMX2 can't be used
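
(Annotation: the addw/adc pair in the loop above is a 16.16 fixed-point DDA: the fractional half of xInc accumulates in the 16-bit %cx, and the carry out of that add feeds the integer source position through adc -- which is why only the adc had to grow to register width while the 16-bit addw is identical on both ABIs. A C model of one step, with a hypothetical helper name:

    #include <stdint.h>

    /* One DDA step: *xalpha is the 16-bit fractional accumulator,
     * the return value is the new integer source position xx.     */
    static long dda_step(long xx, uint16_t *xalpha, uint32_t xInc)
    {
        uint32_t sum = (uint32_t)*xalpha + (xInc & 0xFFFF);
        *xalpha = (uint16_t)sum;                 /* addw %4, %%cx  */
        return xx + (xInc >> 16) + (sum >> 16);  /* adc adds carry */
    }
)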
@@ -2410,40 +2424,40 @@
     }
     else // Fast Bilinear upscale / crap downscale
     {
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #ifdef HAVE_MMX2
 	int i;
 	if(canMMX2BeUsed)
 	{
 		asm volatile(
 			"pxor %%mm7, %%mm7		\n\t"
-			"movl %0, %%ecx			\n\t"
-			"movl %1, %%edi			\n\t"
-			"movl %2, %%edx			\n\t"
-			"movl %3, %%ebx			\n\t"
-			"xorl %%eax, %%eax		\n\t" // i
-			PREFETCH" (%%ecx)		\n\t"
-			PREFETCH" 32(%%ecx)		\n\t"
-			PREFETCH" 64(%%ecx)		\n\t"
+			"mov %0, %%"REG_c"		\n\t"
+			"mov %1, %%"REG_D"		\n\t"
+			"mov %2, %%"REG_d"		\n\t"
+			"mov %3, %%"REG_b"		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t" // i
+			PREFETCH" (%%"REG_c")		\n\t"
+			PREFETCH" 32(%%"REG_c")		\n\t"
+			PREFETCH" 64(%%"REG_c")		\n\t"
 
 #define FUNNY_UV_CODE \
-			"movl (%%ebx), %%esi		\n\t"\
+			"movl (%%"REG_b"), %%esi	\n\t"\
 			"call *%4			\n\t"\
-			"addl (%%ebx, %%eax), %%ecx	\n\t"\
-			"addl %%eax, %%edi		\n\t"\
-			"xorl %%eax, %%eax		\n\t"\
+			"addl (%%"REG_b", %%"REG_a"), %%ecx\n\t"\
+			"add %%"REG_a", %%"REG_D"	\n\t"\
+			"xor %%"REG_a", %%"REG_a"	\n\t"\
 
 FUNNY_UV_CODE
 FUNNY_UV_CODE
 FUNNY_UV_CODE
 FUNNY_UV_CODE
-			"xorl %%eax, %%eax		\n\t" // i
-			"movl %5, %%ecx			\n\t" // src
-			"movl %1, %%edi			\n\t" // buf1
-			"addl $4096, %%edi		\n\t"
-			PREFETCH" (%%ecx)		\n\t"
-			PREFETCH" 32(%%ecx)		\n\t"
-			PREFETCH" 64(%%ecx)		\n\t"
+			"xor %%"REG_a", %%"REG_a"	\n\t" // i
+			"mov %5, %%"REG_c"		\n\t" // src
+			"mov %1, %%"REG_D"		\n\t" // buf1
+			"add $4096, %%"REG_D"		\n\t"
+			PREFETCH" (%%"REG_c")		\n\t"
+			PREFETCH" 32(%%"REG_c")		\n\t"
+			PREFETCH" 64(%%"REG_c")		\n\t"
 
 FUNNY_UV_CODE
 FUNNY_UV_CODE
@@ -2452,7 +2466,7 @@
 
 			:: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
 			"m" (funnyUVCode), "m" (src2)
-			: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
+			: "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%esi", "%"REG_D
 		);
 		for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
 		{
@@ -2465,41 +2479,41 @@
 	{
 #endif
 	asm volatile(
-		"xorl %%eax, %%eax		\n\t" // i
-		"xorl %%ebx, %%ebx		\n\t" // xx
+		"xor %%"REG_a", %%"REG_a"	\n\t" // i
+		"xor %%"REG_b", %%"REG_b"		\n\t" // xx
 		"xorl %%ecx, %%ecx		\n\t" // 2*xalpha
 		".balign 16			\n\t"
 		"1:				\n\t"
-		"movl %0, %%esi			\n\t"
-		"movzbl  (%%esi, %%ebx), %%edi	\n\t" //src[xx]
-		"movzbl 1(%%esi, %%ebx), %%esi	\n\t" //src[xx+1]
+		"mov %0, %%"REG_S"		\n\t"
+		"movzbl  (%%"REG_S", %%"REG_b"), %%edi	\n\t" //src[xx]
+		"movzbl 1(%%"REG_S", %%"REG_b"), %%esi	\n\t" //src[xx+1]
 		"subl %%edi, %%esi		\n\t" //src[xx+1] - src[xx]
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $16, %%edi		\n\t"
 		"addl %%edi, %%esi		\n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
-		"movl %1, %%edi			\n\t"
+		"mov %1, %%"REG_D"		\n\t"
 		"shrl $9, %%esi			\n\t"
-		"movw %%si, (%%edi, %%eax, 2)	\n\t"
+		"movw %%si, (%%"REG_d", %%"REG_a", 2)\n\t"
 
-		"movzbl  (%5, %%ebx), %%edi	\n\t" //src[xx]
-		"movzbl 1(%5, %%ebx), %%esi	\n\t" //src[xx+1]
+		"movzbl  (%5, %%"REG_b"), %%edi	\n\t" //src[xx]
+		"movzbl 1(%5, %%"REG_b"), %%esi	\n\t" //src[xx+1]
 		"subl %%edi, %%esi		\n\t" //src[xx+1] - src[xx]
 		"imull %%ecx, %%esi		\n\t" //(src[xx+1] - src[xx])*2*xalpha
 		"shll $16, %%edi		\n\t"
 		"addl %%edi, %%esi		\n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
-		"movl %1, %%edi			\n\t"
+		"mov %1, %%"REG_D"		\n\t"
 		"shrl $9, %%esi			\n\t"
-		"movw %%si, 4096(%%edi, %%eax, 2)\n\t"
+		"movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
 
 		"addw %4, %%cx			\n\t" //2*xalpha += xInc&0xFF
-		"adcl %3, %%ebx			\n\t" //xx+= xInc>>8 + carry
-		"addl $1, %%eax			\n\t"
-		"cmpl %2, %%eax			\n\t"
+		"adc %3, %%"REG_b"		\n\t" //xx+= xInc>>8 + carry
+		"add $1, %%"REG_a"		\n\t"
+		"cmp %2, %%"REG_a"		\n\t"
 		" jb 1b				\n\t"
 
-		:: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
+		:: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" ((long)(xInc>>16)), "m" ((xInc&0xFFFF)),
 		"r" (src2)
-		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
+		: "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
 		);
 #ifdef HAVE_MMX2
 	} //if MMX2 can't be used
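
(Annotation: for reference, the scalar math both bilinear loops implement: shll $16 parks src[xx] in the integer half of a 16.16 value, imull adds the scaled difference, and shrl $9 keeps 7 extra precision bits for the vertical pass -- so a full-scale sample becomes 255*128 = 32640, matching the dst[i] = src[srcW-1]*128 edge fill above. Rendered in C with illustrative names:

    #include <stdint.h>

    /* dst[i] = 128 * (src interpolated at xx + xalpha16/65536) */
    static void hscale_point(const uint8_t *src, int16_t *dst, long i,
                             long xx, uint32_t xalpha16 /* 0..65535 */)
    {
        int32_t d = src[xx + 1] - src[xx];      /* subl %%edi, %%esi  */
        int32_t v = d * (int32_t)xalpha16       /* imull %%ecx, %%esi */
                  + (src[xx] << 16);            /* shll $16; addl     */
        dst[i] = (int16_t)(v >> 9);             /* shrl $9; movw      */
    }
)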
--- a/postproc/yuv2rgb.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/postproc/yuv2rgb.c	Thu Oct 21 11:55:20 2004 +0000
@@ -156,7 +156,7 @@
 };
 #endif
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 /* hope these constant values are cache line aligned */
 uint64_t attribute_used __attribute__((aligned(8))) mmx_00ffw = 0x00ff00ff00ff00ffULL;
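
(Annotation: these lookup constants are touched only from inline asm, via MANGLE, so two attributes keep them usable: attribute_used -- presumably a wrapper around __attribute__((used)) -- stops the compiler from discarding a symbol it sees no C reference to, and the 8-byte alignment keeps every movq load on a natural boundary. The declaration pattern:

    #include <inttypes.h>

    /* Kept alive for asm-only use; aligned so movq never straddles. */
    uint64_t attribute_used __attribute__((aligned(8)))
        mmx_00ffw = 0x00ff00ff00ff00ffULL;
    /* referenced from templates as: "movq "MANGLE(mmx_00ffw)", %%mm0" */
)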
@@ -183,14 +183,12 @@
 	0x0004000400040004LL,};
 
 #undef HAVE_MMX
-#undef ARCH_X86
 
 //MMX versions
 #undef RENAME
 #define HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX
 #include "yuv2rgb_template.c"
 
@@ -199,7 +197,6 @@
 #define HAVE_MMX
 #define HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX2
 #include "yuv2rgb_template.c"
 
@@ -583,7 +580,7 @@
 
 SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
 {
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
     if(c->flags & SWS_CPU_CAPS_MMX2){
 	switch(c->dstFormat){
 	case IMGFMT_BGR32: return yuv420_rgb32_MMX2;
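
(Annotation: selection between those renamed variants stays a pure runtime decision on the caller's CPU flags; the compile-time arch guard only decides what is available to choose from. A hedged sketch of the dispatch shape -- only the MMX2/BGR32 case is actually visible in the hunk above, the rest is assumed:

    /* Assumed overall shape of yuv2rgb_get_func_ptr(). */
    SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
    {
    #if defined(ARCH_X86) || defined(ARCH_X86_64)
        if(c->flags & SWS_CPU_CAPS_MMX2){
            switch(c->dstFormat){
            case IMGFMT_BGR32: return yuv420_rgb32_MMX2;
            /* further formats here, then an MMX-only table */
            }
        }
    #endif
        return NULL; /* assumption: real code falls through to C paths */
    }
)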
--- a/postproc/yuv2rgb_template.c	Thu Oct 21 11:36:20 2004 +0000
+++ b/postproc/yuv2rgb_template.c	Thu Oct 21 11:55:20 2004 +0000
@@ -143,7 +143,7 @@
 	uint8_t *_py = src[0] + y*srcStride[0];
 	uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
 	uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-	int index= -h_size/2;
+	long index= -h_size/2;
 
 	b5Dither= dither8[y&1];
 	g6Dither= dither4[y&1];
@@ -204,8 +204,8 @@
 
 		     MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
 		     
-		     "addl $16, %1			\n\t"
-		     "addl $4, %0			\n\t"
+		     "add $16, %1			\n\t"
+		     "add $4, %0			\n\t"
 		     " js 1b				\n\t"
 		     
 		     : "+r" (index), "+r" (_image)
@@ -238,7 +238,7 @@
 	uint8_t *_py = src[0] + y*srcStride[0];
 	uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
 	uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-	int index= -h_size/2;
+	long index= -h_size/2;
 
 	b5Dither= dither8[y&1];
 	g6Dither= dither4[y&1];
@@ -295,8 +295,8 @@
 
 		     MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
 		     
-		     "addl $16, %1			\n\t"
-		     "addl $4, %0			\n\t"
+		     "add $16, %1			\n\t"
+		     "add $4, %0			\n\t"
 		     " js 1b				\n\t"
 		     : "+r" (index), "+r" (_image)
 		     : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
@@ -326,7 +326,7 @@
 	uint8_t *_py = src[0] + y*srcStride[0];
 	uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
 	uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-	int index= -h_size/2;
+	long index= -h_size/2;
 
 	    /* this MMX assembly code deals with a SINGLE scan line at a time,
 	       converting 8 pixels per iteration */
@@ -440,8 +440,8 @@
 			"pxor %%mm4, %%mm4		\n\t"
 #endif
 		     
-		     "addl $24, %1			\n\t"
-		     "addl $4, %0			\n\t"
+		     "add $24, %1			\n\t"
+		     "add $4, %0			\n\t"
 		     " js 1b				\n\t"
 		     
 		     : "+r" (index), "+r" (_image)
@@ -472,7 +472,7 @@
 	uint8_t *_py = src[0] + y*srcStride[0];
 	uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
 	uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-	int index= -h_size/2;
+	long index= -h_size/2;
 
 	    /* this MMX assembly code deals with a SINGLE scan line at a time,
 	       converting 8 pixels per iteration */
@@ -526,8 +526,8 @@
 		     "pxor %%mm4, %%mm4;" /* zero mm4 */
 		     "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
 
-		     "addl $32, %1			\n\t"
-		     "addl $4, %0			\n\t"
+		     "add $32, %1			\n\t"
+		     "add $4, %0			\n\t"
 		     " js 1b				\n\t"
 		     
 		     : "+r" (index), "+r" (_image)