changeset 2800:7847d6b7ad3d

.balign or we'll align by 64kb on some architectures
author michael
date Sat, 10 Nov 2001 20:39:23 +0000
parents 0d7fd1655a89
children 318c240363c7
files libvo/osd.c libvo/osd_template.c postproc/rgb2rgb.c postproc/rgb2rgb_template.c postproc/swscale.c postproc/swscale_template.c
diffstat 6 files changed, 24 insertions(+), 24 deletions(-)
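Background for the change: GNU as interprets the operand of .align differently per target. On i386 ELF it is a byte count, but on some architectures it is a power-of-two exponent, so ".align 16" would request 2^16 = 64 KiB of alignment (and up to that much padding), which is what the commit message refers to. ".balign" always takes a byte count on every target. A minimal standalone sketch of the distinction (hypothetical label, not taken from this changeset):

	.text
	.balign 16		/* unambiguous: pad to a 16-byte boundary on every target */
loop:
	nop
	/* By contrast, ".align 16" pads to 16 bytes on i386 ELF, but to
	   2^16 = 64 KiB on targets where the operand is an exponent. */

Accordingly, the loop-top alignment directives in the inline asm blocks below are switched from ".align 16" to ".balign 16"; the generated code is identical on i386, but the directive is now portable.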
--- a/libvo/osd.c	Sat Nov 10 19:46:04 2001 +0000
+++ b/libvo/osd.c	Sat Nov 10 20:39:23 2001 +0000
@@ -86,7 +86,7 @@
 		"pxor %%mm7, %%mm7		\n\t"
 		"xorl %%eax, %%eax		\n\t"
 		"pcmpeqb %%mm6, %%mm6		\n\t" // F..F
-		".align 16\n\t"
+		".balign 16\n\t"
 		"1:				\n\t"
 		"movq (%0, %%eax, 4), %%mm0	\n\t" // dstbase
 		"movq %%mm0, %%mm1		\n\t"
@@ -121,7 +121,7 @@
 		"xorl %%eax, %%eax		\n\t"
 		"xorl %%ebx, %%ebx		\n\t"
 		"xorl %%edx, %%edx		\n\t"
-		".align 16\n\t"
+		".balign 16\n\t"
 		"1:				\n\t"
 		"movb (%1, %%eax), %%bl		\n\t"
 		"cmpb $0, %%bl			\n\t"
--- a/libvo/osd_template.c	Sat Nov 10 19:46:04 2001 +0000
+++ b/libvo/osd_template.c	Sat Nov 10 20:39:23 2001 +0000
@@ -86,7 +86,7 @@
 		"pxor %%mm7, %%mm7		\n\t"
 		"xorl %%eax, %%eax		\n\t"
 		"pcmpeqb %%mm6, %%mm6		\n\t" // F..F
-		".align 16\n\t"
+		".balign 16\n\t"
 		"1:				\n\t"
 		"movq (%0, %%eax, 4), %%mm0	\n\t" // dstbase
 		"movq %%mm0, %%mm1		\n\t"
@@ -121,7 +121,7 @@
 		"xorl %%eax, %%eax		\n\t"
 		"xorl %%ebx, %%ebx		\n\t"
 		"xorl %%edx, %%edx		\n\t"
-		".align 16\n\t"
+		".balign 16\n\t"
 		"1:				\n\t"
 		"movb (%1, %%eax), %%bl		\n\t"
 		"cmpb $0, %%bl			\n\t"
--- a/postproc/rgb2rgb.c	Sat Nov 10 19:46:04 2001 +0000
+++ b/postproc/rgb2rgb.c	Sat Nov 10 20:39:23 2001 +0000
@@ -585,7 +585,7 @@
 #ifdef HAVE_MMX
 	asm volatile (
 		"xorl %%eax, %%eax		\n\t"
-		".align 16			\n\t"
+		".balign 16			\n\t"
 		"1:				\n\t"
 		PREFETCH" 32(%0, %%eax)		\n\t"
 		"movq (%0, %%eax), %%mm0	\n\t"
@@ -636,7 +636,7 @@
 //FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
 		asm volatile(
 			"xorl %%eax, %%eax		\n\t"
-			".align 16			\n\t"
+			".balign 16			\n\t"
 			"1:				\n\t"
 			PREFETCH" 32(%1, %%eax, 2)	\n\t"
 			PREFETCH" 32(%2, %%eax)		\n\t"
@@ -710,7 +710,7 @@
 			"xorl %%eax, %%eax		\n\t"
 			"pcmpeqw %%mm7, %%mm7		\n\t"
 			"psrlw $8, %%mm7		\n\t" // FF,00,FF,00...
-			".align 16			\n\t"
+			".balign 16			\n\t"
 			"1:				\n\t"
 			PREFETCH" 64(%0, %%eax, 4)	\n\t"
 			"movq (%0, %%eax, 4), %%mm0	\n\t" // YUYV YUYV(0)
@@ -760,7 +760,7 @@
 
 		asm volatile(
 			"xorl %%eax, %%eax		\n\t"
-			".align 16			\n\t"
+			".balign 16			\n\t"
 			"1:				\n\t"
 			PREFETCH" 64(%0, %%eax, 4)	\n\t"
 			"movq (%0, %%eax, 4), %%mm0	\n\t" // YUYV YUYV(0)
--- a/postproc/rgb2rgb_template.c	Sat Nov 10 19:46:04 2001 +0000
+++ b/postproc/rgb2rgb_template.c	Sat Nov 10 20:39:23 2001 +0000
@@ -585,7 +585,7 @@
 #ifdef HAVE_MMX
 	asm volatile (
 		"xorl %%eax, %%eax		\n\t"
-		".align 16			\n\t"
+		".balign 16			\n\t"
 		"1:				\n\t"
 		PREFETCH" 32(%0, %%eax)		\n\t"
 		"movq (%0, %%eax), %%mm0	\n\t"
@@ -636,7 +636,7 @@
 //FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
 		asm volatile(
 			"xorl %%eax, %%eax		\n\t"
-			".align 16			\n\t"
+			".balign 16			\n\t"
 			"1:				\n\t"
 			PREFETCH" 32(%1, %%eax, 2)	\n\t"
 			PREFETCH" 32(%2, %%eax)		\n\t"
@@ -710,7 +710,7 @@
 			"xorl %%eax, %%eax		\n\t"
 			"pcmpeqw %%mm7, %%mm7		\n\t"
 			"psrlw $8, %%mm7		\n\t" // FF,00,FF,00...
-			".align 16			\n\t"
+			".balign 16			\n\t"
 			"1:				\n\t"
 			PREFETCH" 64(%0, %%eax, 4)	\n\t"
 			"movq (%0, %%eax, 4), %%mm0	\n\t" // YUYV YUYV(0)
@@ -760,7 +760,7 @@
 
 		asm volatile(
 			"xorl %%eax, %%eax		\n\t"
-			".align 16			\n\t"
+			".balign 16			\n\t"
 			"1:				\n\t"
 			PREFETCH" 64(%0, %%eax, 4)	\n\t"
 			"movq (%0, %%eax, 4), %%mm0	\n\t" // YUYV YUYV(0)
--- a/postproc/swscale.c	Sat Nov 10 19:46:04 2001 +0000
+++ b/postproc/swscale.c	Sat Nov 10 20:39:23 2001 +0000
@@ -143,7 +143,7 @@
 		"punpcklwd %%mm5, %%mm5		\n\t"\
 		"punpcklwd %%mm5, %%mm5		\n\t"\
 		"xorl %%eax, %%eax		\n\t"\
-		".align 16			\n\t"\
+		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%0, %%eax, 2), %%mm0	\n\t" /*buf0[eax]*/\
 		"movq (%1, %%eax, 2), %%mm1	\n\t" /*buf1[eax]*/\
@@ -197,7 +197,7 @@
 		"punpcklwd %%mm5, %%mm5		\n\t"\
 		"movq %%mm5, asm_uvalpha1	\n\t"\
 		"xorl %%eax, %%eax		\n\t"\
-		".align 16			\n\t"\
+		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, %%eax), %%mm2	\n\t" /* uvbuf0[eax]*/\
 		"movq (%3, %%eax), %%mm3	\n\t" /* uvbuf1[eax]*/\
@@ -262,7 +262,7 @@
 
 #define YSCALEYUV2RGB1 \
 		"xorl %%eax, %%eax		\n\t"\
-		".align 16			\n\t"\
+		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, %%eax), %%mm3	\n\t" /* uvbuf0[eax]*/\
 		"movq 4096(%2, %%eax), %%mm4	\n\t" /* uvbuf0[eax+2048]*/\
@@ -311,7 +311,7 @@
 // do vertical chrominance interpolation
 #define YSCALEYUV2RGB1b \
 		"xorl %%eax, %%eax		\n\t"\
-		".align 16			\n\t"\
+		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, %%eax), %%mm2	\n\t" /* uvbuf0[eax]*/\
 		"movq (%3, %%eax), %%mm3	\n\t" /* uvbuf1[eax]*/\
@@ -1310,7 +1310,7 @@
 		"xorl %%eax, %%eax		\n\t" // i
 		"xorl %%ebx, %%ebx		\n\t" // xx
 		"xorl %%ecx, %%ecx		\n\t" // 2*xalpha
-		".align 16			\n\t"
+		".balign 16			\n\t"
 		"1:				\n\t"
 		"movzbl  (%0, %%ebx), %%edi	\n\t" //src[xx]
 		"movzbl 1(%0, %%ebx), %%esi	\n\t" //src[xx+1]
@@ -1442,7 +1442,7 @@
 		"xorl %%eax, %%eax		\n\t" // i
 		"xorl %%ebx, %%ebx		\n\t" // xx
 		"xorl %%ecx, %%ecx		\n\t" // 2*xalpha
-		".align 16			\n\t"
+		".balign 16			\n\t"
 		"1:				\n\t"
 		"movl %0, %%esi			\n\t"
 		"movzbl  (%%esi, %%ebx), %%edi	\n\t" //src[xx]
--- a/postproc/swscale_template.c	Sat Nov 10 19:46:04 2001 +0000
+++ b/postproc/swscale_template.c	Sat Nov 10 20:39:23 2001 +0000
@@ -143,7 +143,7 @@
 		"punpcklwd %%mm5, %%mm5		\n\t"\
 		"punpcklwd %%mm5, %%mm5		\n\t"\
 		"xorl %%eax, %%eax		\n\t"\
-		".align 16			\n\t"\
+		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%0, %%eax, 2), %%mm0	\n\t" /*buf0[eax]*/\
 		"movq (%1, %%eax, 2), %%mm1	\n\t" /*buf1[eax]*/\
@@ -197,7 +197,7 @@
 		"punpcklwd %%mm5, %%mm5		\n\t"\
 		"movq %%mm5, asm_uvalpha1	\n\t"\
 		"xorl %%eax, %%eax		\n\t"\
-		".align 16			\n\t"\
+		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, %%eax), %%mm2	\n\t" /* uvbuf0[eax]*/\
 		"movq (%3, %%eax), %%mm3	\n\t" /* uvbuf1[eax]*/\
@@ -262,7 +262,7 @@
 
 #define YSCALEYUV2RGB1 \
 		"xorl %%eax, %%eax		\n\t"\
-		".align 16			\n\t"\
+		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, %%eax), %%mm3	\n\t" /* uvbuf0[eax]*/\
 		"movq 4096(%2, %%eax), %%mm4	\n\t" /* uvbuf0[eax+2048]*/\
@@ -311,7 +311,7 @@
 // do vertical chrominance interpolation
 #define YSCALEYUV2RGB1b \
 		"xorl %%eax, %%eax		\n\t"\
-		".align 16			\n\t"\
+		".balign 16			\n\t"\
 		"1:				\n\t"\
 		"movq (%2, %%eax), %%mm2	\n\t" /* uvbuf0[eax]*/\
 		"movq (%3, %%eax), %%mm3	\n\t" /* uvbuf1[eax]*/\
@@ -1310,7 +1310,7 @@
 		"xorl %%eax, %%eax		\n\t" // i
 		"xorl %%ebx, %%ebx		\n\t" // xx
 		"xorl %%ecx, %%ecx		\n\t" // 2*xalpha
-		".align 16			\n\t"
+		".balign 16			\n\t"
 		"1:				\n\t"
 		"movzbl  (%0, %%ebx), %%edi	\n\t" //src[xx]
 		"movzbl 1(%0, %%ebx), %%esi	\n\t" //src[xx+1]
@@ -1442,7 +1442,7 @@
 		"xorl %%eax, %%eax		\n\t" // i
 		"xorl %%ebx, %%ebx		\n\t" // xx
 		"xorl %%ecx, %%ecx		\n\t" // 2*xalpha
-		".align 16			\n\t"
+		".balign 16			\n\t"
 		"1:				\n\t"
 		"movl %0, %%esi			\n\t"
 		"movzbl  (%%esi, %%ebx), %%edi	\n\t" //src[xx]