mplayer.hg changeset 32157:aae5b14d959a
rgb2rgb: don't misuse HAVE_* defines
Introduce and use COMPILE_TEMPLATE_* instead.
| author | ramiro |
|---|---|
| date | Tue, 14 Sep 2010 13:12:11 +0000 |
| parents | 68914bed3a8f |
| children | ec7786b93e52 |
| files | libswscale/rgb2rgb.c libswscale/rgb2rgb_template.c |
| diffstat | 2 files changed, 83 insertions(+), 87 deletions(-) |
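Why the rename matters: rgb2rgb.c compiles rgb2rgb_template.c once per instruction set, setting a switch macro and redefining RENAME() so each copy gets unique symbol names. Doing that by re-defining the configure-generated HAVE_* macros clobbered configure's answers for everything else in the translation unit; dedicated COMPILE_TEMPLATE_* switches keep the two concerns apart. A minimal, self-contained sketch of the inclusion pattern (hypothetical names; the template file is collapsed into a macro so the example stays in one file):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for rgb2rgb_template.c: the same body is compiled repeatedly,
 * with RENAME() providing the per-variant symbol name and
 * COMPILE_TEMPLATE_FAST standing in for the COMPILE_TEMPLATE_* switches
 * (the real template tests them with #if, which a macro body cannot). */
#define TEMPLATE_BODY                                         \
static void RENAME(fill)(uint8_t *dst, long n)                \
{                                                             \
    for (long i = 0; i < n; i++)                              \
        dst[i] = COMPILE_TEMPLATE_FAST ? 0xFF : 0x00;         \
}

/* plain C version */
#define COMPILE_TEMPLATE_FAST 0
#define RENAME(a) a ## _C
TEMPLATE_BODY
#undef RENAME
#undef COMPILE_TEMPLATE_FAST

/* "optimized" version */
#define COMPILE_TEMPLATE_FAST 1
#define RENAME(a) a ## _FAST
TEMPLATE_BODY
#undef RENAME
#undef COMPILE_TEMPLATE_FAST

int main(void)
{
    uint8_t buf[8];
    fill_C(buf, 8);         /* body compiled with the switch off */
    fill_FAST(buf, 8);      /* same body, switch on */
    printf("%d\n", buf[0]); /* prints 255 */
    return 0;
}
```

Each `#include "rgb2rgb_template.c"` in the diff below plays the role of TEMPLATE_BODY; crucially, after this change HAVE_MMX and friends keep whatever value configure gave them.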
--- a/libswscale/rgb2rgb.c	Tue Sep 14 13:07:57 2010 +0000
+++ b/libswscale/rgb2rgb.c	Tue Sep 14 13:12:11 2010 +0000
@@ -149,14 +149,10 @@
 //Note: We have C, MMX, MMX2, 3DNOW versions, there is no 3DNOW + MMX2 one.
 //plain C versions
-#undef HAVE_MMX
-#undef HAVE_MMX2
-#undef HAVE_AMD3DNOW
-#undef HAVE_SSE2
-#define HAVE_MMX 0
-#define HAVE_MMX2 0
-#define HAVE_AMD3DNOW 0
-#define HAVE_SSE2 0
+#define COMPILE_TEMPLATE_MMX 0
+#define COMPILE_TEMPLATE_MMX2 0
+#define COMPILE_TEMPLATE_AMD3DNOW 0
+#define COMPILE_TEMPLATE_SSE2 0
 #define RENAME(a) a ## _C
 #include "rgb2rgb_template.c"
 
@@ -164,33 +160,33 @@
 //MMX versions
 #undef RENAME
-#undef HAVE_MMX
-#define HAVE_MMX 1
+#undef COMPILE_TEMPLATE_MMX
+#define COMPILE_TEMPLATE_MMX 1
 #define RENAME(a) a ## _MMX
 #include "rgb2rgb_template.c"
 
 //MMX2 versions
 #undef RENAME
-#undef HAVE_MMX2
-#define HAVE_MMX2 1
+#undef COMPILE_TEMPLATE_MMX2
+#define COMPILE_TEMPLATE_MMX2 1
 #define RENAME(a) a ## _MMX2
 #include "rgb2rgb_template.c"
 
 //SSE2 versions
 #undef RENAME
-#undef HAVE_SSE2
-#define HAVE_SSE2 1
+#undef COMPILE_TEMPLATE_SSE2
+#define COMPILE_TEMPLATE_SSE2 1
 #define RENAME(a) a ## _SSE2
 #include "rgb2rgb_template.c"
 
 //3DNOW versions
 #undef RENAME
-#undef HAVE_MMX2
-#undef HAVE_SSE2
-#undef HAVE_AMD3DNOW
-#define HAVE_MMX2 0
-#define HAVE_SSE2 0
-#define HAVE_AMD3DNOW 1
+#undef COMPILE_TEMPLATE_MMX2
+#undef COMPILE_TEMPLATE_SSE2
+#undef COMPILE_TEMPLATE_AMD3DNOW
+#define COMPILE_TEMPLATE_MMX2 0
+#define COMPILE_TEMPLATE_SSE2 0
+#define COMPILE_TEMPLATE_AMD3DNOW 1
 #define RENAME(a) a ## _3DNOW
 #include "rgb2rgb_template.c"
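For context (not part of this changeset): the _C/_MMX/_MMX2/_SSE2/_3DNOW copies generated above are installed into function pointers once, at init time, according to the detected CPU capabilities. A sketch of that dispatch shape with stand-in names throughout (the flag bits and function names are illustrative, not the swscale API):

```c
#include <stdint.h>
#include <stdio.h>

typedef void (*rgb_fn)(const uint8_t *src, uint8_t *dst, long src_size);

/* Stub variants standing in for the template-generated copies. */
static void cvt_C(const uint8_t *s, uint8_t *d, long n)    { (void)s; (void)d; (void)n; puts("C");    }
static void cvt_MMX(const uint8_t *s, uint8_t *d, long n)  { (void)s; (void)d; (void)n; puts("MMX");  }
static void cvt_MMX2(const uint8_t *s, uint8_t *d, long n) { (void)s; (void)d; (void)n; puts("MMX2"); }
static void cvt_SSE2(const uint8_t *s, uint8_t *d, long n) { (void)s; (void)d; (void)n; puts("SSE2"); }

static rgb_fn convert;  /* hot-path pointer, assigned once at init */

enum { CPU_MMX = 1, CPU_MMX2 = 2, CPU_SSE2 = 4 };  /* illustrative flags */

static void rgb_init(int flags)
{
    /* Most capable first; each variant was compiled from the same template. */
    if      (flags & CPU_SSE2) convert = cvt_SSE2;
    else if (flags & CPU_MMX2) convert = cvt_MMX2;
    else if (flags & CPU_MMX)  convert = cvt_MMX;
    else                       convert = cvt_C;
}

int main(void)
{
    rgb_init(CPU_MMX | CPU_MMX2);  /* pretend cpudetect found MMX2 */
    convert(NULL, NULL, 0);        /* prints "MMX2" */
    return 0;
}
```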
--- a/libswscale/rgb2rgb_template.c	Tue Sep 14 13:07:57 2010 +0000
+++ b/libswscale/rgb2rgb_template.c	Tue Sep 14 13:12:11 2010 +0000
@@ -33,30 +33,30 @@
 #undef MMREG_SIZE
 #undef PAVGB
 
-#if HAVE_SSE2
+#if COMPILE_TEMPLATE_SSE2
 #define MMREG_SIZE 16
 #else
 #define MMREG_SIZE 8
 #endif
 
-#if HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_AMD3DNOW
 #define PREFETCH "prefetch"
 #define PAVGB    "pavgusb"
-#elif HAVE_MMX2
+#elif COMPILE_TEMPLATE_MMX2
 #define PREFETCH "prefetchnta"
 #define PAVGB    "pavgb"
 #else
 #define PREFETCH " # nop"
 #endif
 
-#if HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_AMD3DNOW
 /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
 #define EMMS     "femms"
 #else
 #define EMMS     "emms"
 #endif
 
-#if HAVE_MMX2
+#if COMPILE_TEMPLATE_MMX2
 #define MOVNTQ "movntq"
 #define SFENCE "sfence"
 #else
@@ -69,11 +69,11 @@
     uint8_t *dest = dst;
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
     mm_end = end - 23;
     __asm__ volatile("movq    %0, %%mm7"::"m"(mask32a):"memory");
@@ -164,11 +164,11 @@
     uint8_t *dest = dst;
     const uint8_t *s = src;
    const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
     mm_end = end - 31;
     while (s < mm_end) {
@@ -222,7 +222,7 @@
     register const uint8_t *end;
     const uint8_t *mm_end;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*s));
     __asm__ volatile("movq    %0, %%mm4"::"m"(mask15s));
     mm_end = end - 15;
@@ -268,7 +268,7 @@
     register const uint8_t *end;
     const uint8_t *mm_end;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*s));
     __asm__ volatile("movq    %0, %%mm7"::"m"(mask15rg));
     __asm__ volatile("movq    %0, %%mm6"::"m"(mask15b));
@@ -316,12 +316,12 @@
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     mm_end = end - 15;
 #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
     __asm__ volatile(
@@ -412,12 +412,12 @@
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq          %0, %%mm7    \n\t"
@@ -471,12 +471,12 @@
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     mm_end = end - 15;
 #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
     __asm__ volatile(
@@ -567,12 +567,12 @@
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq          %0, %%mm7    \n\t"
@@ -626,12 +626,12 @@
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq          %0, %%mm7    \n\t"
@@ -687,12 +687,12 @@
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq          %0, %%mm7    \n\t"
@@ -748,12 +748,12 @@
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq          %0, %%mm7    \n\t"
@@ -809,12 +809,12 @@
 {
     const uint8_t *s = src;
     const uint8_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint8_t *mm_end;
 #endif
     uint16_t *d = (uint16_t *)dst;
     end = s + src_size;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
     __asm__ volatile(
         "movq          %0, %%mm7    \n\t"
@@ -890,13 +890,13 @@
 static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint16_t *mm_end;
 #endif
     uint8_t *d = dst;
     const uint16_t *s = (const uint16_t*)src;
     end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
     mm_end = end - 7;
     while (s < mm_end) {
@@ -997,13 +997,13 @@
 static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint16_t *mm_end;
 #endif
     uint8_t *d = (uint8_t *)dst;
     const uint16_t *s = (const uint16_t *)src;
     end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
     mm_end = end - 7;
     while (s < mm_end) {
@@ -1122,13 +1122,13 @@
 static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint16_t *mm_end;
 #endif
     uint8_t *d = dst;
     const uint16_t *s = (const uint16_t *)src;
     end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
     __asm__ volatile("pxor    %%mm7,%%mm7    \n\t":::"memory");
     __asm__ volatile("pcmpeqd %%mm6,%%mm6    \n\t":::"memory");
@@ -1175,13 +1175,13 @@
 static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     const uint16_t *end;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     const uint16_t *mm_end;
 #endif
     uint8_t *d = dst;
     const uint16_t *s = (const uint16_t*)src;
     end = s + src_size/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
     __asm__ volatile("pxor    %%mm7,%%mm7    \n\t":::"memory");
     __asm__ volatile("pcmpeqd %%mm6,%%mm6    \n\t":::"memory");
@@ -1230,7 +1230,7 @@
     x86_reg idx = 15 - src_size;
     const uint8_t *s = src-idx;
     uint8_t *d = dst-idx;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(
         "test          %0, %0       \n\t"
         "jns           2f           \n\t"
@@ -1244,7 +1244,7 @@
         PREFETCH"  32(%1, %0)       \n\t"
         "movq      (%1, %0), %%mm0  \n\t"
         "movq      8(%1, %0), %%mm1 \n\t"
-# if HAVE_MMX2
+# if COMPILE_TEMPLATE_MMX2
         "pshufw    $177, %%mm0, %%mm3   \n\t"
         "pshufw    $177, %%mm1, %%mm5   \n\t"
        "pand      %%mm7, %%mm0     \n\t"
@@ -1292,7 +1292,7 @@
 static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     unsigned i;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     x86_reg mmx_size= 23 - src_size;
     __asm__ volatile (
         "test          %%"REG_a", %%"REG_a"    \n\t"
@@ -1365,7 +1365,7 @@
     long y;
     const x86_reg chromWidth= width>>1;
     for (y=0; y<height; y++) {
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
         //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
         __asm__ volatile(
             "xor           %%"REG_a", %%"REG_a"    \n\t"
@@ -1489,7 +1489,7 @@
         ysrc += lumStride;
         dst  += dstStride;
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__(EMMS"       \n\t"
             SFENCE"     \n\t"
             :::"memory");
@@ -1515,7 +1515,7 @@
     long y;
     const x86_reg chromWidth= width>>1;
     for (y=0; y<height; y++) {
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
         //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
         __asm__ volatile(
             "xor           %%"REG_a", %%"REG_a"    \n\t"
@@ -1593,7 +1593,7 @@
         ysrc += lumStride;
         dst  += dstStride;
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__(EMMS"       \n\t"
             SFENCE"     \n\t"
             :::"memory");
@@ -1643,7 +1643,7 @@
     long y;
     const x86_reg chromWidth= width>>1;
     for (y=0; y<height; y+=2) {
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
         __asm__ volatile(
             "xor           %%"REG_a", %%"REG_a"    \n\t"
             "pcmpeqw       %%mm7, %%mm7            \n\t"
@@ -1746,7 +1746,7 @@
         ydst += lumStride;
         src  += srcStride;
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(EMMS"       \n\t"
                      SFENCE"     \n\t"
                      :::"memory");
@@ -1769,7 +1769,7 @@
     dst+= dstStride;
 
     for (y=1; y<srcHeight; y++) {
-#if HAVE_MMX2 || HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
         const x86_reg mmxSize= srcWidth&~15;
         __asm__ volatile(
            "mov           %4, %%"REG_a"    \n\t"
@@ -1859,7 +1859,7 @@
     }
 #endif
 
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(EMMS"       \n\t"
                      SFENCE"     \n\t"
                      :::"memory");
@@ -1879,7 +1879,7 @@
     long y;
     const x86_reg chromWidth= width>>1;
     for (y=0; y<height; y+=2) {
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
         __asm__ volatile(
             "xor           %%"REG_a", %%"REG_a"    \n\t"
             "pcmpeqw       %%mm7, %%mm7            \n\t"
@@ -1982,7 +1982,7 @@
         ydst += lumStride;
         src  += srcStride;
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(EMMS"       \n\t"
                      SFENCE"     \n\t"
                      :::"memory");
@@ -2002,7 +2002,7 @@
 {
     long y;
     const x86_reg chromWidth= width>>1;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     for (y=0; y<height-2; y+=2) {
         long i;
         for (i=0; i<2; i++) {
@@ -2090,7 +2090,7 @@
             "1:                             \n\t"
             PREFETCH"  64(%0, %%"REG_d")    \n\t"
             PREFETCH"  64(%1, %%"REG_d")    \n\t"
-#if HAVE_MMX2 || HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
            "movq      (%0, %%"REG_d"), %%mm0    \n\t"
            "movq      (%1, %%"REG_d"), %%mm1    \n\t"
            "movq      6(%0, %%"REG_d"), %%mm2   \n\t"
@@ -2151,7 +2151,7 @@
             "packssdw  %%mm1, %%mm0         \n\t" // V1 V0 U1 U0
             "psraw     $7, %%mm0            \n\t"
 
-#if HAVE_MMX2 || HAVE_AMD3DNOW
+#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
            "movq      12(%0, %%"REG_d"), %%mm4  \n\t"
            "movq      12(%1, %%"REG_d"), %%mm1  \n\t"
            "movq      18(%0, %%"REG_d"), %%mm2  \n\t"
@@ -2296,8 +2296,8 @@
     for (h=0; h < height; h++) {
         long w;
 
-#if HAVE_MMX
-#if HAVE_SSE2
+#if COMPILE_TEMPLATE_MMX
+#if COMPILE_TEMPLATE_SSE2
         __asm__(
             "xor %%"REG_a", %%"REG_a"   \n\t"
             "1:                         \n\t"
@@ -2357,7 +2357,7 @@
         src1 += src1Stride;
         src2 += src2Stride;
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__(
             EMMS"       \n\t"
             SFENCE"     \n\t"
@@ -2375,7 +2375,7 @@
     x86_reg y;
     long x,w,h;
     w=width/2; h=height/2;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__ volatile(
         PREFETCH" %0    \n\t"
         PREFETCH" %1    \n\t"
@@ -2385,7 +2385,7 @@
         const uint8_t* s1=src1+srcStride1*(y>>1);
         uint8_t* d=dst1+dstStride1*y;
         x=0;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
         for (;x<w-31;x+=32) {
             __asm__ volatile(
                 PREFETCH"   32%1    \n\t"
@@ -2424,7 +2424,7 @@
         const uint8_t* s2=src2+srcStride2*(y>>1);
         uint8_t* d=dst2+dstStride2*y;
         x=0;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
         for (;x<w-31;x+=32) {
             __asm__ volatile(
                 PREFETCH"   32%1    \n\t"
@@ -2459,7 +2459,7 @@
 #endif
         for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__(EMMS"       \n\t"
             SFENCE"     \n\t"
             :::"memory");
@@ -2483,7 +2483,7 @@
         const uint8_t* vp=src3+srcStride3*(y>>2);
         uint8_t* d=dst+dstStride*y;
         x=0;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
         for (;x<w-7;x+=8) {
             __asm__ volatile(
                 PREFETCH"   32(%1, %0)  \n\t"
@@ -2549,7 +2549,7 @@
             d[8*x+7] = vp[x];
         }
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__(
             EMMS"       \n\t"
             SFENCE"     \n\t"
@@ -2564,7 +2564,7 @@
     dst +=   count;
     src += 2*count;
     count= - count;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     if(count <= -16) {
         count += 15;
         __asm__ volatile(
@@ -2603,7 +2603,7 @@
     dst1+=   count;
     src += 4*count;
     count= - count;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     if(count <= -8) {
         count += 7;
         __asm__ volatile(
@@ -2704,7 +2704,7 @@
     dst1+=   count;
     src += 4*count;
     count= - count;
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     if(count <= -8) {
         count += 7;
         __asm__ volatile(
@@ -2820,7 +2820,7 @@
         src += srcStride;
         ydst+= lumStride;
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__(
             EMMS"       \n\t"
            SFENCE"     \n\t"
@@ -2845,7 +2845,7 @@
         udst+= chromStride;
         vdst+= chromStride;
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__(
             EMMS"       \n\t"
             SFENCE"     \n\t"
@@ -2872,7 +2872,7 @@
         src += srcStride;
         ydst+= lumStride;
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__(
             EMMS"       \n\t"
             SFENCE"     \n\t"
@@ -2897,7 +2897,7 @@
         udst+= chromStride;
         vdst+= chromStride;
     }
-#if HAVE_MMX
+#if COMPILE_TEMPLATE_MMX
     __asm__(
             EMMS"       \n\t"
            SFENCE"     \n\t"
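On the template side, everything above keys off the same switches: PREFETCH, PAVGB, EMMS, MOVNTQ and SFENCE expand to different instructions (or to a no-op) per variant, so one asm body serves all five builds. A compilable sketch of that layer, assuming an x86 target (copy8 is a hypothetical stand-in, not a function from the diff):

```c
#include <stdint.h>

/* Fix the switches as rgb2rgb.c would for the MMX2 build;
 * flip them to get another variant of the same body. */
#define COMPILE_TEMPLATE_AMD3DNOW 0
#define COMPILE_TEMPLATE_MMX2     1

#if COMPILE_TEMPLATE_AMD3DNOW
#define PREFETCH "prefetch"        /* 3DNow! prefetch */
#elif COMPILE_TEMPLATE_MMX2
#define PREFETCH "prefetchnta"     /* SSE non-temporal prefetch */
#else
#define PREFETCH " # nop"          /* plain C build: harmless filler */
#endif

#if COMPILE_TEMPLATE_AMD3DNOW
#define EMMS "femms"               /* faster than emms on K6 */
#else
#define EMMS "emms"
#endif

/* One body, many variants: which instructions it emits was decided above. */
void copy8(const uint8_t *src, uint8_t *dst)
{
    __asm__ volatile(
        PREFETCH"  64(%1)       \n\t"  /* hint: the next block follows */
        "movq      (%1), %%mm0  \n\t"  /* move 8 bytes through an MMX reg */
        "movq      %%mm0, (%0)  \n\t"
        EMMS"                   \n\t"  /* reset FPU state after MMX use */
        :: "r"(dst), "r"(src)
        : "memory");
}
```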