changeset 25750:06bf0eb70a55
Cosmetics: whitespaces
author      benoit
date        Thu, 17 Jan 2008 10:24:14 +0000
parents     8d082a234a6d
children    147deb141e07
files       libswscale/rgb2rgb.c libswscale/rgb2rgb_template.c libswscale/swscale.c libswscale/swscale_altivec_template.c libswscale/yuv2rgb_bfin.c libswscale/yuv2rgb_vis.c
diffstat    6 files changed, 69 insertions(+), 69 deletions(-)
--- a/libswscale/rgb2rgb.c	Thu Jan 17 08:57:15 2008 +0000
+++ b/libswscale/rgb2rgb.c	Thu Jan 17 10:24:14 2008 +0000
@@ -35,18 +35,18 @@
 #define FAST_BGR2YV12 // use 7 bit coeffs instead of 15bit
-void (*rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb24to16)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb24to15)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb32to16)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb32to15)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb15to24)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb15to32)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb16to24)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb16to32)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb24to32)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32to24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb15to24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb16to24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size);
 //void (*rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
 void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
 void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
@@ -231,20 +231,20 @@
 /*
     for (i=0; i<num_pixels; i++)
-        ((unsigned *)dst)[i] = ((unsigned *)palette)[ src[i] ];
+        ((unsigned *)dst)[i] = ((unsigned *)palette)[src[i]];
 */
     for (i=0; i<num_pixels; i++)
     {
 #ifdef WORDS_BIGENDIAN
-        dst[3]= palette[ src[i]*4+2 ];
-        dst[2]= palette[ src[i]*4+1 ];
-        dst[1]= palette[ src[i]*4+0 ];
+        dst[3]= palette[src[i]*4+2];
+        dst[2]= palette[src[i]*4+1];
+        dst[1]= palette[src[i]*4+0];
 #else
         //FIXME slow?
-        dst[0]= palette[ src[i]*4+2 ];
-        dst[1]= palette[ src[i]*4+1 ];
-        dst[2]= palette[ src[i]*4+0 ];
+        dst[0]= palette[src[i]*4+2];
+        dst[1]= palette[src[i]*4+1];
+        dst[2]= palette[src[i]*4+0];
         //dst[3]= 0; /* do we need this cleansing? */
 #endif
         dst+= 4;
@@ -257,14 +257,14 @@
     for (i=0; i<num_pixels; i++)
     {
 #ifdef WORDS_BIGENDIAN
-        dst[3]= palette[ src[i]*4+0 ];
-        dst[2]= palette[ src[i]*4+1 ];
-        dst[1]= palette[ src[i]*4+2 ];
+        dst[3]= palette[src[i]*4+0];
+        dst[2]= palette[src[i]*4+1];
+        dst[1]= palette[src[i]*4+2];
 #else
         //FIXME slow?
-        dst[0]= palette[ src[i]*4+0 ];
-        dst[1]= palette[ src[i]*4+1 ];
-        dst[2]= palette[ src[i]*4+2 ];
+        dst[0]= palette[src[i]*4+0];
+        dst[1]= palette[src[i]*4+1];
+        dst[2]= palette[src[i]*4+2];
         //dst[3]= 0; /* do we need this cleansing? */
 #endif
@@ -281,14 +281,14 @@
 /*
     writes 1 byte o much and might cause alignment issues on some architectures?
     for (i=0; i<num_pixels; i++)
-        ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
+        ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[src[i]];
 */
     for (i=0; i<num_pixels; i++)
     {
         //FIXME slow?
-        dst[0]= palette[ src[i]*4+2 ];
-        dst[1]= palette[ src[i]*4+1 ];
-        dst[2]= palette[ src[i]*4+0 ];
+        dst[0]= palette[src[i]*4+2];
+        dst[1]= palette[src[i]*4+1];
+        dst[2]= palette[src[i]*4+0];
         dst+= 3;
     }
 }
@@ -299,14 +299,14 @@
 /*
     writes 1 byte o much and might cause alignment issues on some architectures?
     for (i=0; i<num_pixels; i++)
-        ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
+        ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[src[i]];
 */
     for (i=0; i<num_pixels; i++)
     {
         //FIXME slow?
-        dst[0]= palette[ src[i]*4+0 ];
-        dst[1]= palette[ src[i]*4+1 ];
-        dst[2]= palette[ src[i]*4+2 ];
+        dst[0]= palette[src[i]*4+0];
+        dst[1]= palette[src[i]*4+1];
+        dst[2]= palette[src[i]*4+2];
         dst+= 3;
     }
 }
@@ -318,13 +318,13 @@
 {
     long i;
     for (i=0; i<num_pixels; i++)
-        ((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
+        ((uint16_t *)dst)[i] = ((uint16_t *)palette)[src[i]];
 }
 void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
 {
     long i;
     for (i=0; i<num_pixels; i++)
-        ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
+        ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[src[i]]);
 }
 /**
@@ -334,13 +334,13 @@
 {
     long i;
     for (i=0; i<num_pixels; i++)
-        ((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
+        ((uint16_t *)dst)[i] = ((uint16_t *)palette)[src[i]];
 }
 void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
 {
     long i;
     for (i=0; i<num_pixels; i++)
-        ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
+        ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[src[i]]);
 }
 void rgb32tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
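The hunks above all touch the same per-pixel palette lookup: each source byte indexes a 4-byte palette entry, and three of that entry's bytes are copied out in the destination's channel order. A minimal stand-alone sketch of the little-endian RGB24 branch; the name pal8_to_rgb24 is illustrative, not a function in the library:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-alone version of the pattern being reformatted above:
 * palette entries are 4 bytes wide, and bytes 2/1/0 of each entry are taken
 * as R/G/B when writing packed 24-bit output. */
static void pal8_to_rgb24(const uint8_t *src, uint8_t *dst,
                          size_t num_pixels, const uint8_t *palette)
{
    size_t i;
    for (i = 0; i < num_pixels; i++) {
        dst[0] = palette[src[i]*4 + 2];  /* R */
        dst[1] = palette[src[i]*4 + 1];  /* G */
        dst[2] = palette[src[i]*4 + 0];  /* B */
        dst += 3;
    }
}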
--- a/libswscale/rgb2rgb_template.c	Thu Jan 17 08:57:15 2008 +0000
+++ b/libswscale/rgb2rgb_template.c	Thu Jan 17 10:24:14 2008 +0000
@@ -82,7 +82,7 @@
 #define SFENCE " # nop"
 #endif
-static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb24to32)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     uint8_t *dest = dst;
     const uint8_t *s = src;
@@ -142,7 +142,7 @@
     }
 }
-static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb32to24)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     uint8_t *dest = dst;
     const uint8_t *s = src;
@@ -234,7 +234,7 @@
    MMX2, 3DNOW optimization by Nick Kurshev
    32 bit C version, and and&add trick by Michael Niedermayer
 */
-static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     register const uint8_t* s=src;
     register uint8_t* d=dst;
@@ -283,7 +283,7 @@
     }
 }
-static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size)
 {
     register const uint8_t* s=src;
     register uint8_t* d=dst;
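The comment preserved in the rgb15to16 hunk credits a "32 bit C version, and and&add trick by Michael Niedermayer". A small sketch of how that trick widens two packed RGB555 pixels to RGB565 in one 32-bit operation; the function name is illustrative and the masks are the standard ones for this conversion, not copied from the template file:

#include <stdint.h>

/* Two RGB555 pixels live in one 32-bit word. Masking with 0x7FFF7FFF keeps
 * all 15 significant bits of both pixels; masking with 0x7FE07FE0 keeps only
 * the red and green fields (bits 14..5 of each pixel). Adding the two sums
 * the R/G fields with themselves, i.e. shifts them up one bit, so red lands
 * in bits 15..11 and green becomes a 6-bit field whose low bit is 0
 * (e.g. 0x7FFF becomes 0xFFDF, not 0xFFFF). */
static uint32_t rgb15to16_pair(uint32_t x)
{
    return (x & 0x7FFF7FFFu) + (x & 0x7FE07FE0u);
}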
--- a/libswscale/swscale.c	Thu Jan 17 08:57:15 2008 +0000
+++ b/libswscale/swscale.c	Thu Jan 17 10:24:14 2008 +0000
@@ -1597,9 +1597,9 @@
     }
     dst = dstParam[1] + dstStride[1]*srcSliceY/2;
     if (c->dstFormat == PIX_FMT_NV12)
-        interleaveBytes(src[1],src[2],dst,c->srcW/2,srcSliceH/2,srcStride[1],srcStride[2],dstStride[0]);
+        interleaveBytes(src[1], src[2], dst, c->srcW/2, srcSliceH/2, srcStride[1], srcStride[2], dstStride[0]);
     else
-        interleaveBytes(src[2],src[1],dst,c->srcW/2,srcSliceH/2,srcStride[2],srcStride[1],dstStride[0]);
+        interleaveBytes(src[2], src[1], dst, c->srcW/2, srcSliceH/2, srcStride[2], srcStride[1], dstStride[0]);
     return srcSliceH;
 }
@@ -1608,7 +1608,7 @@
                              int srcSliceH, uint8_t* dstParam[], int dstStride[]){
     uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
-    yv12toyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
+    yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
     return srcSliceH;
 }
@@ -1617,7 +1617,7 @@
                              int srcSliceH, uint8_t* dstParam[], int dstStride[]){
     uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
-    yv12touyvy(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
+    yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
     return srcSliceH;
 }
@@ -2489,7 +2489,7 @@
     {
         if (c->vLumFilterSize==1 && c->vChrFilterSize==2)
             av_log(c, AV_LOG_VERBOSE, "SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
-                   "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",(flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+                   "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
         else if (c->vLumFilterSize==2 && c->vChrFilterSize==2)
             av_log(c, AV_LOG_VERBOSE, "SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
         else
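The NV12/NV21 hunk above only reflows arguments, but the call pattern is worth noting: the two chroma planes go to interleaveBytes in U,V order for NV12 and in V,U order for NV21. A rough scalar sketch of that kind of plane interleaving, assuming interleaveBytes behaves as its call sites suggest; the helper below is hypothetical, not the optimized library routine:

#include <stdint.h>

/* Merge two byte planes into one plane of alternating bytes, row by row,
 * honouring independent strides (padding per row) for each buffer. */
static void interleave_planes(const uint8_t *a, const uint8_t *b, uint8_t *dst,
                              int width, int height,
                              int aStride, int bStride, int dstStride)
{
    int x, y;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            dst[2*x + 0] = a[x];   /* U for NV12, V for NV21 */
            dst[2*x + 1] = b[x];
        }
        a   += aStride;
        b   += bStride;
        dst += dstStride;
    }
}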
--- a/libswscale/swscale_altivec_template.c	Thu Jan 17 08:57:15 2008 +0000
+++ b/libswscale/swscale_altivec_template.c	Thu Jan 17 10:24:14 2008 +0000
@@ -27,7 +27,7 @@
 altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) {
     register int i;
     vector unsigned int altivec_vectorShiftInt19 =
-        vec_add(vec_splat_u32(10),vec_splat_u32(9));
+        vec_add(vec_splat_u32(10), vec_splat_u32(9));
     if ((unsigned long)dest % 16) {
         /* badly aligned store, we force store alignment */
         /* and will handle load misalignment on val w/ vec_perm */
@@ -46,10 +46,10 @@
             vector signed int v3 = vec_ld(offset + 32, val);
             vector signed int v4 = vec_ld(offset + 48, val);
             vector signed int v5 = vec_ld(offset + 64, val);
-            vector signed int v12 = vec_perm(v1,v2,perm1);
-            vector signed int v23 = vec_perm(v2,v3,perm1);
-            vector signed int v34 = vec_perm(v3,v4,perm1);
-            vector signed int v45 = vec_perm(v4,v5,perm1);
+            vector signed int v12 = vec_perm(v1, v2, perm1);
+            vector signed int v23 = vec_perm(v2, v3, perm1);
+            vector signed int v34 = vec_perm(v3, v4, perm1);
+            vector signed int v45 = vec_perm(v4, v5, perm1);
             vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
             vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
@@ -137,7 +137,7 @@
                 val[i] += lumSrc[j][i] * lumFilter[j];
             }
         }
-        altivec_packIntArrayToCharArray(val,dest,dstW);
+        altivec_packIntArrayToCharArray(val, dest, dstW);
     }
     if (uDest != 0) {
         int __attribute__ ((aligned (16))) u[chrDstW];
@@ -203,8 +203,8 @@
                 v[i] += chrSrc[j][i + 2048] * chrFilter[j];
             }
         }
-        altivec_packIntArrayToCharArray(u,uDest,chrDstW);
-        altivec_packIntArrayToCharArray(v,vDest,chrDstW);
+        altivec_packIntArrayToCharArray(u, uDest, chrDstW);
+        altivec_packIntArrayToCharArray(v, vDest, chrDstW);
     }
 }
@@ -252,9 +252,9 @@
             // and we're going to use vec_mule, so we chose
             // carefully how to "unpack" the elements into the even slots
             if ((i << 3) % 16)
-                filter_v = vec_mergel(filter_v,(vector signed short)vzero);
+                filter_v = vec_mergel(filter_v, (vector signed short)vzero);
             else
-                filter_v = vec_mergeh(filter_v,(vector signed short)vzero);
+                filter_v = vec_mergeh(filter_v, (vector signed short)vzero);
             val_vEven = vec_mule(src_v, filter_v);
             val_s = vec_sums(val_vEven, vzero);
@@ -387,7 +387,7 @@
 static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                                               int srcSliceH, uint8_t* dstParam[], int dstStride_a[])
 {
     uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
-    // yv12toyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
+    // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
     uint8_t *ysrc = src[0];
     uint8_t *usrc = src[1];
     uint8_t *vsrc = src[2];
@@ -401,7 +401,7 @@
     register unsigned int y;
     if (width&15) {
-        yv12toyuy2(ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
+        yv12toyuy2(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
         return srcSliceH;
     }
@@ -464,7 +464,7 @@
 static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
                                               int srcSliceH, uint8_t* dstParam[], int dstStride_a[])
 {
     uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
-    // yv12toyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
+    // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
     uint8_t *ysrc = src[0];
     uint8_t *usrc = src[1];
     uint8_t *vsrc = src[2];
@@ -478,7 +478,7 @@
     register unsigned int y;
     if (width&15) {
-        yv12touyvy(ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
+        yv12touyvy(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
         return srcSliceH;
     }
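The AltiVec hunks revolve around altivec_packIntArrayToCharArray, whose shift count is built as vec_splat_u32(10) + vec_splat_u32(9), presumably because the splat immediate cannot encode 19 directly. A scalar sketch of what such a pack step amounts to, assuming the usual swscale convention of 19 fractional bits in the accumulators and unsigned-saturating 8-bit output; the function name and exact rounding are assumptions, not the AltiVec code verbatim:

#include <stdint.h>

/* Convert fixed-point accumulators (19 fractional bits) to bytes:
 * arithmetic shift right by 19, then clamp to the 0..255 range,
 * roughly what vec_sra followed by a saturating pack achieves. */
static void pack_int_to_char(const int *val, uint8_t *dest, int dstW)
{
    int i;
    for (i = 0; i < dstW; i++) {
        int v = val[i] >> 19;          /* drop the fixed-point fraction */
        if (v < 0)   v = 0;            /* saturate low */
        if (v > 255) v = 255;          /* saturate high */
        dest[i] = (uint8_t)v;
    }
}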
--- a/libswscale/yuv2rgb_bfin.c	Thu Jan 17 08:57:15 2008 +0000
+++ b/libswscale/yuv2rgb_bfin.c	Thu Jan 17 10:24:14 2008 +0000
@@ -110,12 +110,12 @@
     for (i=0;i<h2;i++) {
-        lcscf (py,pu,pv,op,w,&c->oy);
+        lcscf (py, pu, pv, op, w, &c->oy);
         py += instrides[0];
         op += outstrides[0];
-        lcscf (py,pu,pv,op,w,&c->oy);
+        lcscf (py, pu, pv, op, w, &c->oy);
         py += instrides[0];
         pu += instrides[1];
@@ -132,7 +132,7 @@
                                int srcSliceY, int srcSliceH, uint8_t **oplanes, int *outstrides)
 {
-    return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
+    return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
                             ff_bfin_yuv2rgb555_line, 1, 555);
 }
@@ -141,7 +141,7 @@
                                int srcSliceY, int srcSliceH, uint8_t **oplanes, int *outstrides)
 {
-    return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
+    return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
                             ff_bfin_yuv2rgb555_line, 0, 555);
 }
@@ -150,7 +150,7 @@
                                int srcSliceY, int srcSliceH, uint8_t **oplanes, int *outstrides)
 {
-    return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
+    return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
                             ff_bfin_yuv2rgb24_line, 1, 888);
 }
@@ -159,7 +159,7 @@
                                int srcSliceY, int srcSliceH, uint8_t **oplanes, int *outstrides)
 {
-    return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
+    return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
                             ff_bfin_yuv2rgb24_line, 0, 888);
 }
@@ -168,7 +168,7 @@
                                int srcSliceY, int srcSliceH, uint8_t **oplanes, int *outstrides)
 {
-    return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
+    return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
                             ff_bfin_yuv2rgb565_line, 1, 565);
 }
@@ -177,7 +177,7 @@
                                int srcSliceY, int srcSliceH, uint8_t **oplanes, int *outstrides)
 {
-    return core_yuv420_rgb (c,in,instrides,srcSliceY,srcSliceH,oplanes,outstrides,
+    return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
                             ff_bfin_yuv2rgb565_line, 0, 565);
 }
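The first hunk shows core_yuv420_rgb walking 4:2:0 data two luma rows at a time, which is why lcscf appears twice per loop iteration while the chroma pointers advance only once. A simplified sketch of that walk; the callback type and names below are invented for illustration, and the real code also threads &c->oy through each call:

#include <stdint.h>

/* Per-line converter: one luma row plus the chroma row it shares. */
typedef void (*line_fn)(const uint8_t *y, const uint8_t *u, const uint8_t *v,
                        uint8_t *out, int w);

static void walk_yuv420(const uint8_t *py, const uint8_t *pu, const uint8_t *pv,
                        uint8_t *op, int w, int h,
                        int yStride, int cStride, int outStride,
                        line_fn convert_line)
{
    int i;
    for (i = 0; i < h/2; i++) {
        convert_line(py, pu, pv, op, w);   /* first luma row of the pair */
        py += yStride;  op += outStride;

        convert_line(py, pu, pv, op, w);   /* second luma row, same chroma row */
        py += yStride;  op += outStride;

        pu += cStride;  pv += cStride;     /* chroma advances once per pair */
    }
}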
--- a/libswscale/yuv2rgb_vis.c	Thu Jan 17 08:57:15 2008 +0000
+++ b/libswscale/yuv2rgb_vis.c	Thu Jan 17 10:24:14 2008 +0000
@@ -188,7 +188,7 @@
     c->sparc_coeffs[8]=c->ubCoeff;
     c->sparc_coeffs[9]=c->ugCoeff;
-    c->sparc_coeffs[0]=(((int16_t)c->yOffset*(int16_t)c->yCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+    c->sparc_coeffs[0]=(((int16_t)c->yOffset*(int16_t)c->yCoeff >>11) & 0xffff) * 0x0001000100010001ULL;
     c->sparc_coeffs[1]=(((int16_t)c->uOffset*(int16_t)c->ubCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
     c->sparc_coeffs[2]=(((int16_t)c->uOffset*(int16_t)c->ugCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
     c->sparc_coeffs[3]=(((int16_t)c->vOffset*(int16_t)c->vgCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
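The interesting pattern in this hunk is the coefficient packing: a 16-bit fixed-point product is replicated into all four 16-bit lanes of a 64-bit word by multiplying with 0x0001000100010001ULL, since v * 0x0001000100010001 equals v + (v<<16) + (v<<32) + (v<<48) whenever v fits in 16 bits. A tiny demonstration of that replication trick:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t v      = 0x1234;                       /* any 16-bit value */
    uint64_t packed = v * 0x0001000100010001ULL;    /* replicate into 4 lanes */
    printf("%016llx\n", (unsigned long long)packed); /* prints 1234123412341234 */
    return 0;
}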