From 0d9f3d85f648779f4fcc630c593f0322f5fd9ef2 Mon Sep 17 00:00:00 2001
From: Arpi
Date: Sat, 22 Jun 2002 08:49:45 +0000
Subject: [PATCH] sync with mplayer xp

- partial yvu9 support (copy only)
- rgb 15/16 -> 24/32 converters
- int->unsigned changes

Originally committed as revision 6493 to svn://svn.mplayerhq.hu/mplayer/trunk/postproc
---
 postproc/rgb2rgb.c          | 126 ++++---
 postproc/rgb2rgb.h          |  21 +-
 postproc/rgb2rgb_template.c | 696 +++++++++++++++++++++++++++++-------
 postproc/swscale.c          | 531 ++++++++++++++++++++++-----
 postproc/swscale_template.c |   2 +-
 postproc/yuv2rgb.c          |  12 +-
 postproc/yuv2rgb_mlib.c     |  14 +-
 postproc/yuv2rgb_template.c |  18 +-
 8 files changed, 1128 insertions(+), 292 deletions(-)

diff --git a/postproc/rgb2rgb.c b/postproc/rgb2rgb.c
index 91983bea0a..962a58945f 100644
--- a/postproc/rgb2rgb.c
+++ b/postproc/rgb2rgb.c
@@ -20,6 +20,8 @@
 #define FAST_BGR2YV12 // use 7 bit coeffs instead of 15bit
 
 #ifdef CAN_COMPILE_X86_ASM
+static const uint64_t mmx_null __attribute__((aligned(8))) = 0x0000000000000000ULL;
+static const uint64_t mmx_one __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
 static const uint64_t mask32b __attribute__((aligned(8))) = 0x000000FF000000FFULL;
 static const uint64_t mask32g __attribute__((aligned(8))) = 0x0000FF000000FF00ULL;
 static const uint64_t mask32r __attribute__((aligned(8))) = 0x00FF000000FF0000ULL;
@@ -35,6 +37,11 @@ static const uint64_t mask24hhhh __attribute__((aligned(8))) = 0xffffffffffff00
 static const uint64_t mask15b __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
 static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
 static const uint64_t mask15s __attribute__((aligned(8))) = 0xFFE0FFE0FFE0FFE0ULL;
+static const uint64_t mask15g __attribute__((aligned(8))) = 0x03E003E003E003E0ULL;
+static const uint64_t mask15r __attribute__((aligned(8))) = 0x7C007C007C007C00ULL;
+#define mask16b mask15b
+static const uint64_t mask16g __attribute__((aligned(8))) = 0x07E007E007E007E0ULL;
+static const uint64_t mask16r __attribute__((aligned(8))) = 0xF800F800F800F800ULL;
 static const uint64_t red_16mask __attribute__((aligned(8))) = 0x0000f8000000f800ULL;
 static const uint64_t green_16mask __attribute__((aligned(8)))= 0x000007e0000007e0ULL;
 static const uint64_t blue_16mask __attribute__((aligned(8))) = 0x0000001f0000001fULL;
@@ -137,10 +144,68 @@ void rgb24to32(const uint8_t *src,uint8_t *dst,unsigned src_size)
 	else if(gCpuCaps.hasMMX)
 		rgb24to32_MMX(src, dst, src_size);
 	else
-		rgb24to32_C(src, dst, src_size);
-#else
-	rgb24to32_C(src, dst, src_size);
 #endif
+	rgb24to32_C(src, dst, src_size);
+}
+
+void rgb15to24(const uint8_t *src,uint8_t *dst,unsigned src_size)
+{
+#ifdef CAN_COMPILE_X86_ASM
+	// ordered by speed, fastest first
+	if(gCpuCaps.hasMMX2)
+		rgb15to24_MMX2(src, dst, src_size);
+	else if(gCpuCaps.has3DNow)
+		rgb15to24_3DNow(src, dst, src_size);
+	else if(gCpuCaps.hasMMX)
+		rgb15to24_MMX(src, dst, src_size);
+	else
+#endif
+	rgb15to24_C(src, dst, src_size);
+}
+
+void rgb16to24(const uint8_t *src,uint8_t *dst,unsigned src_size)
+{
+#ifdef CAN_COMPILE_X86_ASM
+	// ordered by speed, fastest first
+	if(gCpuCaps.hasMMX2)
+		rgb16to24_MMX2(src, dst, src_size);
+	else if(gCpuCaps.has3DNow)
+		rgb16to24_3DNow(src, dst, src_size);
+	else if(gCpuCaps.hasMMX)
+		rgb16to24_MMX(src, dst, src_size);
+	else
+#endif
+	rgb16to24_C(src, dst, src_size);
+}
+
+void rgb15to32(const uint8_t *src,uint8_t *dst,unsigned src_size)
+{
+#ifdef CAN_COMPILE_X86_ASM
+	// ordered by speed, fastest first
+	if(gCpuCaps.hasMMX2)
+		rgb15to32_MMX2(src, dst, src_size);
+	else if(gCpuCaps.has3DNow)
+		rgb15to32_3DNow(src, dst, src_size);
+	else if(gCpuCaps.hasMMX)
+		rgb15to32_MMX(src, dst, src_size);
+	else
+#endif
+	rgb15to32_C(src, dst, src_size);
+}
+
+void rgb16to32(const uint8_t *src,uint8_t *dst,unsigned src_size)
+{
+#ifdef CAN_COMPILE_X86_ASM
+	// ordered by speed, fastest first
+	if(gCpuCaps.hasMMX2)
+		rgb16to32_MMX2(src, dst, src_size);
+	else if(gCpuCaps.has3DNow)
+		rgb16to32_3DNow(src, dst, src_size);
+	else if(gCpuCaps.hasMMX)
+		rgb16to32_MMX(src, dst, src_size);
+	else
+#endif
+	rgb16to32_C(src, dst, src_size);
 }
 
 void rgb32to24(const uint8_t *src,uint8_t *dst,unsigned src_size)
@@ -154,10 +219,8 @@
 	else if(gCpuCaps.hasMMX)
 		rgb32to24_MMX(src, dst, src_size);
 	else
-		rgb32to24_C(src, dst, src_size);
-#else
-	rgb32to24_C(src, dst, src_size);
 #endif
+	rgb32to24_C(src, dst, src_size);
 }
 
 /*
@@ -177,10 +240,8 @@
 	else if(gCpuCaps.hasMMX)
 		rgb15to16_MMX(src, dst, src_size);
 	else
-		rgb15to16_C(src, dst, src_size);
-#else
-	rgb15to16_C(src, dst, src_size);
 #endif
+	rgb15to16_C(src, dst, src_size);
 }
 
 /**
@@ -242,10 +303,8 @@ void rgb32to16(const uint8_t *src, uint8_t *dst, unsigned src_size)
 	else if(gCpuCaps.hasMMX)
 		rgb32to16_MMX(src, dst, src_size);
 	else
-		rgb32to16_C(src, dst, src_size);
-#else
-	rgb32to16_C(src, dst, src_size);
 #endif
+	rgb32to16_C(src, dst, src_size);
 }
 
 void rgb32to15(const uint8_t *src, uint8_t *dst, unsigned src_size)
@@ -259,10 +318,8 @@
 	else if(gCpuCaps.hasMMX)
 		rgb32to15_MMX(src, dst, src_size);
 	else
-		rgb32to15_C(src, dst, src_size);
-#else
-	rgb32to15_C(src, dst, src_size);
 #endif
+	rgb32to15_C(src, dst, src_size);
 }
 
 void rgb24to16(const uint8_t *src, uint8_t *dst, unsigned src_size)
@@ -276,10 +333,8 @@
 	else if(gCpuCaps.hasMMX)
 		rgb24to16_MMX(src, dst, src_size);
 	else
-		rgb24to16_C(src, dst, src_size);
-#else
-	rgb24to16_C(src, dst, src_size);
 #endif
+	rgb24to16_C(src, dst, src_size);
 }
 
 void rgb24to15(const uint8_t *src, uint8_t *dst, unsigned src_size)
@@ -293,10 +348,8 @@
 	else if(gCpuCaps.hasMMX)
 		rgb24to15_MMX(src, dst, src_size);
 	else
-		rgb24to15_C(src, dst, src_size);
-#else
-	rgb24to15_C(src, dst, src_size);
 #endif
+	rgb24to15_C(src, dst, src_size);
 }
 
 /**
@@ -330,10 +383,8 @@
 	else if(gCpuCaps.hasMMX)
 		rgb32tobgr32_MMX(src, dst, src_size);
 	else
-		rgb32tobgr32_C(src, dst, src_size);
-#else
-	rgb32tobgr32_C(src, dst, src_size);
 #endif
+	rgb32tobgr32_C(src, dst, src_size);
 }
 
 void rgb24tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
@@ -347,10 +398,8 @@
 	else if(gCpuCaps.hasMMX)
 		rgb24tobgr24_MMX(src, dst, src_size);
 	else
-		rgb24tobgr24_C(src, dst, src_size);
-#else
-	rgb24tobgr24_C(src, dst, src_size);
 #endif
+	rgb24tobgr24_C(src, dst, src_size);
 }
 
 /**
@@ -371,10 +420,8 @@ void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, u
 	else if(gCpuCaps.hasMMX)
 		yv12toyuy2_MMX(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
 	else
-		yv12toyuy2_C(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
-#else
-	yv12toyuy2_C(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
 #endif
+	yv12toyuy2_C(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
 }
 
 /**
@@ -394,10 +441,8 @@ void yuv422ptoyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc
 	else if(gCpuCaps.hasMMX)
 		yuv422ptoyuy2_MMX(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
 	else
-		yuv422ptoyuy2_C(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
-#else
-	yuv422ptoyuy2_C(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
 #endif
+	yuv422ptoyuy2_C(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
 }
 
 /**
@@ -418,10 +463,8 @@ void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
 	else if(gCpuCaps.hasMMX)
 		yuy2toyv12_MMX(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
 	else
-		yuy2toyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
-#else
-	yuy2toyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
 #endif
+	yuy2toyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
 }
 
 /**
@@ -488,14 +531,13 @@ void rgb24toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst
 	else if(gCpuCaps.hasMMX)
 		rgb24toyv12_MMX(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
 	else
-		rgb24toyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
-#else
-	rgb24toyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
 #endif
+	rgb24toyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
 }
 
 void interleaveBytes(uint8_t *src1, uint8_t *src2, uint8_t *dst,
-			int width, int height, int src1Stride, int src2Stride, int dstStride)
+			unsigned width, unsigned height, unsigned src1Stride,
+			unsigned src2Stride, unsigned dstStride)
 {
@@ -506,8 +548,6 @@ void interleaveBytes(uint8_t *src1, uint8_t *src2, uint8_t *dst,
 	else if(gCpuCaps.hasMMX)
 		interleaveBytes_MMX(src1, src2, dst, width, height, src1Stride, src2Stride, dstStride);
 	else
-		interleaveBytes_C(src1, src2, dst, width, height, src1Stride, src2Stride, dstStride);
-#else
-	interleaveBytes_C(src1, src2, dst, width, height, src1Stride, src2Stride, dstStride);
 #endif
+	interleaveBytes_C(src1, src2, dst, width, height, src1Stride, src2Stride, dstStride);
 }
diff --git a/postproc/rgb2rgb.h b/postproc/rgb2rgb.h
index fb4f04590d..9fb6da6ef1 100644
--- a/postproc/rgb2rgb.h
+++ b/postproc/rgb2rgb.h
@@ -10,12 +10,16 @@
 #define RGB2RGB_INCLUDED
 
 extern void rgb24to32(const uint8_t *src,uint8_t *dst,unsigned src_size);
-extern void rgb32to24(const uint8_t *src,uint8_t *dst,unsigned src_size);
-extern void rgb15to16(const uint8_t *src,uint8_t *dst,unsigned src_size);
-extern void rgb32to16(const uint8_t *src,uint8_t *dst,unsigned src_size);
-extern void rgb32to15(const uint8_t *src,uint8_t *dst,unsigned src_size);
 extern void rgb24to16(const uint8_t *src,uint8_t *dst,unsigned src_size);
 extern void rgb24to15(const uint8_t *src,uint8_t *dst,unsigned src_size);
+extern void rgb32to24(const uint8_t *src,uint8_t *dst,unsigned src_size);
+extern void rgb32to16(const uint8_t *src,uint8_t *dst,unsigned src_size);
+extern void rgb32to15(const uint8_t *src,uint8_t *dst,unsigned src_size);
+extern void rgb15to16(const uint8_t *src,uint8_t *dst,unsigned src_size);
+extern void rgb15to24(const 
uint8_t *src,uint8_t *dst,unsigned src_size); +extern void rgb15to32(const uint8_t *src,uint8_t *dst,unsigned src_size); +extern void rgb16to24(const uint8_t *src,uint8_t *dst,unsigned src_size); +extern void rgb16to32(const uint8_t *src,uint8_t *dst,unsigned src_size); extern void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned src_size); extern void rgb24tobgr24(const uint8_t *src, uint8_t *dst, unsigned src_size); @@ -39,7 +43,8 @@ extern void rgb24toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_ unsigned int lumStride, unsigned int chromStride, unsigned int srcStride); extern void interleaveBytes(uint8_t *src1, uint8_t *src2, uint8_t *dst, - int width, int height, int src1Stride, int src2Stride, int dstStride); + unsigned width, unsigned height, unsigned src1Stride, + unsigned src2Stride, unsigned dstStride); #define MODE_RGB 0x1 @@ -47,11 +52,11 @@ extern void interleaveBytes(uint8_t *src1, uint8_t *src2, uint8_t *dst, typedef void (* yuv2rgb_fun) (uint8_t * image, uint8_t * py, uint8_t * pu, uint8_t * pv, - int h_size, int v_size, - int rgb_stride, int y_stride, int uv_stride); + unsigned h_size, unsigned v_size, + unsigned rgb_stride, unsigned y_stride, unsigned uv_stride); extern yuv2rgb_fun yuv2rgb; -void yuv2rgb_init (int bpp, int mode); +void yuv2rgb_init (unsigned bpp, int mode); #endif diff --git a/postproc/rgb2rgb_template.c b/postproc/rgb2rgb_template.c index 9d59eabc70..015e7f2d56 100644 --- a/postproc/rgb2rgb_template.c +++ b/postproc/rgb2rgb_template.c @@ -8,6 +8,13 @@ * palette & yuv & runtime cpu stuff by Michael (michaelni@gmx.at) (under GPL) */ +#include +#include /* for __WORDSIZE */ + +#ifndef __WORDSIZE +#warning You have misconfigured system and probably will lose performance! +#endif + #undef PREFETCH #undef MOVNTQ #undef EMMS @@ -56,13 +63,13 @@ static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,unsigned sr const uint8_t *s = src; const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + uint8_t *mm_end; #endif end = s + src_size; #ifdef HAVE_MMX __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); - mm_end = end - 23; __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory"); + mm_end = (uint8_t*)((((unsigned long)end)/24)*24); while(s < mm_end) { __asm __volatile( @@ -107,12 +114,12 @@ static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,unsigned sr const uint8_t *s = src; const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + uint8_t *mm_end; #endif end = s + src_size; #ifdef HAVE_MMX __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); - mm_end = end - 31; + mm_end = (uint8_t*)((((unsigned long)end)/32)*32); while(s < mm_end) { __asm __volatile( @@ -186,15 +193,16 @@ static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,unsigned sr */ static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,unsigned src_size) { + register const uint8_t* s=src; + register uint8_t* d=dst; + register const uint8_t *end; + uint8_t *mm_end; + end = s + src_size; #ifdef HAVE_MMX - register int offs=15-src_size; - register const char* s=src-offs; - register char* d=dst-offs; - __asm __volatile(PREFETCH" %0"::"m"(*(s+offs))); - __asm __volatile( - "movq %0, %%mm4\n\t" - ::"m"(mask15s)); - while(offs<0) + __asm __volatile(PREFETCH" %0"::"m"(*s)); + __asm __volatile("movq %0, %%mm4"::"m"(mask15s)); + mm_end = (uint8_t*)((((unsigned long)end)/16)*16); + while(s>2; - for(i=0; i>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); + s++; } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); 
-#else - unsigned j,i,num_pixels=src_size/4; - uint16_t *d = (uint16_t *)dst; - for(i=0,j=0; j>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); - } -#endif } static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, unsigned src_size) { -#ifdef HAVE_MMX const uint8_t *s = src; - const uint8_t *end,*mm_end; + const uint8_t *end; +#ifdef HAVE_MMX + const uint8_t *mm_end; +#endif uint16_t *d = (uint16_t *)dst; end = s + src_size; - mm_end = end - 15; +#ifdef HAVE_MMX __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm __volatile( "movq %0, %%mm7\n\t" "movq %1, %%mm6\n\t" ::"m"(red_15mask),"m"(green_15mask)); + mm_end = (uint8_t*)((((unsigned long)end)/16)*16); while(s < mm_end) { __asm __volatile( @@ -375,43 +366,35 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, unsigned d += 4; s += 16; } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); +#endif while(s < end) { const int b= *s++; const int g= *s++; const int r= *s++; - s++; *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); + s++; } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); -#else - unsigned j,i,num_pixels=src_size/4; - uint16_t *d = (uint16_t *)dst; - for(i=0,j=0; j>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); - } -#endif } static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, unsigned src_size) { -#ifdef HAVE_MMX const uint8_t *s = src; - const uint8_t *end,*mm_end; + const uint8_t *end; +#ifdef HAVE_MMX + const uint8_t *mm_end; +#endif uint16_t *d = (uint16_t *)dst; end = s + src_size; - mm_end = end - 11; +#ifdef HAVE_MMX __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm __volatile( "movq %0, %%mm7\n\t" "movq %1, %%mm6\n\t" ::"m"(red_16mask),"m"(green_16mask)); + mm_end = (uint8_t*)((((unsigned long)end)/16)*16); while(s < mm_end) { __asm __volatile( @@ -447,6 +430,9 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, unsigned d += 4; s += 12; } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); +#endif while(s < end) { const int b= *s++; @@ -454,35 +440,24 @@ static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, unsigned const int r= *s++; *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); -#else - unsigned j,i,num_pixels=src_size/3; - uint16_t *d = (uint16_t *)dst; - for(i=0,j=0; j>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); - } -#endif } static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, unsigned src_size) { -#ifdef HAVE_MMX const uint8_t *s = src; - const uint8_t *end,*mm_end; + const uint8_t *end; +#ifdef HAVE_MMX + const uint8_t *mm_end; +#endif uint16_t *d = (uint16_t *)dst; end = s + src_size; - mm_end = end -11; +#ifdef HAVE_MMX __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm __volatile( "movq %0, %%mm7\n\t" "movq %1, %%mm6\n\t" ::"m"(red_15mask),"m"(green_15mask)); + mm_end = (uint8_t*)((((unsigned long)end)/16)*16); while(s < mm_end) { __asm __volatile( @@ -518,6 +493,9 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, unsigned d += 4; s += 12; } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); +#endif while(s < end) { const int b= *s++; @@ -525,25 +503,448 @@ static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, unsigned const int r= *s++; *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); } +} + +/* + I use here less accurate approximation by simply + left-shifting the input + value and filling the 
low order bits with + zeroes. This method improves png's + compression but this scheme cannot reproduce white exactly, since it does not + generate an all-ones maximum value; the net effect is to darken the + image slightly. + + The better method should be "left bit replication": + + 4 3 2 1 0 + --------- + 1 1 0 1 1 + + 7 6 5 4 3 2 1 0 + ---------------- + 1 1 0 1 1 1 1 0 + |=======| |===| + | Leftmost Bits Repeated to Fill Open Bits + | + Original Bits +*/ +static inline void RENAME(rgb15to24)(const uint8_t *src, uint8_t *dst, unsigned src_size) +{ + const uint16_t *end; +#ifdef HAVE_MMX + const uint16_t *mm_end; +#endif + uint8_t *d = (uint8_t *)dst; + const uint16_t *s = (uint16_t *)src; + end = s + src_size/2; +#ifdef HAVE_MMX + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = (uint16_t*)((((unsigned long)end)/8)*8); + while(s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1\n\t" + "movq %1, %%mm0\n\t" + "movq %1, %%mm1\n\t" + "movq %1, %%mm2\n\t" + "pand %2, %%mm0\n\t" + "pand %3, %%mm1\n\t" + "pand %4, %%mm2\n\t" + "psllq $3, %%mm0\n\t" + "psrlq $2, %%mm1\n\t" + "psrlq $7, %%mm2\n\t" + "movq %%mm0, %%mm3\n\t" + "movq %%mm1, %%mm4\n\t" + "movq %%mm2, %%mm5\n\t" + "punpcklwd %5, %%mm0\n\t" + "punpcklwd %5, %%mm1\n\t" + "punpcklwd %5, %%mm2\n\t" + "punpckhwd %5, %%mm3\n\t" + "punpckhwd %5, %%mm4\n\t" + "punpckhwd %5, %%mm5\n\t" + "psllq $8, %%mm1\n\t" + "psllq $16, %%mm2\n\t" + "por %%mm1, %%mm0\n\t" + "por %%mm2, %%mm0\n\t" + "psllq $8, %%mm4\n\t" + "psllq $16, %%mm5\n\t" + "por %%mm4, %%mm3\n\t" + "por %%mm5, %%mm3\n\t" + + "movq %%mm0, %%mm6\n\t" + "movq %%mm3, %%mm7\n\t" + + "movq 8%1, %%mm0\n\t" + "movq 8%1, %%mm1\n\t" + "movq 8%1, %%mm2\n\t" + "pand %2, %%mm0\n\t" + "pand %3, %%mm1\n\t" + "pand %4, %%mm2\n\t" + "psllq $3, %%mm0\n\t" + "psrlq $2, %%mm1\n\t" + "psrlq $7, %%mm2\n\t" + "movq %%mm0, %%mm3\n\t" + "movq %%mm1, %%mm4\n\t" + "movq %%mm2, %%mm5\n\t" + "punpcklwd %5, %%mm0\n\t" + "punpcklwd %5, %%mm1\n\t" + "punpcklwd %5, %%mm2\n\t" + "punpckhwd %5, %%mm3\n\t" + "punpckhwd %5, %%mm4\n\t" + "punpckhwd %5, %%mm5\n\t" + "psllq $8, %%mm1\n\t" + "psllq $16, %%mm2\n\t" + "por %%mm1, %%mm0\n\t" + "por %%mm2, %%mm0\n\t" + "psllq $8, %%mm4\n\t" + "psllq $16, %%mm5\n\t" + "por %%mm4, %%mm3\n\t" + "por %%mm5, %%mm3\n\t" + + :"=m"(*d) + :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null) + :"memory"); + /* Borrowed 32 to 24 */ + __asm __volatile( + "movq %%mm0, %%mm4\n\t" + "movq %%mm3, %%mm5\n\t" + "movq %%mm6, %%mm0\n\t" + "movq %%mm7, %%mm1\n\t" + + "movq %%mm4, %%mm6\n\t" + "movq %%mm5, %%mm7\n\t" + "movq %%mm0, %%mm2\n\t" + "movq %%mm1, %%mm3\n\t" + + "psrlq $8, %%mm2\n\t" + "psrlq $8, %%mm3\n\t" + "psrlq $8, %%mm6\n\t" + "psrlq $8, %%mm7\n\t" + "pand %2, %%mm0\n\t" + "pand %2, %%mm1\n\t" + "pand %2, %%mm4\n\t" + "pand %2, %%mm5\n\t" + "pand %3, %%mm2\n\t" + "pand %3, %%mm3\n\t" + "pand %3, %%mm6\n\t" + "pand %3, %%mm7\n\t" + "por %%mm2, %%mm0\n\t" + "por %%mm3, %%mm1\n\t" + "por %%mm6, %%mm4\n\t" + "por %%mm7, %%mm5\n\t" + + "movq %%mm1, %%mm2\n\t" + "movq %%mm4, %%mm3\n\t" + "psllq $48, %%mm2\n\t" + "psllq $32, %%mm3\n\t" + "pand %4, %%mm2\n\t" + "pand %5, %%mm3\n\t" + "por %%mm2, %%mm0\n\t" + "psrlq $16, %%mm1\n\t" + "psrlq $32, %%mm4\n\t" + "psllq $16, %%mm5\n\t" + "por %%mm3, %%mm1\n\t" + "pand %6, %%mm5\n\t" + "por %%mm5, %%mm4\n\t" + + MOVNTQ" %%mm0, %0\n\t" + MOVNTQ" %%mm1, 8%0\n\t" + MOVNTQ" %%mm4, 16%0" + + :"=m"(*d) + :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) + :"memory"); + d += 24; + s += 8; + } __asm 
__volatile(SFENCE:::"memory"); __asm __volatile(EMMS:::"memory"); -#else - unsigned j,i,num_pixels=src_size/3; - uint16_t *d = (uint16_t *)dst; - for(i=0,j=0; j>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); - } #endif + while(s < end) + { + register uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x7C00)>>7; + } +} + +static inline void RENAME(rgb16to24)(const uint8_t *src, uint8_t *dst, unsigned src_size) +{ + const uint16_t *end; +#ifdef HAVE_MMX + const uint16_t *mm_end; +#endif + uint8_t *d = (uint8_t *)dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; +#ifdef HAVE_MMX + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = (uint16_t*)((((unsigned long)end)/8)*8); + while(s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1\n\t" + "movq %1, %%mm0\n\t" + "movq %1, %%mm1\n\t" + "movq %1, %%mm2\n\t" + "pand %2, %%mm0\n\t" + "pand %3, %%mm1\n\t" + "pand %4, %%mm2\n\t" + "psllq $3, %%mm0\n\t" + "psrlq $3, %%mm1\n\t" + "psrlq $8, %%mm2\n\t" + "movq %%mm0, %%mm3\n\t" + "movq %%mm1, %%mm4\n\t" + "movq %%mm2, %%mm5\n\t" + "punpcklwd %5, %%mm0\n\t" + "punpcklwd %5, %%mm1\n\t" + "punpcklwd %5, %%mm2\n\t" + "punpckhwd %5, %%mm3\n\t" + "punpckhwd %5, %%mm4\n\t" + "punpckhwd %5, %%mm5\n\t" + "psllq $8, %%mm1\n\t" + "psllq $16, %%mm2\n\t" + "por %%mm1, %%mm0\n\t" + "por %%mm2, %%mm0\n\t" + "psllq $8, %%mm4\n\t" + "psllq $16, %%mm5\n\t" + "por %%mm4, %%mm3\n\t" + "por %%mm5, %%mm3\n\t" + + "movq %%mm0, %%mm6\n\t" + "movq %%mm3, %%mm7\n\t" + + "movq 8%1, %%mm0\n\t" + "movq 8%1, %%mm1\n\t" + "movq 8%1, %%mm2\n\t" + "pand %2, %%mm0\n\t" + "pand %3, %%mm1\n\t" + "pand %4, %%mm2\n\t" + "psllq $3, %%mm0\n\t" + "psrlq $3, %%mm1\n\t" + "psrlq $8, %%mm2\n\t" + "movq %%mm0, %%mm3\n\t" + "movq %%mm1, %%mm4\n\t" + "movq %%mm2, %%mm5\n\t" + "punpcklwd %5, %%mm0\n\t" + "punpcklwd %5, %%mm1\n\t" + "punpcklwd %5, %%mm2\n\t" + "punpckhwd %5, %%mm3\n\t" + "punpckhwd %5, %%mm4\n\t" + "punpckhwd %5, %%mm5\n\t" + "psllq $8, %%mm1\n\t" + "psllq $16, %%mm2\n\t" + "por %%mm1, %%mm0\n\t" + "por %%mm2, %%mm0\n\t" + "psllq $8, %%mm4\n\t" + "psllq $16, %%mm5\n\t" + "por %%mm4, %%mm3\n\t" + "por %%mm5, %%mm3\n\t" + :"=m"(*d) + :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null) + :"memory"); + /* Borrowed 32 to 24 */ + __asm __volatile( + "movq %%mm0, %%mm4\n\t" + "movq %%mm3, %%mm5\n\t" + "movq %%mm6, %%mm0\n\t" + "movq %%mm7, %%mm1\n\t" + + "movq %%mm4, %%mm6\n\t" + "movq %%mm5, %%mm7\n\t" + "movq %%mm0, %%mm2\n\t" + "movq %%mm1, %%mm3\n\t" + + "psrlq $8, %%mm2\n\t" + "psrlq $8, %%mm3\n\t" + "psrlq $8, %%mm6\n\t" + "psrlq $8, %%mm7\n\t" + "pand %2, %%mm0\n\t" + "pand %2, %%mm1\n\t" + "pand %2, %%mm4\n\t" + "pand %2, %%mm5\n\t" + "pand %3, %%mm2\n\t" + "pand %3, %%mm3\n\t" + "pand %3, %%mm6\n\t" + "pand %3, %%mm7\n\t" + "por %%mm2, %%mm0\n\t" + "por %%mm3, %%mm1\n\t" + "por %%mm6, %%mm4\n\t" + "por %%mm7, %%mm5\n\t" + + "movq %%mm1, %%mm2\n\t" + "movq %%mm4, %%mm3\n\t" + "psllq $48, %%mm2\n\t" + "psllq $32, %%mm3\n\t" + "pand %4, %%mm2\n\t" + "pand %5, %%mm3\n\t" + "por %%mm2, %%mm0\n\t" + "psrlq $16, %%mm1\n\t" + "psrlq $32, %%mm4\n\t" + "psllq $16, %%mm5\n\t" + "por %%mm3, %%mm1\n\t" + "pand %6, %%mm5\n\t" + "por %%mm5, %%mm4\n\t" + + MOVNTQ" %%mm0, %0\n\t" + MOVNTQ" %%mm1, 8%0\n\t" + MOVNTQ" %%mm4, 16%0" + + :"=m"(*d) + :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) + :"memory"); + d += 24; + s += 8; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); +#endif + while(s < end) + { + register 
uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0xF800)>>8; + } +} + +static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, unsigned src_size) +{ + const uint16_t *end; +#ifdef HAVE_MMX + const uint16_t *mm_end; +#endif + uint8_t *d = (uint8_t *)dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; +#ifdef HAVE_MMX + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory"); + mm_end = (uint16_t*)((((unsigned long)end)/4)*4); + while(s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1\n\t" + "movq %1, %%mm0\n\t" + "movq %1, %%mm1\n\t" + "movq %1, %%mm2\n\t" + "pand %2, %%mm0\n\t" + "pand %3, %%mm1\n\t" + "pand %4, %%mm2\n\t" + "psllq $3, %%mm0\n\t" + "psrlq $2, %%mm1\n\t" + "psrlq $7, %%mm2\n\t" + "movq %%mm0, %%mm3\n\t" + "movq %%mm1, %%mm4\n\t" + "movq %%mm2, %%mm5\n\t" + "punpcklwd %%mm7, %%mm0\n\t" + "punpcklwd %%mm7, %%mm1\n\t" + "punpcklwd %%mm7, %%mm2\n\t" + "punpckhwd %%mm7, %%mm3\n\t" + "punpckhwd %%mm7, %%mm4\n\t" + "punpckhwd %%mm7, %%mm5\n\t" + "psllq $8, %%mm1\n\t" + "psllq $16, %%mm2\n\t" + "por %%mm1, %%mm0\n\t" + "por %%mm2, %%mm0\n\t" + "psllq $8, %%mm4\n\t" + "psllq $16, %%mm5\n\t" + "por %%mm4, %%mm3\n\t" + "por %%mm5, %%mm3\n\t" + MOVNTQ" %%mm0, %0\n\t" + MOVNTQ" %%mm3, 8%0\n\t" + :"=m"(*d) + :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r) + :"memory"); + d += 16; + s += 4; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); +#endif + while(s < end) + { + register uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x7C00)>>7; + *d++ = 0; + } +} + +static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, unsigned src_size) +{ + const uint16_t *end; +#ifdef HAVE_MMX + const uint16_t *mm_end; +#endif + uint8_t *d = (uint8_t *)dst; + const uint16_t *s = (uint16_t *)src; + end = s + src_size/2; +#ifdef HAVE_MMX + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory"); + mm_end = (uint16_t*)((((unsigned long)end)/4)*4); + while(s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1\n\t" + "movq %1, %%mm0\n\t" + "movq %1, %%mm1\n\t" + "movq %1, %%mm2\n\t" + "pand %2, %%mm0\n\t" + "pand %3, %%mm1\n\t" + "pand %4, %%mm2\n\t" + "psllq $3, %%mm0\n\t" + "psrlq $3, %%mm1\n\t" + "psrlq $8, %%mm2\n\t" + "movq %%mm0, %%mm3\n\t" + "movq %%mm1, %%mm4\n\t" + "movq %%mm2, %%mm5\n\t" + "punpcklwd %%mm7, %%mm0\n\t" + "punpcklwd %%mm7, %%mm1\n\t" + "punpcklwd %%mm7, %%mm2\n\t" + "punpckhwd %%mm7, %%mm3\n\t" + "punpckhwd %%mm7, %%mm4\n\t" + "punpckhwd %%mm7, %%mm5\n\t" + "psllq $8, %%mm1\n\t" + "psllq $16, %%mm2\n\t" + "por %%mm1, %%mm0\n\t" + "por %%mm2, %%mm0\n\t" + "psllq $8, %%mm4\n\t" + "psllq $16, %%mm5\n\t" + "por %%mm4, %%mm3\n\t" + "por %%mm5, %%mm3\n\t" + MOVNTQ" %%mm0, %0\n\t" + MOVNTQ" %%mm3, 8%0\n\t" + :"=m"(*d) + :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r) + :"memory"); + d += 16; + s += 4; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); +#endif + while(s < end) + { + register uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0xF800)>>8; + *d++ = 0; + } } static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, unsigned int src_size) { #ifdef HAVE_MMX +/* TODO: unroll this loop */ asm volatile ( "xorl %%eax, %%eax \n\t" ".balign 16 \n\t" @@ -554,9 +955,9 @@ static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, 
unsign "movq %%mm0, %%mm2 \n\t" "pslld $16, %%mm0 \n\t" "psrld $16, %%mm1 \n\t" - "pand "MANGLE(mask32r)", %%mm0 \n\t" - "pand "MANGLE(mask32g)", %%mm2 \n\t" - "pand "MANGLE(mask32b)", %%mm1 \n\t" + "pand "MANGLE(mask32r)", %%mm0 \n\t" + "pand "MANGLE(mask32g)", %%mm2 \n\t" + "pand "MANGLE(mask32b)", %%mm1 \n\t" "por %%mm0, %%mm2 \n\t" "por %%mm1, %%mm2 \n\t" MOVNTQ" %%mm2, (%1, %%eax) \n\t" @@ -570,8 +971,8 @@ static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, unsign __asm __volatile(SFENCE:::"memory"); __asm __volatile(EMMS:::"memory"); #else - int i; - int num_pixels= src_size >> 2; + unsigned i; + unsigned num_pixels = src_size >> 2; for(i=0; i>1; + unsigned y; + const unsigned chromWidth= width>>1; for(y=0; y= 64 int i; - for(i=0; i>1; + unsigned y; + const unsigned chromWidth= width>>1; for(y=0; y>1; + unsigned y; + const unsigned chromWidth= width>>1; for(y=0; y>1; + unsigned y; + const unsigned chromWidth= width>>1; #ifdef HAVE_MMX for(y=0; y %d\n", filter2Size, filterSize); + MSG_INFO("SwScaler: reducing / aligning filtersize %d -> %d\n", filter2Size, filterSize); /* try to reduce the filter-size (step2 reduce it) */ for(i=0; isrcFormat==IMGFMT_YV12) + interleaveBytes( src[1],src[2],dst,c->srcW,srcSliceH,srcStride[1],srcStride[2],dstStride[0] ); + else /* I420 & IYUV */ + interleaveBytes( src[2],src[1],dst,c->srcW,srcSliceH,srcStride[2],srcStride[1],dstStride[0] ); +} + + /* Warper functions for yuv2bgr */ static void planarYuvToBgr(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dstParam[], int dstStride[]){ @@ -1265,6 +1300,16 @@ static void planarYuvToBgr(SwsContext *c, uint8_t* src[], int srcStride[], int s yuv2rgb( dst,src[0],src[2],src[1],c->srcW,srcSliceH,dstStride[0],srcStride[0],srcStride[1] ); } +static void Planar2PackedWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dstParam[], int dstStride[]){ + uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY; + + if(c->srcFormat==IMGFMT_YV12) + yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] ); + else /* I420 & IYUV */ + yv12toyuy2( src[0],src[2],src[1],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] ); +} + static void bgr24to32Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ @@ -1285,6 +1330,46 @@ static void bgr24to32Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int } } +static void bgr24to16Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + if(dstStride[0]*3==srcStride[0]*2) + rgb24to16(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for(i=0; isrcW*3); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } +} + +static void bgr24to15Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + if(dstStride[0]*3==srcStride[0]*2) + rgb24to15(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for(i=0; isrcW*3); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } +} + static void bgr32to24Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int 
dstStride[]){ @@ -1305,6 +1390,46 @@ static void bgr32to24Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int } } +static void bgr32to16Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + if(dstStride[0]*4==srcStride[0]*2) + rgb32to16(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for(i=0; isrcW<<2); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } +} + +static void bgr32to15Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + if(dstStride[0]*4==srcStride[0]*2) + rgb32to15(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for(i=0; isrcW<<2); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } +} + static void bgr15to16Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ @@ -1325,6 +1450,86 @@ static void bgr15to16Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int } } +static void bgr15to24Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + if(dstStride[0]*2==srcStride[0]*3) + rgb15to24(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for(i=0; isrcW<<1); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } +} + +static void bgr15to32Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + if(dstStride[0]*2==srcStride[0]*4) + rgb15to32(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for(i=0; isrcW<<1); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } +} + +static void bgr16to24Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + if(dstStride[0]*2==srcStride[0]*3) + rgb16to24(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for(i=0; isrcW<<1); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } +} + +static void bgr16to32Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]){ + + if(dstStride[0]*2==srcStride[0]*4) + rgb16to32(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]); + else + { + int i; + uint8_t *srcPtr= src[0]; + uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY; + + for(i=0; isrcW<<1); + srcPtr+= srcStride[0]; + dstPtr+= dstStride[0]; + } + } +} + static void bgr24toyv12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ @@ -1346,21 +1551,25 @@ static void simpleCopy(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], uint8_t *src[3]; uint8_t *dst[3]; - if(c->srcFormat == IMGFMT_I420){ + if(isPlanarYUV(c->srcFormat)) + { + if(c->srcFormat == IMGFMT_I420 || c->srcFormat == IMGFMT_IYUV){ src[0]= srcParam[0]; src[1]= srcParam[2]; src[2]= srcParam[1]; srcStride[0]= 
srcStrideParam[0]; srcStride[1]= srcStrideParam[2]; srcStride[2]= srcStrideParam[1]; - } - else if(c->srcFormat==IMGFMT_YV12){ + } + else + { src[0]= srcParam[0]; src[1]= srcParam[1]; src[2]= srcParam[2]; srcStride[0]= srcStrideParam[0]; srcStride[1]= srcStrideParam[1]; srcStride[2]= srcStrideParam[2]; + } } else if(isPacked(c->srcFormat) || isGray(c->srcFormat)){ src[0]= srcParam[0]; @@ -1371,7 +1580,7 @@ static void simpleCopy(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], srcStride[2]= 0; } - if(c->dstFormat == IMGFMT_I420){ + if(c->dstFormat == IMGFMT_I420 || c->dstFormat == IMGFMT_IYUV){ dst[0]= dstParam[0]; dst[1]= dstParam[2]; dst[2]= dstParam[1]; @@ -1411,9 +1620,21 @@ static void simpleCopy(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int plane; for(plane=0; plane<3; plane++) { - int length= plane==0 ? c->srcW : ((c->srcW+1)>>1); - int y= plane==0 ? srcSliceY: ((srcSliceY+1)>>1); - int height= plane==0 ? srcSliceH: ((srcSliceH+1)>>1); + int length; + int y; + int height; + if(c->srcFormat == IMGFMT_YVU9 || c->srcFormat == IMGFMT_IF09) + { + length= plane==0 ? c->srcW : ((c->srcW+1)>>2); + y= plane==0 ? srcSliceY: ((srcSliceY+1)>>2); + height= plane==0 ? srcSliceH: ((srcSliceH+1)>>2); + } + else + { + length= plane==0 ? c->srcW : ((c->srcW+1)>>1); + y= plane==0 ? srcSliceY: ((srcSliceY+1)>>1); + height= plane==0 ? srcSliceH: ((srcSliceH+1)>>1); + } if(dstStride[plane]==srcStride[plane]) memcpy(dst[plane] + dstStride[plane]*y, src[plane], height*dstStride[plane]); @@ -1433,12 +1654,23 @@ static void simpleCopy(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], } } +static uint32_t remove_dup_fourcc(uint32_t fourcc) +{ + switch(fourcc) + { + case IMGFMT_IYUV: return IMGFMT_I420; + case IMGFMT_Y8 : return IMGFMT_Y800; + default: return fourcc; + } +} + SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter){ SwsContext *c; int i; int usesFilter; + int simple_copy, unscaled_copy; SwsFilter dummyFilter= {NULL, NULL, NULL, NULL}; #ifdef ARCH_X86 @@ -1449,25 +1681,44 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, if(swScale==NULL) globalInit(); /* avoid dupplicate Formats, so we dont need to check to much */ - if(srcFormat==IMGFMT_IYUV) srcFormat=IMGFMT_I420; - if(srcFormat==IMGFMT_Y8) srcFormat=IMGFMT_Y800; - if(dstFormat==IMGFMT_Y8) dstFormat=IMGFMT_Y800; - - if(!isSupportedIn(srcFormat)) + srcFormat = remove_dup_fourcc(srcFormat); + dstFormat = remove_dup_fourcc(dstFormat); + /* don't refuse this beauty */ + unscaled_copy = (srcW == dstW && srcH == dstH); + simple_copy = (srcW == dstW && srcH == dstH && srcFormat == dstFormat); + if(!simple_copy) { - mp_msg(MSGT_SWS,MSGL_ERR,"swScaler: %s is not supported as input format\n", vo_format_name(srcFormat)); - return NULL; + if(unscaled_copy) + { + if(!isSupportedUnscaledIn(srcFormat)) + { + MSG_ERR("swScaler: %s is not supported as input format\n", vo_format_name(srcFormat)); + return NULL; + } + if(!isSupportedUnscaledOut(dstFormat)) + { + MSG_ERR("swScaler: %s is not supported as output format\n", vo_format_name(dstFormat)); + return NULL; + } + } + else + { + if(!isSupportedIn(srcFormat)) + { + MSG_ERR("swScaler: %s is not supported as input format\n", vo_format_name(srcFormat)); + return NULL; + } + if(!isSupportedOut(dstFormat)) + { + MSG_ERR("swScaler: %s is not supported as output format\n", vo_format_name(dstFormat)); + return NULL; + } + } } - 
if(!isSupportedOut(dstFormat)) - { - mp_msg(MSGT_SWS,MSGL_ERR,"swScaler: %s is not supported as output format\n", vo_format_name(dstFormat)); - return NULL; - } - /* sanity check */ if(srcW<4 || srcH<1 || dstW<8 || dstH<1) //FIXME check if these are enough and try to lowwer them after fixing the relevant parts of the code { - mp_msg(MSGT_SWS,MSGL_ERR,"swScaler: %dx%d -> %dx%d is invalid scaling dimension\n", + MSG_ERR("swScaler: %dx%d -> %dx%d is invalid scaling dimension\n", srcW, srcH, dstW, dstH); return NULL; } @@ -1501,6 +1752,26 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, /* unscaled special Cases */ if(srcW==dstW && srcH==dstH && !usesFilter) { + /* yv12_to_nv12 */ + if((srcFormat == IMGFMT_YV12||srcFormat==IMGFMT_I420)&&dstFormat == IMGFMT_NV12) + { + c->swScale= PlanarToNV12Wrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), vo_format_name(dstFormat)); + return c; + } + /* yv12_to_yuy2 */ + if((srcFormat == IMGFMT_YV12||srcFormat==IMGFMT_I420)&&dstFormat == IMGFMT_YUY2) + { + c->swScale= Planar2PackedWrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), vo_format_name(dstFormat)); + return c; + } /* yuv2bgr */ if(isPlanarYUV(srcFormat) && isBGR(dstFormat)) { @@ -1516,7 +1787,7 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, c->swScale= planarYuvToBgr; if(flags&SWS_PRINT_INFO) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using unscaled %s -> %s special converter\n", + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } @@ -1527,7 +1798,7 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, c->swScale= simpleCopy; if(flags&SWS_PRINT_INFO) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using unscaled %s -> %s special converter\n", + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } @@ -1539,7 +1810,31 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, c->swScale= bgr32to24Wrapper; if(flags&SWS_PRINT_INFO) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using unscaled %s -> %s special converter\n", + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), vo_format_name(dstFormat)); + return c; + } + + /* bgr32to16 & rgb32to16*/ + if((srcFormat==IMGFMT_BGR32 && dstFormat==IMGFMT_BGR16) + ||(srcFormat==IMGFMT_RGB32 && dstFormat==IMGFMT_RGB16)) + { + c->swScale= bgr32to16Wrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), vo_format_name(dstFormat)); + return c; + } + + /* bgr32to15 & rgb32to15*/ + if((srcFormat==IMGFMT_BGR32 && dstFormat==IMGFMT_BGR15) + ||(srcFormat==IMGFMT_RGB32 && dstFormat==IMGFMT_RGB15)) + { + c->swScale= bgr32to15Wrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } @@ -1551,7 +1846,31 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, c->swScale= bgr24to32Wrapper; if(flags&SWS_PRINT_INFO) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using unscaled %s -> %s special converter\n", + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), 
vo_format_name(dstFormat)); + return c; + } + + /* bgr24to16 & rgb24to16*/ + if((srcFormat==IMGFMT_BGR24 && dstFormat==IMGFMT_BGR16) + ||(srcFormat==IMGFMT_RGB24 && dstFormat==IMGFMT_RGB16)) + { + c->swScale= bgr24to16Wrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), vo_format_name(dstFormat)); + return c; + } + + /* bgr24to15 & rgb24to15*/ + if((srcFormat==IMGFMT_BGR24 && dstFormat==IMGFMT_BGR15) + ||(srcFormat==IMGFMT_RGB24 && dstFormat==IMGFMT_RGB15)) + { + c->swScale= bgr24to15Wrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } @@ -1562,7 +1881,55 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, c->swScale= bgr15to16Wrapper; if(flags&SWS_PRINT_INFO) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using unscaled %s -> %s special converter\n", + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), vo_format_name(dstFormat)); + return c; + } + + /* bgr15to24 */ + if((srcFormat==IMGFMT_BGR15 && dstFormat==IMGFMT_BGR24) + ||(srcFormat==IMGFMT_RGB15 && dstFormat==IMGFMT_RGB24)) + { + c->swScale= bgr15to24Wrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), vo_format_name(dstFormat)); + return c; + } + + /* bgr15to32 */ + if((srcFormat==IMGFMT_BGR15 && dstFormat==IMGFMT_BGR32) + ||(srcFormat==IMGFMT_RGB15 && dstFormat==IMGFMT_RGB32)) + { + c->swScale= bgr15to32Wrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), vo_format_name(dstFormat)); + return c; + } + + /* bgr16to24 */ + if((srcFormat==IMGFMT_BGR16 && dstFormat==IMGFMT_BGR24) + ||(srcFormat==IMGFMT_RGB16 && dstFormat==IMGFMT_RGB24)) + { + c->swScale= bgr16to24Wrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", + vo_format_name(srcFormat), vo_format_name(dstFormat)); + return c; + } + + /* bgr16to32 */ + if((srcFormat==IMGFMT_BGR16 && dstFormat==IMGFMT_BGR32) + ||(srcFormat==IMGFMT_RGB16 && dstFormat==IMGFMT_RGB32)) + { + c->swScale= bgr16to32Wrapper; + + if(flags&SWS_PRINT_INFO) + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } @@ -1573,7 +1940,7 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, c->swScale= bgr24toyv12Wrapper; if(flags&SWS_PRINT_INFO) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using unscaled %s -> %s special converter\n", + MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } @@ -1585,7 +1952,7 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, if(!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR)) { if(flags&SWS_PRINT_INFO) - mp_msg(MSGT_SWS,MSGL_WARN,"SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\n"); + MSG_INFO("SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\n"); } } else @@ -1723,33 +2090,35 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, char *dither= ""; #endif if(flags&SWS_FAST_BILINEAR) - mp_msg(MSGT_SWS,MSGL_INFO,"SwScaler: FAST_BILINEAR scaler, "); + MSG_INFO("\nSwScaler: FAST_BILINEAR scaler, "); else 
if(flags&SWS_BILINEAR) - mp_msg(MSGT_SWS,MSGL_INFO,"SwScaler: BILINEAR scaler, "); + MSG_INFO("\nSwScaler: BILINEAR scaler, "); else if(flags&SWS_BICUBIC) - mp_msg(MSGT_SWS,MSGL_INFO,"SwScaler: BICUBIC scaler, "); + MSG_INFO("\nSwScaler: BICUBIC scaler, "); else if(flags&SWS_X) - mp_msg(MSGT_SWS,MSGL_INFO,"SwScaler: Experimental scaler, "); + MSG_INFO("\nSwScaler: Experimental scaler, "); else if(flags&SWS_POINT) - mp_msg(MSGT_SWS,MSGL_INFO,"SwScaler: Nearest Neighbor / POINT scaler, "); + MSG_INFO("\nSwScaler: Nearest Neighbor / POINT scaler, "); else if(flags&SWS_AREA) - mp_msg(MSGT_SWS,MSGL_INFO,"SwScaler: Area Averageing scaler, "); + MSG_INFO("\nSwScaler: Area Averageing scaler, "); else - mp_msg(MSGT_SWS,MSGL_INFO,"SwScaler: ehh flags invalid?! "); + MSG_INFO("\nSwScaler: ehh flags invalid?! "); - mp_msg(MSGT_SWS,MSGL_INFO,"%dx%d %s -> %dx%d%s %s ", - srcW,srcH, vo_format_name(srcFormat), dstW,dstH, - (dstFormat==IMGFMT_BGR15 || dstFormat==IMGFMT_BGR16) ? - dither : "", vo_format_name(dstFormat)); + if(dstFormat==IMGFMT_BGR15 || dstFormat==IMGFMT_BGR16) + MSG_INFO("from %s to%s %s ", + vo_format_name(srcFormat), dither, vo_format_name(dstFormat)); + else + MSG_INFO("from %s to %s ", + vo_format_name(srcFormat), vo_format_name(dstFormat)); if(cpuCaps.hasMMX2) - mp_msg(MSGT_SWS,MSGL_INFO,"using MMX2\n"); + MSG_INFO("using MMX2\n"); else if(cpuCaps.has3DNow) - mp_msg(MSGT_SWS,MSGL_INFO,"using 3DNOW\n"); + MSG_INFO("using 3DNOW\n"); else if(cpuCaps.hasMMX) - mp_msg(MSGT_SWS,MSGL_INFO,"using MMX\n"); + MSG_INFO("using MMX\n"); else - mp_msg(MSGT_SWS,MSGL_INFO,"using C\n"); + MSG_INFO("using C\n"); } if((flags & SWS_PRINT_INFO) && verbose) @@ -1757,70 +2126,70 @@ SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, if(cpuCaps.hasMMX) { if(c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR)) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n"); + MSG_V("SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n"); else { if(c->hLumFilterSize==4) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using 4-tap MMX scaler for horizontal luminance scaling\n"); + MSG_V("SwScaler: using 4-tap MMX scaler for horizontal luminance scaling\n"); else if(c->hLumFilterSize==8) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using 8-tap MMX scaler for horizontal luminance scaling\n"); + MSG_V("SwScaler: using 8-tap MMX scaler for horizontal luminance scaling\n"); else - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using n-tap MMX scaler for horizontal luminance scaling\n"); + MSG_V("SwScaler: using n-tap MMX scaler for horizontal luminance scaling\n"); if(c->hChrFilterSize==4) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using 4-tap MMX scaler for horizontal chrominance scaling\n"); + MSG_V("SwScaler: using 4-tap MMX scaler for horizontal chrominance scaling\n"); else if(c->hChrFilterSize==8) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using 8-tap MMX scaler for horizontal chrominance scaling\n"); + MSG_V("SwScaler: using 8-tap MMX scaler for horizontal chrominance scaling\n"); else - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using n-tap MMX scaler for horizontal chrominance scaling\n"); + MSG_V("SwScaler: using n-tap MMX scaler for horizontal chrominance scaling\n"); } } else { #ifdef ARCH_X86 - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using X86-Asm scaler for horizontal scaling\n"); + MSG_V("SwScaler: using X86-Asm scaler for horizontal scaling\n"); #else if(flags & SWS_FAST_BILINEAR) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using FAST_BILINEAR C scaler for horizontal scaling\n"); + MSG_V("SwScaler: using 
FAST_BILINEAR C scaler for horizontal scaling\n"); else - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using C scaler for horizontal scaling\n"); + MSG_V("SwScaler: using C scaler for horizontal scaling\n"); #endif } if(isPlanarYUV(dstFormat)) { if(c->vLumFilterSize==1) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", cpuCaps.hasMMX ? "MMX" : "C"); + MSG_V("SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", cpuCaps.hasMMX ? "MMX" : "C"); else - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using n-tap %s scaler for vertical scaling (YV12 like)\n", cpuCaps.hasMMX ? "MMX" : "C"); + MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (YV12 like)\n", cpuCaps.hasMMX ? "MMX" : "C"); } else { if(c->vLumFilterSize==1 && c->vChrFilterSize==2) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n" + MSG_V("SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n" "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",cpuCaps.hasMMX ? "MMX" : "C"); else if(c->vLumFilterSize==2 && c->vChrFilterSize==2) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C"); + MSG_V("SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C"); else - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C"); + MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C"); } if(dstFormat==IMGFMT_BGR24) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using %s YV12->BGR24 Converter\n", + MSG_V("SwScaler: using %s YV12->BGR24 Converter\n", cpuCaps.hasMMX2 ? "MMX2" : (cpuCaps.hasMMX ? "MMX" : "C")); else if(dstFormat==IMGFMT_BGR32) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using %s YV12->BGR32 Converter\n", cpuCaps.hasMMX ? "MMX" : "C"); + MSG_V("SwScaler: using %s YV12->BGR32 Converter\n", cpuCaps.hasMMX ? "MMX" : "C"); else if(dstFormat==IMGFMT_BGR16) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using %s YV12->BGR16 Converter\n", cpuCaps.hasMMX ? "MMX" : "C"); + MSG_V("SwScaler: using %s YV12->BGR16 Converter\n", cpuCaps.hasMMX ? "MMX" : "C"); else if(dstFormat==IMGFMT_BGR15) - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: using %s YV12->BGR15 Converter\n", cpuCaps.hasMMX ? "MMX" : "C"); + MSG_V("SwScaler: using %s YV12->BGR15 Converter\n", cpuCaps.hasMMX ? 
"MMX" : "C"); - mp_msg(MSGT_SWS,MSGL_V,"SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH); + MSG_V("SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH); } if((flags & SWS_PRINT_INFO) && verbose>1) { - mp_msg(MSGT_SWS,MSGL_DBG2,"SwScaler:Lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", + MSG_DBG2("SwScaler:Lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc); - mp_msg(MSGT_SWS,MSGL_DBG2,"SwScaler:Chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", + MSG_DBG2("SwScaler:Chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc); } @@ -2039,9 +2408,9 @@ void printVec(SwsVector *a){ for(i=0; ilength; i++) { int x= (int)((a->coeff[i]-min)*60.0/range +0.5); - printf("%1.3f ", a->coeff[i]); - for(;x>0; x--) printf(" "); - printf("|\n"); + MSG_DBG2("%1.3f ", a->coeff[i]); + for(;x>0; x--) MSG_DBG2(" "); + MSG_DBG2("|\n"); } } diff --git a/postproc/swscale_template.c b/postproc/swscale_template.c index 89ca6111c3..41d3f90650 100644 --- a/postproc/swscale_template.c +++ b/postproc/swscale_template.c @@ -2626,7 +2626,7 @@ static void RENAME(swScale)(SwsContext *c, uint8_t* srcParam[], int srcStridePar srcStride[1]= srcStrideParam[1]; srcStride[2]= srcStrideParam[2]; } - else if(isPacked(c->srcFormat)){ + else if(isPacked(c->srcFormat) || isBGR(c->srcFormat) || isRGB(c->srcFormat)){ src[0]= src[1]= src[2]= srcParam[0]; diff --git a/postproc/yuv2rgb.c b/postproc/yuv2rgb.c index 08ebc1a778..e1248bafa0 100644 --- a/postproc/yuv2rgb.c +++ b/postproc/yuv2rgb.c @@ -156,7 +156,7 @@ const int32_t Inverse_Table_6_9[8][4] = { {117579, 136230, 16907, 35559} /* SMPTE 240M (1987) */ }; -static void yuv2rgb_c_init (int bpp, int mode); +static void yuv2rgb_c_init (unsigned bpp, int mode); yuv2rgb_fun yuv2rgb; @@ -166,11 +166,11 @@ static void (* yuv2rgb_c_internal) (uint8_t *, uint8_t *, static void yuv2rgb_c (void * dst, uint8_t * py, uint8_t * pu, uint8_t * pv, - int h_size, int v_size, - int rgb_stride, int y_stride, int uv_stride) + unsigned h_size, unsigned v_size, + unsigned rgb_stride, unsigned y_stride, unsigned uv_stride) { v_size >>= 1; - + while (v_size--) { yuv2rgb_c_internal (py, py + y_stride, pu, pv, dst, dst + rgb_stride, h_size, v_size<<1); @@ -182,7 +182,7 @@ static void yuv2rgb_c (void * dst, uint8_t * py, } } -void yuv2rgb_init (int bpp, int mode) +void yuv2rgb_init (unsigned bpp, int mode) { yuv2rgb = NULL; #ifdef CAN_COMPILE_X86_ASM @@ -676,7 +676,7 @@ static int div_round (int dividend, int divisor) return -((-dividend + (divisor>>1)) / divisor); } -static void yuv2rgb_c_init (int bpp, int mode) +static void yuv2rgb_c_init (unsigned bpp, int mode) { int i; uint8_t table_Y[1024]; diff --git a/postproc/yuv2rgb_mlib.c b/postproc/yuv2rgb_mlib.c index 669fbae791..5facb1fdb0 100644 --- a/postproc/yuv2rgb_mlib.c +++ b/postproc/yuv2rgb_mlib.c @@ -29,8 +29,8 @@ static void mlib_YUV2ARGB420_32(uint8_t* image, uint8_t* py, uint8_t* pu, uint8_t* pv, - int h_size, int v_size, - int rgb_stride, int y_stride, int uv_stride) + unsigned h_size, unsigned v_size, + unsigned rgb_stride, unsigned y_stride, unsigned uv_stride) { mlib_VideoColorYUV2ARGB420(image, py, pu, pv, h_size, v_size, rgb_stride, y_stride, uv_stride); @@ -38,8 +38,8 @@ static void mlib_YUV2ARGB420_32(uint8_t* image, uint8_t* py, static void mlib_YUV2ABGR420_32(uint8_t* image, uint8_t* py, uint8_t* pu, uint8_t* pv, - int h_size, int v_size, - int rgb_stride, int y_stride, int uv_stride) + unsigned 
h_size, unsigned v_size, + unsigned rgb_stride, unsigned y_stride, unsigned uv_stride) { mlib_VideoColorYUV2ABGR420(image, py, pu, pv, h_size, v_size, rgb_stride, y_stride, uv_stride); @@ -47,15 +47,15 @@ static void mlib_YUV2ABGR420_32(uint8_t* image, uint8_t* py, static void mlib_YUV2RGB420_24(uint8_t* image, uint8_t* py, uint8_t* pu, uint8_t* pv, - int h_size, int v_size, - int rgb_stride, int y_stride, int uv_stride) + unsigned h_size, unsigned v_size, + unsigned rgb_stride, unsigned y_stride, unsigned uv_stride) { mlib_VideoColorYUV2RGB420(image, py, pu, pv, h_size, v_size, rgb_stride, y_stride, uv_stride); } -yuv2rgb_fun yuv2rgb_init_mlib(int bpp, int mode) +yuv2rgb_fun yuv2rgb_init_mlib(unsigned bpp, int mode) { if( bpp == 24 ) diff --git a/postproc/yuv2rgb_template.c b/postproc/yuv2rgb_template.c index f254388e82..d8e4a87a06 100644 --- a/postproc/yuv2rgb_template.c +++ b/postproc/yuv2rgb_template.c @@ -123,8 +123,8 @@ static inline void RENAME(yuv420_rgb16) (uint8_t * image, uint8_t * py, uint8_t * pu, uint8_t * pv, - int h_size, int v_size, - int rgb_stride, int y_stride, int uv_stride) + unsigned h_size, unsigned v_size, + unsigned rgb_stride, unsigned y_stride, unsigned uv_stride) { int even = 1; int x, y; @@ -228,8 +228,8 @@ YUV2RGB static inline void RENAME(yuv420_rgb15) (uint8_t * image, uint8_t * py, uint8_t * pu, uint8_t * pv, - int h_size, int v_size, - int rgb_stride, int y_stride, int uv_stride) + unsigned h_size, unsigned v_size, + unsigned rgb_stride, unsigned y_stride, unsigned uv_stride) { int even = 1; int x, y; @@ -329,8 +329,8 @@ YUV2RGB static inline void RENAME(yuv420_rgb24) (uint8_t * image, uint8_t * py, uint8_t * pu, uint8_t * pv, - int h_size, int v_size, - int rgb_stride, int y_stride, int uv_stride) + unsigned h_size, unsigned v_size, + unsigned rgb_stride, unsigned y_stride, unsigned uv_stride) { int even = 1; int x, y; @@ -488,8 +488,8 @@ YUV2RGB static inline void RENAME(yuv420_argb32) (uint8_t * image, uint8_t * py, uint8_t * pu, uint8_t * pv, - int h_size, int v_size, - int rgb_stride, int y_stride, int uv_stride) + unsigned h_size, unsigned v_size, + unsigned rgb_stride, unsigned y_stride, unsigned uv_stride) { int even = 1; int x, y; @@ -584,7 +584,7 @@ YUV2RGB __asm__ __volatile__ (EMMS); } -yuv2rgb_fun RENAME(yuv2rgb_init) (int bpp, int mode) +yuv2rgb_fun RENAME(yuv2rgb_init) (unsigned bpp, int mode) { if (bpp == 15 && mode == MODE_RGB) return RENAME(yuv420_rgb15); if (bpp == 16 && mode == MODE_RGB) return RENAME(yuv420_rgb16);
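
A note on the bit-depth expansion used above: the comment in rgb15to24() explains that these converters widen 5- and 6-bit channels by a plain left shift with zero fill, which can never produce an all-ones value, whereas left bit replication can. The trade-off is easy to verify in plain C. The sketch below is an editor's illustration, not part of the patch; the function names are made up for the example and nothing beyond standard C is assumed:

	#include <stdio.h>
	#include <stdint.h>

	/* 5-bit channel -> 8 bit by zero-filling left shift
	   (the method rgb15to24/rgb15to32 implement) */
	static uint8_t expand5_shift(uint8_t v)
	{
		return (uint8_t)(v << 3);              /* 0x1F -> 0xF8: full-scale input falls short of 0xFF */
	}

	/* 5-bit channel -> 8 bit by left bit replication
	   (the method the comment recommends) */
	static uint8_t expand5_replicate(uint8_t v)
	{
		return (uint8_t)((v << 3) | (v >> 2)); /* top 3 bits refill the low bits: 0x1F -> 0xFF */
	}

	/* the same pair for the 6-bit green channel of RGB16 */
	static uint8_t expand6_shift(uint8_t v)     { return (uint8_t)(v << 2); }
	static uint8_t expand6_replicate(uint8_t v) { return (uint8_t)((v << 2) | (v >> 4)); }

	int main(void)
	{
		uint16_t white15 = 0x7FFF;             /* white in 15 bit: B = G = R = 0x1F */
		uint8_t  b = white15 & 0x1F;
		printf("5 bit 0x1F: shift -> 0x%02X, replicate -> 0x%02X\n",
		       expand5_shift(b), expand5_replicate(b));
		printf("6 bit 0x3F: shift -> 0x%02X, replicate -> 0x%02X\n",
		       expand6_shift(0x3F), expand6_replicate(0x3F));
		return 0;
	}

The scalar fallback loops in the patch (*d++ = (bgr&0x1F)<<3; and friends) correspond to the shift variant; replication would cost one extra shift and OR per channel but maps 0x1F to 0xFF, so white stays white instead of being darkened slightly.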