From 6e42e6c4b410dbef8b593c2d796a5dad95f89ee4 Mon Sep 17 00:00:00 2001
From: Diego Biurrun <diego@biurrun.de>
Date: Sat, 28 Apr 2007 11:44:49 +0000
Subject: [PATCH] cosmetics attack, part I: Remove all tabs and prettyprint/reindent the code.

Originally committed as revision 23158 to svn://svn.mplayerhq.hu/mplayer/trunk/libswscale
---
 libswscale/cs_test.c                  |    6 +-
 libswscale/rgb2rgb.c                  |  599 ++--
 libswscale/rgb2rgb.h                  |   69 +-
 libswscale/rgb2rgb_template.c         | 4389 ++++++++++++------------
 libswscale/swscale_altivec_template.c |  854 +++--
 5 files changed, 2955 insertions(+), 2962 deletions(-)

diff --git a/libswscale/cs_test.c b/libswscale/cs_test.c
index 552fafaf6a..876f270411 100644
--- a/libswscale/cs_test.c
+++ b/libswscale/cs_test.c
@@ -142,7 +142,7 @@ int main(int argc, char **argv)
     for(i=0; i

diff --git a/libswscale/rgb2rgb.c b/libswscale/rgb2rgb.c
--- a/libswscale/rgb2rgb.c
+++ b/libswscale/rgb2rgb.c
@@ ... @@
 void rgb32tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	long i;
-	long num_pixels = src_size >> 2;
-	for(i=0; i<num_pixels; i++)
-	{
-		#ifdef WORDS_BIGENDIAN
-			/* RGB32 (= A,B,G,R) -> BGR24 (= B,G,R) */
-			dst[3*i + 0] = src[4*i + 1];
-			dst[3*i + 1] = src[4*i + 2];
-			dst[3*i + 2] = src[4*i + 3];
-		#else
-			dst[3*i + 0] = src[4*i + 2];
-			dst[3*i + 1] = src[4*i + 1];
-			dst[3*i + 2] = src[4*i + 0];
-		#endif
-	}
+    long i;
+    long num_pixels = src_size >> 2;
+    for (i=0; i<num_pixels; i++)
+    {
+        #ifdef WORDS_BIGENDIAN
+            /* RGB32 (= A,B,G,R) -> BGR24 (= B,G,R) */
+            dst[3*i + 0] = src[4*i + 1];
+            dst[3*i + 1] = src[4*i + 2];
+            dst[3*i + 2] = src[4*i + 3];
+        #else
+            dst[3*i + 0] = src[4*i + 2];
+            dst[3*i + 1] = src[4*i + 1];
+            dst[3*i + 2] = src[4*i + 0];
+        #endif
+    }
 }
 
 void rgb24tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	long i;
-	for(i=0; 3*i<src_size; i++)
-	{
-		#ifdef WORDS_BIGENDIAN
-			/* RGB24 (= R,G,B) -> BGR32 (= A,R,G,B) */
-			dst[4*i + 0] = 0;
-			dst[4*i + 1] = src[3*i + 0];
-			dst[4*i + 2] = src[3*i + 1];
-			dst[4*i + 3] = src[3*i + 2];
-		#else
-			dst[4*i + 0] = src[3*i + 2];
-			dst[4*i + 1] = src[3*i + 1];
-			dst[4*i + 2] = src[3*i + 0];
-			dst[4*i + 3] = 0;
-		#endif
-	}
+    long i;
+    for (i=0; 3*i<src_size; i++)
+    {
+        #ifdef WORDS_BIGENDIAN
+            /* RGB24 (= R,G,B) -> BGR32 (= A,R,G,B) */
+            dst[4*i + 0] = 0;
+            dst[4*i + 1] = src[3*i + 0];
+            dst[4*i + 2] = src[3*i + 1];
+            dst[4*i + 3] = src[3*i + 2];
+        #else
+            dst[4*i + 0] = src[3*i + 2];
+            dst[4*i + 1] = src[3*i + 1];
+            dst[4*i + 2] = src[3*i + 0];
+            dst[4*i + 3] = 0;
+        #endif
+    }
 }
 
 void rgb16tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	const uint16_t *end;
-	uint8_t *d = (uint8_t *)dst;
-	const uint16_t *s = (uint16_t *)src;
-	end = s + src_size/2;
-	while(s < end)
-	{
-		register uint16_t bgr;
-		bgr = *s++;
-		#ifdef WORDS_BIGENDIAN
-			*d++ = 0;
-			*d++ = (bgr&0x1F)<<3;
-			*d++ = (bgr&0x7E0)>>3;
-			*d++ = (bgr&0xF800)>>8;
-		#else
-			*d++ = (bgr&0xF800)>>8;
-			*d++ = (bgr&0x7E0)>>3;
-			*d++ = (bgr&0x1F)<<3;
-			*d++ = 0;
-		#endif
-	}
+    const uint16_t *end;
+    uint8_t *d = (uint8_t *)dst;
+    const uint16_t *s = (uint16_t *)src;
+    end = s + src_size/2;
+    while (s < end)
+    {
+        register uint16_t bgr;
+        bgr = *s++;
+        #ifdef WORDS_BIGENDIAN
+            *d++ = 0;
+            *d++ = (bgr&0x1F)<<3;
+            *d++ = (bgr&0x7E0)>>3;
+            *d++ = (bgr&0xF800)>>8;
+        #else
+            *d++ = (bgr&0xF800)>>8;
+            *d++ = (bgr&0x7E0)>>3;
+            *d++ = (bgr&0x1F)<<3;
+            *d++ = 0;
+        #endif
+    }
 }
 
 void rgb16tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	const uint16_t *end;
-	uint8_t *d = (uint8_t *)dst;
-	const uint16_t *s = (const uint16_t *)src;
-	end = s + src_size/2;
-	while(s < end)
-	{
-		register uint16_t bgr;
-		bgr = *s++;
-		*d++ = (bgr&0xF800)>>8;
-		*d++ = (bgr&0x7E0)>>3;
-		*d++ = (bgr&0x1F)<<3;
-	}
+    const uint16_t *end;
+    uint8_t *d = (uint8_t *)dst;
+    const uint16_t *s = (const uint16_t *)src;
+    end = s + src_size/2;
+    while (s < end)
+    {
+        register uint16_t bgr;
+        bgr = *s++;
+        *d++ = (bgr&0xF800)>>8;
+        *d++ = (bgr&0x7E0)>>3;
+        *d++ = (bgr&0x1F)<<3;
+    }
 }
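All of the 16-bit-source paths above share one recipe: mask each field out of the packed word, then shift it so its most significant bits land at the top of an output byte. The low bits are zero-filled rather than replicated, so a full-intensity 5-bit field expands to 0xF8, not 0xFF. A minimal self-contained sketch of that unpacking, separate from the patch itself; the sample value is an assumption for illustration, and the fields are named by position rather than by channel:

#include <stdint.h>
#include <stdio.h>

/* Expand one packed 5-6-5 word into three bytes the same way the
 * while (s < end) loops above do: top field via >>8, middle field
 * via >>3, low field via <<3. */
static void unpack565(uint16_t px, uint8_t out[3])
{
    out[0] = (px & 0xF800) >> 8;  /* top 5 bits    -> bits 7..3, low bits 0 */
    out[1] = (px & 0x07E0) >> 3;  /* middle 6 bits -> bits 7..2, low bits 0 */
    out[2] = (px & 0x001F) << 3;  /* low 5 bits    -> bits 7..3, low bits 0 */
}

int main(void)
{
    uint8_t p[3];
    unpack565(0xFFFF, p);                          /* assumed sample: all ones */
    printf("%02x %02x %02x\n", p[0], p[1], p[2]);  /* prints: f8 fc f8 */
    return 0;
}

The MMX versions of these conversions further down in the patch make the same zero-fill trade-off, just several pixels per iteration.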
 void rgb16tobgr16(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	long i;
-	long num_pixels = src_size >> 1;
+    long i;
+    long num_pixels = src_size >> 1;
 
-	for(i=0; i<num_pixels; i++)
-	{
-	    unsigned b,g,r;
-	    register uint16_t rgb;
-	    rgb = ((uint16_t*)src)[i];
-	    r = rgb&0x1F;
-	    g = (rgb&0x7E0)>>5;
-	    b = (rgb&0xF800)>>11;
-	    dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
-	}
+    for (i=0; i<num_pixels; i++)
+    {
+        unsigned b,g,r;
+        register uint16_t rgb;
+        rgb = ((uint16_t*)src)[i];
+        r = rgb&0x1F;
+        g = (rgb&0x7E0)>>5;
+        b = (rgb&0xF800)>>11;
+        dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
+    }
 }
 
 void rgb16tobgr15(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	long i;
-	long num_pixels = src_size >> 1;
+    long i;
+    long num_pixels = src_size >> 1;
 
-	for(i=0; i<num_pixels; i++)
-	{
-	    unsigned b,g,r;
-	    register uint16_t rgb;
-	    rgb = ((uint16_t*)src)[i];
-	    r = rgb&0x1F;
-	    g = (rgb&0x7E0)>>5;
-	    b = (rgb&0xF800)>>11;
-	    dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
-	}
+    for (i=0; i<num_pixels; i++)
+    {
+        unsigned b,g,r;
+        register uint16_t rgb;
+        rgb = ((uint16_t*)src)[i];
+        r = rgb&0x1F;
+        g = (rgb&0x7E0)>>5;
+        b = (rgb&0xF800)>>11;
+        dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
+    }
 }
 
 void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	const uint16_t *end;
-	uint8_t *d = (uint8_t *)dst;
-	const uint16_t *s = (const uint16_t *)src;
-	end = s + src_size/2;
-	while(s < end)
-	{
-		register uint16_t bgr;
-		bgr = *s++;
-		#ifdef WORDS_BIGENDIAN
-			*d++ = 0;
-			*d++ = (bgr&0x1F)<<3;
-			*d++ = (bgr&0x3E0)>>2;
-			*d++ = (bgr&0x7C00)>>7;
-		#else
-			*d++ = (bgr&0x7C00)>>7;
-			*d++ = (bgr&0x3E0)>>2;
-			*d++ = (bgr&0x1F)<<3;
-			*d++ = 0;
-		#endif
-	}
+    const uint16_t *end;
+    uint8_t *d = (uint8_t *)dst;
+    const uint16_t *s = (const uint16_t *)src;
+    end = s + src_size/2;
+    while (s < end)
+    {
+        register uint16_t bgr;
+        bgr = *s++;
+        #ifdef WORDS_BIGENDIAN
+            *d++ = 0;
+            *d++ = (bgr&0x1F)<<3;
+            *d++ = (bgr&0x3E0)>>2;
+            *d++ = (bgr&0x7C00)>>7;
+        #else
+            *d++ = (bgr&0x7C00)>>7;
+            *d++ = (bgr&0x3E0)>>2;
+            *d++ = (bgr&0x1F)<<3;
+            *d++ = 0;
+        #endif
+    }
 }
 
 void rgb15tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	const uint16_t *end;
-	uint8_t *d = (uint8_t *)dst;
-	const uint16_t *s = (uint16_t *)src;
-	end = s + src_size/2;
-	while(s < end)
-	{
-		register uint16_t bgr;
-		bgr = *s++;
-		*d++ = (bgr&0x7C00)>>7;
-		*d++ = (bgr&0x3E0)>>2;
-		*d++ = (bgr&0x1F)<<3;
-	}
+    const uint16_t *end;
+    uint8_t *d = (uint8_t *)dst;
+    const uint16_t *s = (uint16_t *)src;
+    end = s + src_size/2;
+    while (s < end)
+    {
+        register uint16_t bgr;
+        bgr = *s++;
+        *d++ = (bgr&0x7C00)>>7;
+        *d++ = (bgr&0x3E0)>>2;
+        *d++ = (bgr&0x1F)<<3;
+    }
 }
 
 void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	long i;
-	long num_pixels = src_size >> 1;
+    long i;
+    long num_pixels = src_size >> 1;
 
-	for(i=0; i<num_pixels; i++)
-	{
-	    unsigned b,g,r;
-	    register uint16_t rgb;
-	    rgb = ((uint16_t*)src)[i];
-	    r = rgb&0x1F;
-	    g = (rgb&0x3E0)>>5;
-	    b = (rgb&0x7C00)>>10;
-	    dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
-	}
+    for (i=0; i<num_pixels; i++)
+    {
+        unsigned b,g,r;
+        register uint16_t rgb;
+        rgb = ((uint16_t*)src)[i];
+        r = rgb&0x1F;
+        g = (rgb&0x3E0)>>5;
+        b = (rgb&0x7C00)>>10;
+        dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
+    }
 }
 
 void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	long i;
-	long num_pixels = src_size >> 1;
+    long i;
+    long num_pixels = src_size >> 1;
 
-	for(i=0; i<num_pixels; i++)
-	{
-	    unsigned b,g,r;
-	    register uint16_t rgb;
-	    rgb = ((uint16_t*)src)[i];
-	    r = rgb&0x1F;
-	    g = (rgb&0x3E0)>>5;
-	    b = (rgb&0x7C00)>>10;
-	    dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
-	}
+    for (i=0; i<num_pixels; i++)
+    {
+        unsigned b,g,r;
+        register uint16_t rgb;
+        rgb = ((uint16_t*)src)[i];
+        r = rgb&0x1F;
+        g = (rgb&0x3E0)>>5;
+        b = (rgb&0x7C00)>>10;
+        dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
+    }
 }
 
 void rgb8tobgr8(const uint8_t *src, uint8_t *dst, long src_size)
 {
-	long i;
-	long num_pixels = src_size;
-	for(i=0; i<num_pixels; i++)
-	{
-	    unsigned b,g,r;
-	    register uint8_t rgb;
-	    rgb = src[i];
-	    r = (rgb&0x07);
-	    g = (rgb&0x38)>>3;
-	    b = (rgb&0xC0)>>6;
-	    dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6);
-	}
+    long i;
+    long num_pixels = src_size;
+    for (i=0; i<num_pixels; i++)
+    {
+        unsigned b,g,r;
+        register uint8_t rgb;
+        rgb = src[i];
+        r = (rgb&0x07);
+        g = (rgb&0x38)>>3;
+        b = (rgb&0xC0)>>6;
+        dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6);
+    }
 }

diff --git a/libswscale/rgb2rgb.h b/libswscale/rgb2rgb.h
index 769811fb8f..5ad3ec71b4 100644
--- a/libswscale/rgb2rgb.h
+++ b/libswscale/rgb2rgb.h
@@ -1,5 +1,4 @@
 /*
- *
  * rgb2rgb.h, Software RGB to RGB convertor
  * pluralize by Software PAL8 to RGB convertor
  * Software YUV to YUV convertor
@@ -30,18 +29,18 @@
 // Note: do not fix the dependence on stdio.h
 
 /* A full collection of rgb to rgb(bgr) convertors */
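The declarations that follow are function pointers rather than plain functions: sws_rgb2rgb_init(), declared at the end of this header, points each one at the C, MMX, MMX2, or 3DNow body chosen from the flags it receives. A hedged usage sketch; the buffer arithmetic, the helper name, and the flags value of 0 (assumed to select the unaccelerated C paths) are illustrative assumptions, not something the header spells out:

#include <stdint.h>
#include <stdlib.h>
#include "rgb2rgb.h"

/* Convert a packed 24 bpp buffer to 32 bpp through the dispatched
 * pointer; returns 0 on success, -1 on allocation failure. */
int convert24to32(const uint8_t *src, long src_size, uint8_t **out)
{
    uint8_t *dst = malloc(src_size / 3 * 4);  /* 3 in-bytes -> 4 out-bytes */
    if (!dst)
        return -1;
    sws_rgb2rgb_init(0);  /* assumed: no CPU-capability flags, plain C paths */
    rgb24to32(src, dst, src_size);
    *out = dst;
    return 0;
}

Dispatching through pointers keeps each call to a single indirect jump while letting one binary serve every CPU generation.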
-extern void (*rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb24to16)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb24to15)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb32to16)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb32to15)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb15to24)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb15to32)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb16to24)(const uint8_t *src,uint8_t *dst,long src_size); -extern void (*rgb16to32)(const uint8_t *src,uint8_t *dst,long src_size); +extern void (*rgb24to32) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb24to16) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb24to15) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb32to24) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb32to16) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb32to15) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb15to16) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb15to24) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb15to32) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb16to15) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb16to24) (const uint8_t *src, uint8_t *dst, long src_size); +extern void (*rgb16to32) (const uint8_t *src, uint8_t *dst, long src_size); extern void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size); extern void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size); extern void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size); @@ -59,7 +58,7 @@ extern void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size); extern void rgb15tobgr24(const uint8_t *src, uint8_t *dst, long src_size); extern void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size); extern void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size); -extern void rgb8tobgr8(const uint8_t *src, uint8_t *dst, long src_size); +extern void rgb8tobgr8 (const uint8_t *src, uint8_t *dst, long src_size); extern void palette8torgb32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette); @@ -85,16 +84,16 @@ extern void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, c * problem for anyone then tell me, and ill fix it) */ extern void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, - long width, long height, - long lumStride, long chromStride, long dstStride); + long width, long height, + long lumStride, long chromStride, long dstStride); /** * * width should be a multiple of 16 */ extern void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, - long width, long height, - long lumStride, long chromStride, long dstStride); + long width, long height, + long lumStride, long chromStride, long dstStride); /** * @@ -102,8 +101,8 @@ extern void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uin * problem for anyone then tell me, and ill fix it) */ extern void (*yuy2toyv12)(const uint8_t *src, 
uint8_t *ydst, uint8_t *udst, uint8_t *vdst, - long width, long height, - long lumStride, long chromStride, long srcStride); + long width, long height, + long lumStride, long chromStride, long srcStride); /** * @@ -111,8 +110,8 @@ extern void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint * problem for anyone then tell me, and ill fix it) */ extern void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, - long width, long height, - long lumStride, long chromStride, long dstStride); + long width, long height, + long lumStride, long chromStride, long dstStride); /** * @@ -121,26 +120,26 @@ extern void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_ * chrominance data is only taken from every secound line others are ignored FIXME write HQ version */ extern void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, - long width, long height, - long lumStride, long chromStride, long srcStride); + long width, long height, + long lumStride, long chromStride, long srcStride); extern void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height, - long srcStride, long dstStride); + long srcStride, long dstStride); extern void (*interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dst, - long width, long height, long src1Stride, - long src2Stride, long dstStride); + long width, long height, long src1Stride, + long src2Stride, long dstStride); extern void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, - uint8_t *dst1, uint8_t *dst2, - long width, long height, - long srcStride1, long srcStride2, - long dstStride1, long dstStride2); + uint8_t *dst1, uint8_t *dst2, + long width, long height, + long srcStride1, long srcStride2, + long dstStride1, long dstStride2); extern void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, - uint8_t *dst, - long width, long height, - long srcStride1, long srcStride2, - long srcStride3, long dstStride); + uint8_t *dst, + long width, long height, + long srcStride1, long srcStride2, + long srcStride3, long dstStride); void sws_rgb2rgb_init(int flags); diff --git a/libswscale/rgb2rgb_template.c b/libswscale/rgb2rgb_template.c index c089e39fa7..6dab0aa539 100644 --- a/libswscale/rgb2rgb_template.c +++ b/libswscale/rgb2rgb_template.c @@ -1,5 +1,4 @@ /* - * * rgb2rgb.c, Software RGB to RGB convertor * pluralize by Software PAL8 to RGB convertor * Software YUV to YUV convertor @@ -53,11 +52,11 @@ #ifdef HAVE_3DNOW #define PREFETCH "prefetch" #define PREFETCHW "prefetchw" -#define PAVGB "pavgusb" +#define PAVGB "pavgusb" #elif defined ( HAVE_MMX2 ) #define PREFETCH "prefetchnta" #define PREFETCHW "prefetcht0" -#define PAVGB "pavgb" +#define PAVGB "pavgb" #else #ifdef __APPLE__ #define PREFETCH "#" @@ -85,148 +84,148 @@ static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size) { - uint8_t *dest = dst; - const uint8_t *s = src; - const uint8_t *end; -#ifdef HAVE_MMX - const uint8_t *mm_end; -#endif - end = s + src_size; -#ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); - mm_end = end - 23; - __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory"); - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movd %1, %%mm0\n\t" - "punpckldq 3%1, %%mm0\n\t" - "movd 6%1, %%mm1\n\t" - "punpckldq 9%1, %%mm1\n\t" - "movd 12%1, %%mm2\n\t" - "punpckldq 15%1, %%mm2\n\t" - "movd 18%1, %%mm3\n\t" - "punpckldq 21%1, %%mm3\n\t" - "pand %%mm7, %%mm0\n\t" - "pand %%mm7, 
%%mm1\n\t" - "pand %%mm7, %%mm2\n\t" - "pand %%mm7, %%mm3\n\t" - MOVNTQ" %%mm0, %0\n\t" - MOVNTQ" %%mm1, 8%0\n\t" - MOVNTQ" %%mm2, 16%0\n\t" - MOVNTQ" %%mm3, 24%0" - :"=m"(*dest) - :"m"(*s) - :"memory"); - dest += 32; - s += 24; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); -#endif - while(s < end) - { -#ifdef WORDS_BIGENDIAN - /* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */ - *dest++ = 0; - *dest++ = s[2]; - *dest++ = s[1]; - *dest++ = s[0]; - s+=3; -#else - *dest++ = *s++; - *dest++ = *s++; - *dest++ = *s++; - *dest++ = 0; -#endif - } + uint8_t *dest = dst; + const uint8_t *s = src; + const uint8_t *end; + #ifdef HAVE_MMX + const uint8_t *mm_end; + #endif + end = s + src_size; + #ifdef HAVE_MMX + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = end - 23; + __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory"); + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "punpckldq 3%1, %%mm0 \n\t" + "movd 6%1, %%mm1 \n\t" + "punpckldq 9%1, %%mm1 \n\t" + "movd 12%1, %%mm2 \n\t" + "punpckldq 15%1, %%mm2 \n\t" + "movd 18%1, %%mm3 \n\t" + "punpckldq 21%1, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm1 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm3 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm2, 16%0 \n\t" + MOVNTQ" %%mm3, 24%0" + :"=m"(*dest) + :"m"(*s) + :"memory"); + dest += 32; + s += 24; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); + #endif + while (s < end) + { + #ifdef WORDS_BIGENDIAN + /* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */ + *dest++ = 0; + *dest++ = s[2]; + *dest++ = s[1]; + *dest++ = s[0]; + s+=3; + #else + *dest++ = *s++; + *dest++ = *s++; + *dest++ = *s++; + *dest++ = 0; + #endif + } } static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size) { - uint8_t *dest = dst; - const uint8_t *s = src; - const uint8_t *end; + uint8_t *dest = dst; + const uint8_t *s = src; + const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + const uint8_t *mm_end; #endif - end = s + src_size; + end = s + src_size; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); - mm_end = end - 31; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movq %1, %%mm0\n\t" - "movq 8%1, %%mm1\n\t" - "movq 16%1, %%mm4\n\t" - "movq 24%1, %%mm5\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm1, %%mm3\n\t" - "movq %%mm4, %%mm6\n\t" - "movq %%mm5, %%mm7\n\t" - "psrlq $8, %%mm2\n\t" - "psrlq $8, %%mm3\n\t" - "psrlq $8, %%mm6\n\t" - "psrlq $8, %%mm7\n\t" - "pand %2, %%mm0\n\t" - "pand %2, %%mm1\n\t" - "pand %2, %%mm4\n\t" - "pand %2, %%mm5\n\t" - "pand %3, %%mm2\n\t" - "pand %3, %%mm3\n\t" - "pand %3, %%mm6\n\t" - "pand %3, %%mm7\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm3, %%mm1\n\t" - "por %%mm6, %%mm4\n\t" - "por %%mm7, %%mm5\n\t" + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = end - 31; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq 8%1, %%mm1 \n\t" + "movq 16%1, %%mm4 \n\t" + "movq 24%1, %%mm5 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm3 \n\t" + "movq %%mm4, %%mm6 \n\t" + "movq %%mm5, %%mm7 \n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm3 \n\t" + "psrlq $8, %%mm6 \n\t" + "psrlq $8, %%mm7 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm1 \n\t" + "pand %2, %%mm4 \n\t" + "pand %2, %%mm5 \n\t" + "pand %3, %%mm2 \n\t" + "pand %3, %%mm3 \n\t" + "pand %3, %%mm6 \n\t" + "pand %3, %%mm7 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm3, 
%%mm1 \n\t" + "por %%mm6, %%mm4 \n\t" + "por %%mm7, %%mm5 \n\t" - "movq %%mm1, %%mm2\n\t" - "movq %%mm4, %%mm3\n\t" - "psllq $48, %%mm2\n\t" - "psllq $32, %%mm3\n\t" - "pand %4, %%mm2\n\t" - "pand %5, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "psrlq $16, %%mm1\n\t" - "psrlq $32, %%mm4\n\t" - "psllq $16, %%mm5\n\t" - "por %%mm3, %%mm1\n\t" - "pand %6, %%mm5\n\t" - "por %%mm5, %%mm4\n\t" + "movq %%mm1, %%mm2 \n\t" + "movq %%mm4, %%mm3 \n\t" + "psllq $48, %%mm2 \n\t" + "psllq $32, %%mm3 \n\t" + "pand %4, %%mm2 \n\t" + "pand %5, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "psrlq $16, %%mm1 \n\t" + "psrlq $32, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm3, %%mm1 \n\t" + "pand %6, %%mm5 \n\t" + "por %%mm5, %%mm4 \n\t" - MOVNTQ" %%mm0, %0\n\t" - MOVNTQ" %%mm1, 8%0\n\t" - MOVNTQ" %%mm4, 16%0" - :"=m"(*dest) - :"m"(*s),"m"(mask24l), - "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) - :"memory"); - dest += 24; - s += 32; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm4, 16%0" + :"=m"(*dest) + :"m"(*s),"m"(mask24l), + "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) + :"memory"); + dest += 24; + s += 32; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { + while (s < end) + { #ifdef WORDS_BIGENDIAN - /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */ - s++; - dest[2] = *s++; - dest[1] = *s++; - dest[0] = *s++; - dest += 3; + /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */ + s++; + dest[2] = *s++; + dest[1] = *s++; + dest[0] = *s++; + dest += 3; #else - *dest++ = *s++; - *dest++ = *s++; - *dest++ = *s++; - s++; + *dest++ = *s++; + *dest++ = *s++; + *dest++ = *s++; + s++; #endif - } + } } /* @@ -237,677 +236,677 @@ static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,long src_si */ static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size) { - register const uint8_t* s=src; - register uint8_t* d=dst; - register const uint8_t *end; - const uint8_t *mm_end; - end = s + src_size; + register const uint8_t* s=src; + register uint8_t* d=dst; + register const uint8_t *end; + const uint8_t *mm_end; + end = s + src_size; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*s)); - __asm __volatile("movq %0, %%mm4"::"m"(mask15s)); - mm_end = end - 15; - while(s>1)&0x7FE07FE0) | (x&0x001F001F); - s+=4; - d+=4; + register uint32_t x= *((uint32_t *)s); + *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F); + s+=4; + d+=4; } - if(s < end) + if (s < end) { - register uint16_t x= *((uint16_t *)s); - *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F); - s+=2; - d+=2; + register uint16_t x= *((uint16_t *)s); + *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F); + s+=2; + d+=2; } } static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint8_t *s = src; - const uint8_t *end; + const uint8_t *s = src; + const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + const uint8_t *mm_end; #endif - uint16_t *d = (uint16_t *)dst; - end = s + src_size; + uint16_t *d = (uint16_t *)dst; + end = s + src_size; #ifdef HAVE_MMX - mm_end = end - 15; + mm_end = end - 15; #if 1 //is faster only if multiplies are reasonable fast (FIXME figure out on which cpus this is faster, on Athlon its slightly faster) - asm volatile( - "movq %3, %%mm5 \n\t" - "movq %4, %%mm6 \n\t" - "movq %5, %%mm7 \n\t" - "jmp 2f \n\t" - ASMALIGN(4) - "1: \n\t" - PREFETCH" 32(%1) \n\t" - "movd (%1), %%mm0 \n\t" - "movd 4(%1), 
%%mm3 \n\t" - "punpckldq 8(%1), %%mm0 \n\t" - "punpckldq 12(%1), %%mm3 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm3, %%mm4 \n\t" - "pand %%mm6, %%mm0 \n\t" - "pand %%mm6, %%mm3 \n\t" - "pmaddwd %%mm7, %%mm0 \n\t" - "pmaddwd %%mm7, %%mm3 \n\t" - "pand %%mm5, %%mm1 \n\t" - "pand %%mm5, %%mm4 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm4, %%mm3 \n\t" - "psrld $5, %%mm0 \n\t" - "pslld $11, %%mm3 \n\t" - "por %%mm3, %%mm0 \n\t" - MOVNTQ" %%mm0, (%0) \n\t" - "add $16, %1 \n\t" - "add $8, %0 \n\t" - "2: \n\t" - "cmp %2, %1 \n\t" - " jb 1b \n\t" - : "+r" (d), "+r"(s) - : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216) - ); + asm volatile( + "movq %3, %%mm5 \n\t" + "movq %4, %%mm6 \n\t" + "movq %5, %%mm7 \n\t" + "jmp 2f \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1) \n\t" + "movd (%1), %%mm0 \n\t" + "movd 4(%1), %%mm3 \n\t" + "punpckldq 8(%1), %%mm0 \n\t" + "punpckldq 12(%1), %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm3, %%mm4 \n\t" + "pand %%mm6, %%mm0 \n\t" + "pand %%mm6, %%mm3 \n\t" + "pmaddwd %%mm7, %%mm0 \n\t" + "pmaddwd %%mm7, %%mm3 \n\t" + "pand %%mm5, %%mm1 \n\t" + "pand %%mm5, %%mm4 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "psrld $5, %%mm0 \n\t" + "pslld $11, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, (%0) \n\t" + "add $16, %1 \n\t" + "add $8, %0 \n\t" + "2: \n\t" + "cmp %2, %1 \n\t" + " jb 1b \n\t" + : "+r" (d), "+r"(s) + : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216) + ); #else - __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); - __asm __volatile( - "movq %0, %%mm7\n\t" - "movq %1, %%mm6\n\t" - ::"m"(red_16mask),"m"(green_16mask)); - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movd %1, %%mm0\n\t" - "movd 4%1, %%mm3\n\t" - "punpckldq 8%1, %%mm0\n\t" - "punpckldq 12%1, %%mm3\n\t" - "movq %%mm0, %%mm1\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm3, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "psrlq $3, %%mm0\n\t" - "psrlq $3, %%mm3\n\t" - "pand %2, %%mm0\n\t" - "pand %2, %%mm3\n\t" - "psrlq $5, %%mm1\n\t" - "psrlq $5, %%mm4\n\t" - "pand %%mm6, %%mm1\n\t" - "pand %%mm6, %%mm4\n\t" - "psrlq $8, %%mm2\n\t" - "psrlq $8, %%mm5\n\t" - "pand %%mm7, %%mm2\n\t" - "pand %%mm7, %%mm5\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm5, %%mm3\n\t" - "psllq $16, %%mm3\n\t" - "por %%mm3, %%mm0\n\t" - MOVNTQ" %%mm0, %0\n\t" - :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); - d += 4; - s += 16; - } + __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm __volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_16mask),"m"(green_16mask)); + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + d += 4; + s += 16; + } #endif - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + 
__asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - register int rgb = *(uint32_t*)s; s += 4; - *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8); - } + while (s < end) + { + register int rgb = *(uint32_t*)s; s += 4; + *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8); + } } static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint8_t *s = src; - const uint8_t *end; + const uint8_t *s = src; + const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + const uint8_t *mm_end; #endif - uint16_t *d = (uint16_t *)dst; - end = s + src_size; + uint16_t *d = (uint16_t *)dst; + end = s + src_size; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); - __asm __volatile( - "movq %0, %%mm7\n\t" - "movq %1, %%mm6\n\t" - ::"m"(red_16mask),"m"(green_16mask)); - mm_end = end - 15; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movd %1, %%mm0\n\t" - "movd 4%1, %%mm3\n\t" - "punpckldq 8%1, %%mm0\n\t" - "punpckldq 12%1, %%mm3\n\t" - "movq %%mm0, %%mm1\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm3, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "psllq $8, %%mm0\n\t" - "psllq $8, %%mm3\n\t" - "pand %%mm7, %%mm0\n\t" - "pand %%mm7, %%mm3\n\t" - "psrlq $5, %%mm1\n\t" - "psrlq $5, %%mm4\n\t" - "pand %%mm6, %%mm1\n\t" - "pand %%mm6, %%mm4\n\t" - "psrlq $19, %%mm2\n\t" - "psrlq $19, %%mm5\n\t" - "pand %2, %%mm2\n\t" - "pand %2, %%mm5\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm5, %%mm3\n\t" - "psllq $16, %%mm3\n\t" - "por %%mm3, %%mm0\n\t" - MOVNTQ" %%mm0, %0\n\t" - :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); - d += 4; - s += 16; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm __volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_16mask),"m"(green_16mask)); + mm_end = end - 15; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $8, %%mm0 \n\t" + "psllq $8, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + d += 4; + s += 16; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - register int rgb = *(uint32_t*)s; s += 4; - *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19); - } + while (s < end) + { + register int rgb = *(uint32_t*)s; s += 4; + *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19); + } } static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint8_t *s = src; - const uint8_t *end; + const uint8_t *s = src; + const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + const uint8_t *mm_end; #endif - uint16_t *d = (uint16_t *)dst; - end = s + src_size; + uint16_t *d = (uint16_t 
*)dst; + end = s + src_size; #ifdef HAVE_MMX - mm_end = end - 15; + mm_end = end - 15; #if 1 //is faster only if multiplies are reasonable fast (FIXME figure out on which cpus this is faster, on Athlon its slightly faster) - asm volatile( - "movq %3, %%mm5 \n\t" - "movq %4, %%mm6 \n\t" - "movq %5, %%mm7 \n\t" - "jmp 2f \n\t" - ASMALIGN(4) - "1: \n\t" - PREFETCH" 32(%1) \n\t" - "movd (%1), %%mm0 \n\t" - "movd 4(%1), %%mm3 \n\t" - "punpckldq 8(%1), %%mm0 \n\t" - "punpckldq 12(%1), %%mm3 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm3, %%mm4 \n\t" - "pand %%mm6, %%mm0 \n\t" - "pand %%mm6, %%mm3 \n\t" - "pmaddwd %%mm7, %%mm0 \n\t" - "pmaddwd %%mm7, %%mm3 \n\t" - "pand %%mm5, %%mm1 \n\t" - "pand %%mm5, %%mm4 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm4, %%mm3 \n\t" - "psrld $6, %%mm0 \n\t" - "pslld $10, %%mm3 \n\t" - "por %%mm3, %%mm0 \n\t" - MOVNTQ" %%mm0, (%0) \n\t" - "add $16, %1 \n\t" - "add $8, %0 \n\t" - "2: \n\t" - "cmp %2, %1 \n\t" - " jb 1b \n\t" - : "+r" (d), "+r"(s) - : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215) - ); + asm volatile( + "movq %3, %%mm5 \n\t" + "movq %4, %%mm6 \n\t" + "movq %5, %%mm7 \n\t" + "jmp 2f \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1) \n\t" + "movd (%1), %%mm0 \n\t" + "movd 4(%1), %%mm3 \n\t" + "punpckldq 8(%1), %%mm0 \n\t" + "punpckldq 12(%1), %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm3, %%mm4 \n\t" + "pand %%mm6, %%mm0 \n\t" + "pand %%mm6, %%mm3 \n\t" + "pmaddwd %%mm7, %%mm0 \n\t" + "pmaddwd %%mm7, %%mm3 \n\t" + "pand %%mm5, %%mm1 \n\t" + "pand %%mm5, %%mm4 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "psrld $6, %%mm0 \n\t" + "pslld $10, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, (%0) \n\t" + "add $16, %1 \n\t" + "add $8, %0 \n\t" + "2: \n\t" + "cmp %2, %1 \n\t" + " jb 1b \n\t" + : "+r" (d), "+r"(s) + : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215) + ); #else - __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); - __asm __volatile( - "movq %0, %%mm7\n\t" - "movq %1, %%mm6\n\t" - ::"m"(red_15mask),"m"(green_15mask)); - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movd %1, %%mm0\n\t" - "movd 4%1, %%mm3\n\t" - "punpckldq 8%1, %%mm0\n\t" - "punpckldq 12%1, %%mm3\n\t" - "movq %%mm0, %%mm1\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm3, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "psrlq $3, %%mm0\n\t" - "psrlq $3, %%mm3\n\t" - "pand %2, %%mm0\n\t" - "pand %2, %%mm3\n\t" - "psrlq $6, %%mm1\n\t" - "psrlq $6, %%mm4\n\t" - "pand %%mm6, %%mm1\n\t" - "pand %%mm6, %%mm4\n\t" - "psrlq $9, %%mm2\n\t" - "psrlq $9, %%mm5\n\t" - "pand %%mm7, %%mm2\n\t" - "pand %%mm7, %%mm5\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm5, %%mm3\n\t" - "psllq $16, %%mm3\n\t" - "por %%mm3, %%mm0\n\t" - MOVNTQ" %%mm0, %0\n\t" - :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); - d += 4; - s += 16; - } + __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm __volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_15mask),"m"(green_15mask)); + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $9, %%mm2 \n\t" 
+ "psrlq $9, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + d += 4; + s += 16; + } #endif - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - register int rgb = *(uint32_t*)s; s += 4; - *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9); - } + while (s < end) + { + register int rgb = *(uint32_t*)s; s += 4; + *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9); + } } static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint8_t *s = src; - const uint8_t *end; + const uint8_t *s = src; + const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + const uint8_t *mm_end; #endif - uint16_t *d = (uint16_t *)dst; - end = s + src_size; + uint16_t *d = (uint16_t *)dst; + end = s + src_size; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); - __asm __volatile( - "movq %0, %%mm7\n\t" - "movq %1, %%mm6\n\t" - ::"m"(red_15mask),"m"(green_15mask)); - mm_end = end - 15; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movd %1, %%mm0\n\t" - "movd 4%1, %%mm3\n\t" - "punpckldq 8%1, %%mm0\n\t" - "punpckldq 12%1, %%mm3\n\t" - "movq %%mm0, %%mm1\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm3, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "psllq $7, %%mm0\n\t" - "psllq $7, %%mm3\n\t" - "pand %%mm7, %%mm0\n\t" - "pand %%mm7, %%mm3\n\t" - "psrlq $6, %%mm1\n\t" - "psrlq $6, %%mm4\n\t" - "pand %%mm6, %%mm1\n\t" - "pand %%mm6, %%mm4\n\t" - "psrlq $19, %%mm2\n\t" - "psrlq $19, %%mm5\n\t" - "pand %2, %%mm2\n\t" - "pand %2, %%mm5\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm5, %%mm3\n\t" - "psllq $16, %%mm3\n\t" - "por %%mm3, %%mm0\n\t" - MOVNTQ" %%mm0, %0\n\t" - :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); - d += 4; - s += 16; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm __volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_15mask),"m"(green_15mask)); + mm_end = end - 15; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $7, %%mm0 \n\t" + "psllq $7, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + d += 4; + s += 16; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - register int rgb = *(uint32_t*)s; s += 4; - *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19); - } + while (s < end) + { + register int rgb = *(uint32_t*)s; s 
+= 4; + *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19); + } } static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint8_t *s = src; - const uint8_t *end; + const uint8_t *s = src; + const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + const uint8_t *mm_end; #endif - uint16_t *d = (uint16_t *)dst; - end = s + src_size; + uint16_t *d = (uint16_t *)dst; + end = s + src_size; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); - __asm __volatile( - "movq %0, %%mm7\n\t" - "movq %1, %%mm6\n\t" - ::"m"(red_16mask),"m"(green_16mask)); - mm_end = end - 11; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movd %1, %%mm0\n\t" - "movd 3%1, %%mm3\n\t" - "punpckldq 6%1, %%mm0\n\t" - "punpckldq 9%1, %%mm3\n\t" - "movq %%mm0, %%mm1\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm3, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "psrlq $3, %%mm0\n\t" - "psrlq $3, %%mm3\n\t" - "pand %2, %%mm0\n\t" - "pand %2, %%mm3\n\t" - "psrlq $5, %%mm1\n\t" - "psrlq $5, %%mm4\n\t" - "pand %%mm6, %%mm1\n\t" - "pand %%mm6, %%mm4\n\t" - "psrlq $8, %%mm2\n\t" - "psrlq $8, %%mm5\n\t" - "pand %%mm7, %%mm2\n\t" - "pand %%mm7, %%mm5\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm5, %%mm3\n\t" - "psllq $16, %%mm3\n\t" - "por %%mm3, %%mm0\n\t" - MOVNTQ" %%mm0, %0\n\t" - :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); - d += 4; - s += 12; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm __volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_16mask),"m"(green_16mask)); + mm_end = end - 11; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 3%1, %%mm3 \n\t" + "punpckldq 6%1, %%mm0 \n\t" + "punpckldq 9%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + d += 4; + s += 12; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - const int b= *s++; - const int g= *s++; - const int r= *s++; - *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); - } + while (s < end) + { + const int b = *s++; + const int g = *s++; + const int r = *s++; + *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); + } } static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint8_t *s = src; - const uint8_t *end; + const uint8_t *s = src; + const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + const uint8_t *mm_end; #endif - uint16_t *d = (uint16_t *)dst; - end = s + src_size; + uint16_t *d = (uint16_t *)dst; + end = s + src_size; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); - __asm __volatile( - "movq %0, %%mm7\n\t" - "movq %1, %%mm6\n\t" - ::"m"(red_16mask),"m"(green_16mask)); - mm_end = end - 15; - while(s < mm_end) - { - __asm __volatile( - 
PREFETCH" 32%1\n\t" - "movd %1, %%mm0\n\t" - "movd 3%1, %%mm3\n\t" - "punpckldq 6%1, %%mm0\n\t" - "punpckldq 9%1, %%mm3\n\t" - "movq %%mm0, %%mm1\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm3, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "psllq $8, %%mm0\n\t" - "psllq $8, %%mm3\n\t" - "pand %%mm7, %%mm0\n\t" - "pand %%mm7, %%mm3\n\t" - "psrlq $5, %%mm1\n\t" - "psrlq $5, %%mm4\n\t" - "pand %%mm6, %%mm1\n\t" - "pand %%mm6, %%mm4\n\t" - "psrlq $19, %%mm2\n\t" - "psrlq $19, %%mm5\n\t" - "pand %2, %%mm2\n\t" - "pand %2, %%mm5\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm5, %%mm3\n\t" - "psllq $16, %%mm3\n\t" - "por %%mm3, %%mm0\n\t" - MOVNTQ" %%mm0, %0\n\t" - :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); - d += 4; - s += 12; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm __volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_16mask),"m"(green_16mask)); + mm_end = end - 15; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 3%1, %%mm3 \n\t" + "punpckldq 6%1, %%mm0 \n\t" + "punpckldq 9%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $8, %%mm0 \n\t" + "psllq $8, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + d += 4; + s += 12; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - const int r= *s++; - const int g= *s++; - const int b= *s++; - *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); - } + while (s < end) + { + const int r = *s++; + const int g = *s++; + const int b = *s++; + *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); + } } static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint8_t *s = src; - const uint8_t *end; + const uint8_t *s = src; + const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + const uint8_t *mm_end; #endif - uint16_t *d = (uint16_t *)dst; - end = s + src_size; + uint16_t *d = (uint16_t *)dst; + end = s + src_size; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); - __asm __volatile( - "movq %0, %%mm7\n\t" - "movq %1, %%mm6\n\t" - ::"m"(red_15mask),"m"(green_15mask)); - mm_end = end - 11; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movd %1, %%mm0\n\t" - "movd 3%1, %%mm3\n\t" - "punpckldq 6%1, %%mm0\n\t" - "punpckldq 9%1, %%mm3\n\t" - "movq %%mm0, %%mm1\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm3, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "psrlq $3, %%mm0\n\t" - "psrlq $3, %%mm3\n\t" - "pand %2, %%mm0\n\t" - "pand %2, %%mm3\n\t" - "psrlq $6, %%mm1\n\t" - "psrlq $6, %%mm4\n\t" - "pand %%mm6, %%mm1\n\t" - "pand %%mm6, %%mm4\n\t" - "psrlq $9, %%mm2\n\t" - "psrlq $9, %%mm5\n\t" - "pand %%mm7, %%mm2\n\t" - "pand %%mm7, %%mm5\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm5, %%mm3\n\t" - "psllq $16, %%mm3\n\t" - "por %%mm3, %%mm0\n\t" - MOVNTQ" %%mm0, %0\n\t" - 
:"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); - d += 4; - s += 12; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm __volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_15mask),"m"(green_15mask)); + mm_end = end - 11; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 3%1, %%mm3 \n\t" + "punpckldq 6%1, %%mm0 \n\t" + "punpckldq 9%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $9, %%mm2 \n\t" + "psrlq $9, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + d += 4; + s += 12; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - const int b= *s++; - const int g= *s++; - const int r= *s++; - *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); - } + while (s < end) + { + const int b = *s++; + const int g = *s++; + const int r = *s++; + *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); + } } static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint8_t *s = src; - const uint8_t *end; + const uint8_t *s = src; + const uint8_t *end; #ifdef HAVE_MMX - const uint8_t *mm_end; + const uint8_t *mm_end; #endif - uint16_t *d = (uint16_t *)dst; - end = s + src_size; + uint16_t *d = (uint16_t *)dst; + end = s + src_size; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); - __asm __volatile( - "movq %0, %%mm7\n\t" - "movq %1, %%mm6\n\t" - ::"m"(red_15mask),"m"(green_15mask)); - mm_end = end - 15; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movd %1, %%mm0\n\t" - "movd 3%1, %%mm3\n\t" - "punpckldq 6%1, %%mm0\n\t" - "punpckldq 9%1, %%mm3\n\t" - "movq %%mm0, %%mm1\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm3, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "psllq $7, %%mm0\n\t" - "psllq $7, %%mm3\n\t" - "pand %%mm7, %%mm0\n\t" - "pand %%mm7, %%mm3\n\t" - "psrlq $6, %%mm1\n\t" - "psrlq $6, %%mm4\n\t" - "pand %%mm6, %%mm1\n\t" - "pand %%mm6, %%mm4\n\t" - "psrlq $19, %%mm2\n\t" - "psrlq $19, %%mm5\n\t" - "pand %2, %%mm2\n\t" - "pand %2, %%mm5\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm5, %%mm3\n\t" - "psllq $16, %%mm3\n\t" - "por %%mm3, %%mm0\n\t" - MOVNTQ" %%mm0, %0\n\t" - :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); - d += 4; - s += 12; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); + __asm __volatile( + "movq %0, %%mm7 \n\t" + "movq %1, %%mm6 \n\t" + ::"m"(red_15mask),"m"(green_15mask)); + mm_end = end - 15; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 3%1, %%mm3 \n\t" + "punpckldq 6%1, %%mm0 \n\t" + "punpckldq 9%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $7, %%mm0 \n\t" + "psllq $7, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + 
"pand %%mm7, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + d += 4; + s += 12; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - const int r= *s++; - const int g= *s++; - const int b= *s++; - *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); - } + while (s < end) + { + const int r = *s++; + const int g = *s++; + const int b = *s++; + *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); + } } /* @@ -935,706 +934,706 @@ static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long s */ static inline void RENAME(rgb15to24)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint16_t *end; + const uint16_t *end; #ifdef HAVE_MMX - const uint16_t *mm_end; + const uint16_t *mm_end; #endif - uint8_t *d = (uint8_t *)dst; - const uint16_t *s = (uint16_t *)src; - end = s + src_size/2; + uint8_t *d = (uint8_t *)dst; + const uint16_t *s = (uint16_t *)src; + end = s + src_size/2; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); - mm_end = end - 7; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movq %1, %%mm0\n\t" - "movq %1, %%mm1\n\t" - "movq %1, %%mm2\n\t" - "pand %2, %%mm0\n\t" - "pand %3, %%mm1\n\t" - "pand %4, %%mm2\n\t" - "psllq $3, %%mm0\n\t" - "psrlq $2, %%mm1\n\t" - "psrlq $7, %%mm2\n\t" - "movq %%mm0, %%mm3\n\t" - "movq %%mm1, %%mm4\n\t" - "movq %%mm2, %%mm5\n\t" - "punpcklwd %5, %%mm0\n\t" - "punpcklwd %5, %%mm1\n\t" - "punpcklwd %5, %%mm2\n\t" - "punpckhwd %5, %%mm3\n\t" - "punpckhwd %5, %%mm4\n\t" - "punpckhwd %5, %%mm5\n\t" - "psllq $8, %%mm1\n\t" - "psllq $16, %%mm2\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm2, %%mm0\n\t" - "psllq $8, %%mm4\n\t" - "psllq $16, %%mm5\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm5, %%mm3\n\t" + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = end - 7; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + "movq %1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $2, %%mm1 \n\t" + "psrlq $7, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %5, %%mm0 \n\t" + "punpcklwd %5, %%mm1 \n\t" + "punpcklwd %5, %%mm2 \n\t" + "punpckhwd %5, %%mm3 \n\t" + "punpckhwd %5, %%mm4 \n\t" + "punpckhwd %5, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" - "movq %%mm0, %%mm6\n\t" - "movq %%mm3, %%mm7\n\t" + "movq %%mm0, %%mm6 \n\t" + "movq %%mm3, %%mm7 \n\t" - "movq 8%1, %%mm0\n\t" - "movq 8%1, %%mm1\n\t" - "movq 8%1, %%mm2\n\t" - "pand %2, %%mm0\n\t" - "pand %3, %%mm1\n\t" - "pand %4, %%mm2\n\t" - "psllq $3, %%mm0\n\t" - "psrlq $2, %%mm1\n\t" - "psrlq $7, %%mm2\n\t" - "movq %%mm0, %%mm3\n\t" - "movq %%mm1, %%mm4\n\t" - "movq %%mm2, %%mm5\n\t" - "punpcklwd %5, %%mm0\n\t" - "punpcklwd %5, %%mm1\n\t" - "punpcklwd %5, %%mm2\n\t" - "punpckhwd %5, %%mm3\n\t" - "punpckhwd %5, %%mm4\n\t" - "punpckhwd %5, %%mm5\n\t" - 
"psllq $8, %%mm1\n\t" - "psllq $16, %%mm2\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm2, %%mm0\n\t" - "psllq $8, %%mm4\n\t" - "psllq $16, %%mm5\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm5, %%mm3\n\t" + "movq 8%1, %%mm0 \n\t" + "movq 8%1, %%mm1 \n\t" + "movq 8%1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $2, %%mm1 \n\t" + "psrlq $7, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %5, %%mm0 \n\t" + "punpcklwd %5, %%mm1 \n\t" + "punpcklwd %5, %%mm2 \n\t" + "punpckhwd %5, %%mm3 \n\t" + "punpckhwd %5, %%mm4 \n\t" + "punpckhwd %5, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" - :"=m"(*d) - :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null) - :"memory"); - /* Borrowed 32 to 24 */ - __asm __volatile( - "movq %%mm0, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "movq %%mm6, %%mm0\n\t" - "movq %%mm7, %%mm1\n\t" + :"=m"(*d) + :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null) + :"memory"); + /* Borrowed 32 to 24 */ + __asm __volatile( + "movq %%mm0, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "movq %%mm6, %%mm0 \n\t" + "movq %%mm7, %%mm1 \n\t" - "movq %%mm4, %%mm6\n\t" - "movq %%mm5, %%mm7\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm1, %%mm3\n\t" + "movq %%mm4, %%mm6 \n\t" + "movq %%mm5, %%mm7 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm3 \n\t" - "psrlq $8, %%mm2\n\t" - "psrlq $8, %%mm3\n\t" - "psrlq $8, %%mm6\n\t" - "psrlq $8, %%mm7\n\t" - "pand %2, %%mm0\n\t" - "pand %2, %%mm1\n\t" - "pand %2, %%mm4\n\t" - "pand %2, %%mm5\n\t" - "pand %3, %%mm2\n\t" - "pand %3, %%mm3\n\t" - "pand %3, %%mm6\n\t" - "pand %3, %%mm7\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm3, %%mm1\n\t" - "por %%mm6, %%mm4\n\t" - "por %%mm7, %%mm5\n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm3 \n\t" + "psrlq $8, %%mm6 \n\t" + "psrlq $8, %%mm7 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm1 \n\t" + "pand %2, %%mm4 \n\t" + "pand %2, %%mm5 \n\t" + "pand %3, %%mm2 \n\t" + "pand %3, %%mm3 \n\t" + "pand %3, %%mm6 \n\t" + "pand %3, %%mm7 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm3, %%mm1 \n\t" + "por %%mm6, %%mm4 \n\t" + "por %%mm7, %%mm5 \n\t" - "movq %%mm1, %%mm2\n\t" - "movq %%mm4, %%mm3\n\t" - "psllq $48, %%mm2\n\t" - "psllq $32, %%mm3\n\t" - "pand %4, %%mm2\n\t" - "pand %5, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "psrlq $16, %%mm1\n\t" - "psrlq $32, %%mm4\n\t" - "psllq $16, %%mm5\n\t" - "por %%mm3, %%mm1\n\t" - "pand %6, %%mm5\n\t" - "por %%mm5, %%mm4\n\t" + "movq %%mm1, %%mm2 \n\t" + "movq %%mm4, %%mm3 \n\t" + "psllq $48, %%mm2 \n\t" + "psllq $32, %%mm3 \n\t" + "pand %4, %%mm2 \n\t" + "pand %5, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "psrlq $16, %%mm1 \n\t" + "psrlq $32, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm3, %%mm1 \n\t" + "pand %6, %%mm5 \n\t" + "por %%mm5, %%mm4 \n\t" - MOVNTQ" %%mm0, %0\n\t" - MOVNTQ" %%mm1, 8%0\n\t" - MOVNTQ" %%mm4, 16%0" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm4, 16%0" - :"=m"(*d) - :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) - :"memory"); - d += 24; - s += 8; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + :"=m"(*d) + :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) + :"memory"); + d += 24; + s += 8; + } + __asm __volatile(SFENCE:::"memory"); + __asm 
__volatile(EMMS:::"memory"); #endif - while(s < end) - { - register uint16_t bgr; - bgr = *s++; - *d++ = (bgr&0x1F)<<3; - *d++ = (bgr&0x3E0)>>2; - *d++ = (bgr&0x7C00)>>7; - } + while (s < end) + { + register uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x7C00)>>7; + } } static inline void RENAME(rgb16to24)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint16_t *end; + const uint16_t *end; #ifdef HAVE_MMX - const uint16_t *mm_end; + const uint16_t *mm_end; #endif - uint8_t *d = (uint8_t *)dst; - const uint16_t *s = (const uint16_t *)src; - end = s + src_size/2; + uint8_t *d = (uint8_t *)dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); - mm_end = end - 7; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movq %1, %%mm0\n\t" - "movq %1, %%mm1\n\t" - "movq %1, %%mm2\n\t" - "pand %2, %%mm0\n\t" - "pand %3, %%mm1\n\t" - "pand %4, %%mm2\n\t" - "psllq $3, %%mm0\n\t" - "psrlq $3, %%mm1\n\t" - "psrlq $8, %%mm2\n\t" - "movq %%mm0, %%mm3\n\t" - "movq %%mm1, %%mm4\n\t" - "movq %%mm2, %%mm5\n\t" - "punpcklwd %5, %%mm0\n\t" - "punpcklwd %5, %%mm1\n\t" - "punpcklwd %5, %%mm2\n\t" - "punpckhwd %5, %%mm3\n\t" - "punpckhwd %5, %%mm4\n\t" - "punpckhwd %5, %%mm5\n\t" - "psllq $8, %%mm1\n\t" - "psllq $16, %%mm2\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm2, %%mm0\n\t" - "psllq $8, %%mm4\n\t" - "psllq $16, %%mm5\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm5, %%mm3\n\t" + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + mm_end = end - 7; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + "movq %1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $3, %%mm1 \n\t" + "psrlq $8, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %5, %%mm0 \n\t" + "punpcklwd %5, %%mm1 \n\t" + "punpcklwd %5, %%mm2 \n\t" + "punpckhwd %5, %%mm3 \n\t" + "punpckhwd %5, %%mm4 \n\t" + "punpckhwd %5, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" - "movq %%mm0, %%mm6\n\t" - "movq %%mm3, %%mm7\n\t" + "movq %%mm0, %%mm6 \n\t" + "movq %%mm3, %%mm7 \n\t" - "movq 8%1, %%mm0\n\t" - "movq 8%1, %%mm1\n\t" - "movq 8%1, %%mm2\n\t" - "pand %2, %%mm0\n\t" - "pand %3, %%mm1\n\t" - "pand %4, %%mm2\n\t" - "psllq $3, %%mm0\n\t" - "psrlq $3, %%mm1\n\t" - "psrlq $8, %%mm2\n\t" - "movq %%mm0, %%mm3\n\t" - "movq %%mm1, %%mm4\n\t" - "movq %%mm2, %%mm5\n\t" - "punpcklwd %5, %%mm0\n\t" - "punpcklwd %5, %%mm1\n\t" - "punpcklwd %5, %%mm2\n\t" - "punpckhwd %5, %%mm3\n\t" - "punpckhwd %5, %%mm4\n\t" - "punpckhwd %5, %%mm5\n\t" - "psllq $8, %%mm1\n\t" - "psllq $16, %%mm2\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm2, %%mm0\n\t" - "psllq $8, %%mm4\n\t" - "psllq $16, %%mm5\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm5, %%mm3\n\t" - :"=m"(*d) - :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null) - :"memory"); - /* Borrowed 32 to 24 */ - __asm __volatile( - "movq %%mm0, %%mm4\n\t" - "movq %%mm3, %%mm5\n\t" - "movq %%mm6, %%mm0\n\t" - "movq %%mm7, %%mm1\n\t" + "movq 8%1, %%mm0 \n\t" + "movq 8%1, %%mm1 \n\t" + "movq 8%1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $3, %%mm1 
\n\t" + "psrlq $8, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %5, %%mm0 \n\t" + "punpcklwd %5, %%mm1 \n\t" + "punpcklwd %5, %%mm2 \n\t" + "punpckhwd %5, %%mm3 \n\t" + "punpckhwd %5, %%mm4 \n\t" + "punpckhwd %5, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" + :"=m"(*d) + :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null) + :"memory"); + /* Borrowed 32 to 24 */ + __asm __volatile( + "movq %%mm0, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "movq %%mm6, %%mm0 \n\t" + "movq %%mm7, %%mm1 \n\t" - "movq %%mm4, %%mm6\n\t" - "movq %%mm5, %%mm7\n\t" - "movq %%mm0, %%mm2\n\t" - "movq %%mm1, %%mm3\n\t" + "movq %%mm4, %%mm6 \n\t" + "movq %%mm5, %%mm7 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm3 \n\t" - "psrlq $8, %%mm2\n\t" - "psrlq $8, %%mm3\n\t" - "psrlq $8, %%mm6\n\t" - "psrlq $8, %%mm7\n\t" - "pand %2, %%mm0\n\t" - "pand %2, %%mm1\n\t" - "pand %2, %%mm4\n\t" - "pand %2, %%mm5\n\t" - "pand %3, %%mm2\n\t" - "pand %3, %%mm3\n\t" - "pand %3, %%mm6\n\t" - "pand %3, %%mm7\n\t" - "por %%mm2, %%mm0\n\t" - "por %%mm3, %%mm1\n\t" - "por %%mm6, %%mm4\n\t" - "por %%mm7, %%mm5\n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm3 \n\t" + "psrlq $8, %%mm6 \n\t" + "psrlq $8, %%mm7 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm1 \n\t" + "pand %2, %%mm4 \n\t" + "pand %2, %%mm5 \n\t" + "pand %3, %%mm2 \n\t" + "pand %3, %%mm3 \n\t" + "pand %3, %%mm6 \n\t" + "pand %3, %%mm7 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm3, %%mm1 \n\t" + "por %%mm6, %%mm4 \n\t" + "por %%mm7, %%mm5 \n\t" - "movq %%mm1, %%mm2\n\t" - "movq %%mm4, %%mm3\n\t" - "psllq $48, %%mm2\n\t" - "psllq $32, %%mm3\n\t" - "pand %4, %%mm2\n\t" - "pand %5, %%mm3\n\t" - "por %%mm2, %%mm0\n\t" - "psrlq $16, %%mm1\n\t" - "psrlq $32, %%mm4\n\t" - "psllq $16, %%mm5\n\t" - "por %%mm3, %%mm1\n\t" - "pand %6, %%mm5\n\t" - "por %%mm5, %%mm4\n\t" + "movq %%mm1, %%mm2 \n\t" + "movq %%mm4, %%mm3 \n\t" + "psllq $48, %%mm2 \n\t" + "psllq $32, %%mm3 \n\t" + "pand %4, %%mm2 \n\t" + "pand %5, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "psrlq $16, %%mm1 \n\t" + "psrlq $32, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm3, %%mm1 \n\t" + "pand %6, %%mm5 \n\t" + "por %%mm5, %%mm4 \n\t" - MOVNTQ" %%mm0, %0\n\t" - MOVNTQ" %%mm1, 8%0\n\t" - MOVNTQ" %%mm4, 16%0" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm1, 8%0 \n\t" + MOVNTQ" %%mm4, 16%0" - :"=m"(*d) - :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) - :"memory"); - d += 24; - s += 8; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + :"=m"(*d) + :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh) + :"memory"); + d += 24; + s += 8; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - register uint16_t bgr; - bgr = *s++; - *d++ = (bgr&0x1F)<<3; - *d++ = (bgr&0x7E0)>>3; - *d++ = (bgr&0xF800)>>8; - } + while (s < end) + { + register uint16_t bgr; + bgr = *s++; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0xF800)>>8; + } } static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint16_t *end; + const uint16_t *end; #ifdef HAVE_MMX - const uint16_t *mm_end; + const uint16_t *mm_end; #endif - uint8_t *d = (uint8_t *)dst; - const uint16_t *s = (const uint16_t *)src; - end = s + src_size/2; + uint8_t 
*d = (uint8_t *)dst; + const uint16_t *s = (const uint16_t *)src; + end = s + src_size/2; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); - __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory"); - mm_end = end - 3; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movq %1, %%mm0\n\t" - "movq %1, %%mm1\n\t" - "movq %1, %%mm2\n\t" - "pand %2, %%mm0\n\t" - "pand %3, %%mm1\n\t" - "pand %4, %%mm2\n\t" - "psllq $3, %%mm0\n\t" - "psrlq $2, %%mm1\n\t" - "psrlq $7, %%mm2\n\t" - "movq %%mm0, %%mm3\n\t" - "movq %%mm1, %%mm4\n\t" - "movq %%mm2, %%mm5\n\t" - "punpcklwd %%mm7, %%mm0\n\t" - "punpcklwd %%mm7, %%mm1\n\t" - "punpcklwd %%mm7, %%mm2\n\t" - "punpckhwd %%mm7, %%mm3\n\t" - "punpckhwd %%mm7, %%mm4\n\t" - "punpckhwd %%mm7, %%mm5\n\t" - "psllq $8, %%mm1\n\t" - "psllq $16, %%mm2\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm2, %%mm0\n\t" - "psllq $8, %%mm4\n\t" - "psllq $16, %%mm5\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm5, %%mm3\n\t" - MOVNTQ" %%mm0, %0\n\t" - MOVNTQ" %%mm3, 8%0\n\t" - :"=m"(*d) - :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r) - :"memory"); - d += 16; - s += 4; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm __volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); + mm_end = end - 3; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + "movq %1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $2, %%mm1 \n\t" + "psrlq $7, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %%mm7, %%mm0 \n\t" + "punpcklwd %%mm7, %%mm1 \n\t" + "punpcklwd %%mm7, %%mm2 \n\t" + "punpckhwd %%mm7, %%mm3 \n\t" + "punpckhwd %%mm7, %%mm4 \n\t" + "punpckhwd %%mm7, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm3, 8%0 \n\t" + :"=m"(*d) + :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r) + :"memory"); + d += 16; + s += 4; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { + while (s < end) + { #if 0 //slightly slower on athlon - int bgr= *s++; - *((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9); + int bgr= *s++; + *((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9); #else - register uint16_t bgr; - bgr = *s++; + register uint16_t bgr; + bgr = *s++; #ifdef WORDS_BIGENDIAN - *d++ = 0; - *d++ = (bgr&0x7C00)>>7; - *d++ = (bgr&0x3E0)>>2; - *d++ = (bgr&0x1F)<<3; + *d++ = 0; + *d++ = (bgr&0x7C00)>>7; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x1F)<<3; #else - *d++ = (bgr&0x1F)<<3; - *d++ = (bgr&0x3E0)>>2; - *d++ = (bgr&0x7C00)>>7; - *d++ = 0; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x3E0)>>2; + *d++ = (bgr&0x7C00)>>7; + *d++ = 0; #endif #endif - } + } } static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size) { - const uint16_t *end; + const uint16_t *end; #ifdef HAVE_MMX - const uint16_t *mm_end; + const uint16_t *mm_end; #endif - uint8_t *d = (uint8_t *)dst; - const uint16_t *s = (uint16_t *)src; - end = s + src_size/2; + uint8_t *d = (uint8_t *)dst; + const uint16_t *s = (uint16_t *)src; + end = s + src_size/2; #ifdef HAVE_MMX - __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); - 
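rgb15to32 is the RGB555 analogue of the same unpacking; only the masks and shift counts change (5/5/5 layout instead of 5/6/5). A matching sketch, again with a hypothetical helper name:

#include <stdint.h>

static void unpack555(uint16_t bgr, uint8_t *b, uint8_t *g, uint8_t *r)
{
    *b = (bgr & 0x001F) << 3; /* bits 0..4   */
    *g = (bgr & 0x03E0) >> 2; /* bits 5..9   */
    *r = (bgr & 0x7C00) >> 7; /* bits 10..14 */
}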
__asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory"); - mm_end = end - 3; - while(s < mm_end) - { - __asm __volatile( - PREFETCH" 32%1\n\t" - "movq %1, %%mm0\n\t" - "movq %1, %%mm1\n\t" - "movq %1, %%mm2\n\t" - "pand %2, %%mm0\n\t" - "pand %3, %%mm1\n\t" - "pand %4, %%mm2\n\t" - "psllq $3, %%mm0\n\t" - "psrlq $3, %%mm1\n\t" - "psrlq $8, %%mm2\n\t" - "movq %%mm0, %%mm3\n\t" - "movq %%mm1, %%mm4\n\t" - "movq %%mm2, %%mm5\n\t" - "punpcklwd %%mm7, %%mm0\n\t" - "punpcklwd %%mm7, %%mm1\n\t" - "punpcklwd %%mm7, %%mm2\n\t" - "punpckhwd %%mm7, %%mm3\n\t" - "punpckhwd %%mm7, %%mm4\n\t" - "punpckhwd %%mm7, %%mm5\n\t" - "psllq $8, %%mm1\n\t" - "psllq $16, %%mm2\n\t" - "por %%mm1, %%mm0\n\t" - "por %%mm2, %%mm0\n\t" - "psllq $8, %%mm4\n\t" - "psllq $16, %%mm5\n\t" - "por %%mm4, %%mm3\n\t" - "por %%mm5, %%mm3\n\t" - MOVNTQ" %%mm0, %0\n\t" - MOVNTQ" %%mm3, 8%0\n\t" - :"=m"(*d) - :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r) - :"memory"); - d += 16; - s += 4; - } - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(PREFETCH" %0"::"m"(*s):"memory"); + __asm __volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); + mm_end = end - 3; + while (s < mm_end) + { + __asm __volatile( + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq %1, %%mm1 \n\t" + "movq %1, %%mm2 \n\t" + "pand %2, %%mm0 \n\t" + "pand %3, %%mm1 \n\t" + "pand %4, %%mm2 \n\t" + "psllq $3, %%mm0 \n\t" + "psrlq $3, %%mm1 \n\t" + "psrlq $8, %%mm2 \n\t" + "movq %%mm0, %%mm3 \n\t" + "movq %%mm1, %%mm4 \n\t" + "movq %%mm2, %%mm5 \n\t" + "punpcklwd %%mm7, %%mm0 \n\t" + "punpcklwd %%mm7, %%mm1 \n\t" + "punpcklwd %%mm7, %%mm2 \n\t" + "punpckhwd %%mm7, %%mm3 \n\t" + "punpckhwd %%mm7, %%mm4 \n\t" + "punpckhwd %%mm7, %%mm5 \n\t" + "psllq $8, %%mm1 \n\t" + "psllq $16, %%mm2 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm2, %%mm0 \n\t" + "psllq $8, %%mm4 \n\t" + "psllq $16, %%mm5 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm5, %%mm3 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm3, 8%0 \n\t" + :"=m"(*d) + :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r) + :"memory"); + d += 16; + s += 4; + } + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); #endif - while(s < end) - { - register uint16_t bgr; - bgr = *s++; + while (s < end) + { + register uint16_t bgr; + bgr = *s++; #ifdef WORDS_BIGENDIAN - *d++ = 0; - *d++ = (bgr&0xF800)>>8; - *d++ = (bgr&0x7E0)>>3; - *d++ = (bgr&0x1F)<<3; + *d++ = 0; + *d++ = (bgr&0xF800)>>8; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0x1F)<<3; #else - *d++ = (bgr&0x1F)<<3; - *d++ = (bgr&0x7E0)>>3; - *d++ = (bgr&0xF800)>>8; - *d++ = 0; + *d++ = (bgr&0x1F)<<3; + *d++ = (bgr&0x7E0)>>3; + *d++ = (bgr&0xF800)>>8; + *d++ = 0; #endif - } + } } static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size) { - long idx = 15 - src_size; - uint8_t *s = (uint8_t *) src-idx, *d = dst-idx; + long idx = 15 - src_size; + uint8_t *s = (uint8_t *) src-idx, *d = dst-idx; #ifdef HAVE_MMX - __asm __volatile( - "test %0, %0 \n\t" - "jns 2f \n\t" - PREFETCH" (%1, %0) \n\t" - "movq %3, %%mm7 \n\t" - "pxor %4, %%mm7 \n\t" - "movq %%mm7, %%mm6 \n\t" - "pxor %5, %%mm7 \n\t" - ASMALIGN(4) - "1: \n\t" - PREFETCH" 32(%1, %0) \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq 8(%1, %0), %%mm1 \n\t" + __asm __volatile( + "test %0, %0 \n\t" + "jns 2f \n\t" + PREFETCH" (%1, %0) \n\t" + "movq %3, %%mm7 \n\t" + "pxor %4, %%mm7 \n\t" + "movq %%mm7, %%mm6 \n\t" + "pxor %5, %%mm7 \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1, %0) \n\t" + "movq (%1, %0), %%mm0 \n\t" + "movq 8(%1, %0), %%mm1 \n\t" # ifdef 
HAVE_MMX2 - "pshufw $177, %%mm0, %%mm3 \n\t" - "pshufw $177, %%mm1, %%mm5 \n\t" - "pand %%mm7, %%mm0 \n\t" - "pand %%mm6, %%mm3 \n\t" - "pand %%mm7, %%mm1 \n\t" - "pand %%mm6, %%mm5 \n\t" - "por %%mm3, %%mm0 \n\t" - "por %%mm5, %%mm1 \n\t" + "pshufw $177, %%mm0, %%mm3 \n\t" + "pshufw $177, %%mm1, %%mm5 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm6, %%mm3 \n\t" + "pand %%mm7, %%mm1 \n\t" + "pand %%mm6, %%mm5 \n\t" + "por %%mm3, %%mm0 \n\t" + "por %%mm5, %%mm1 \n\t" # else - "movq %%mm0, %%mm2 \n\t" - "movq %%mm1, %%mm4 \n\t" - "pand %%mm7, %%mm0 \n\t" - "pand %%mm6, %%mm2 \n\t" - "pand %%mm7, %%mm1 \n\t" - "pand %%mm6, %%mm4 \n\t" - "movq %%mm2, %%mm3 \n\t" - "movq %%mm4, %%mm5 \n\t" - "pslld $16, %%mm2 \n\t" - "psrld $16, %%mm3 \n\t" - "pslld $16, %%mm4 \n\t" - "psrld $16, %%mm5 \n\t" - "por %%mm2, %%mm0 \n\t" - "por %%mm4, %%mm1 \n\t" - "por %%mm3, %%mm0 \n\t" - "por %%mm5, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm1, %%mm4 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm6, %%mm2 \n\t" + "pand %%mm7, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq %%mm4, %%mm5 \n\t" + "pslld $16, %%mm2 \n\t" + "psrld $16, %%mm3 \n\t" + "pslld $16, %%mm4 \n\t" + "psrld $16, %%mm5 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm4, %%mm1 \n\t" + "por %%mm3, %%mm0 \n\t" + "por %%mm5, %%mm1 \n\t" # endif - MOVNTQ" %%mm0, (%2, %0) \n\t" - MOVNTQ" %%mm1, 8(%2, %0) \n\t" - "add $16, %0 \n\t" - "js 1b \n\t" - SFENCE" \n\t" - EMMS" \n\t" - "2: \n\t" - : "+&r"(idx) - : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one) - : "memory"); + MOVNTQ" %%mm0, (%2, %0) \n\t" + MOVNTQ" %%mm1, 8(%2, %0) \n\t" + "add $16, %0 \n\t" + "js 1b \n\t" + SFENCE" \n\t" + EMMS" \n\t" + "2: \n\t" + : "+&r"(idx) + : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one) + : "memory"); #endif - for (; idx<15; idx+=4) { - register int v = *(uint32_t *)&s[idx], g = v & 0xff00ff00; - v &= 0xff00ff; - *(uint32_t *)&d[idx] = (v>>16) + g + (v<<16); - } + for (; idx<15; idx+=4) { + register int v = *(uint32_t *)&s[idx], g = v & 0xff00ff00; + v &= 0xff00ff; + *(uint32_t *)&d[idx] = (v>>16) + g + (v<<16); + } } static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size) { - unsigned i; + unsigned i; #ifdef HAVE_MMX - long mmx_size= 23 - src_size; - asm volatile ( - "test %%"REG_a", %%"REG_a" \n\t" - "jns 2f \n\t" - "movq "MANGLE(mask24r)", %%mm5 \n\t" - "movq "MANGLE(mask24g)", %%mm6 \n\t" - "movq "MANGLE(mask24b)", %%mm7 \n\t" - ASMALIGN(4) - "1: \n\t" - PREFETCH" 32(%1, %%"REG_a") \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG - "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG - "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B - "psllq $16, %%mm0 \n\t" // 00 BGR BGR - "pand %%mm5, %%mm0 \n\t" - "pand %%mm6, %%mm1 \n\t" - "pand %%mm7, %%mm2 \n\t" - "por %%mm0, %%mm1 \n\t" - "por %%mm2, %%mm1 \n\t" - "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG - MOVNTQ" %%mm1, (%2, %%"REG_a")\n\t" // RGB RGB RG - "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B - "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR - "pand %%mm7, %%mm0 \n\t" - "pand %%mm5, %%mm1 \n\t" - "pand %%mm6, %%mm2 \n\t" - "por %%mm0, %%mm1 \n\t" - "por %%mm2, %%mm1 \n\t" - "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B - MOVNTQ" %%mm1, 8(%2, %%"REG_a")\n\t" // B RGB RGB R - "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR - "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG - "pand %%mm6, %%mm0 \n\t" - "pand %%mm7, %%mm1 \n\t" - "pand %%mm5, %%mm2 \n\t" - "por %%mm0, %%mm1 \n\t" - "por %%mm2, 
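The scalar tail of rgb32tobgr32 swaps R and B in one pass without touching G or the alpha byte. A worked example, assuming little-endian 0xAARRGGBB pixels:

#include <stdint.h>

static uint32_t swap_rb(uint32_t v)       /* e.g. v = 0x11223344 */
{
    uint32_t g = v & 0xff00ff00;          /* 0x11003300: A and G kept in place */
    v &= 0x00ff00ff;                      /* 0x00220044: R and B isolated */
    return (v >> 16) + g + (v << 16);     /* 0x11443322: R and B exchanged */
}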
%%mm1 \n\t" - MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t" - "add $24, %%"REG_a" \n\t" - " js 1b \n\t" - "2: \n\t" - : "+a" (mmx_size) - : "r" (src-mmx_size), "r"(dst-mmx_size) - ); + long mmx_size= 23 - src_size; + asm volatile ( + "test %%"REG_a", %%"REG_a" \n\t" + "jns 2f \n\t" + "movq "MANGLE(mask24r)", %%mm5 \n\t" + "movq "MANGLE(mask24g)", %%mm6 \n\t" + "movq "MANGLE(mask24b)", %%mm7 \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1, %%"REG_a") \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG + "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG + "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B + "psllq $16, %%mm0 \n\t" // 00 BGR BGR + "pand %%mm5, %%mm0 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm7, %%mm2 \n\t" + "por %%mm0, %%mm1 \n\t" + "por %%mm2, %%mm1 \n\t" + "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG + MOVNTQ" %%mm1, (%2, %%"REG_a") \n\t" // RGB RGB RG + "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B + "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR + "pand %%mm7, %%mm0 \n\t" + "pand %%mm5, %%mm1 \n\t" + "pand %%mm6, %%mm2 \n\t" + "por %%mm0, %%mm1 \n\t" + "por %%mm2, %%mm1 \n\t" + "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B + MOVNTQ" %%mm1, 8(%2, %%"REG_a") \n\t" // B RGB RGB R + "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR + "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG + "pand %%mm6, %%mm0 \n\t" + "pand %%mm7, %%mm1 \n\t" + "pand %%mm5, %%mm2 \n\t" + "por %%mm0, %%mm1 \n\t" + "por %%mm2, %%mm1 \n\t" + MOVNTQ" %%mm1, 16(%2, %%"REG_a") \n\t" + "add $24, %%"REG_a" \n\t" + " js 1b \n\t" + "2: \n\t" + : "+a" (mmx_size) + : "r" (src-mmx_size), "r"(dst-mmx_size) + ); - __asm __volatile(SFENCE:::"memory"); - __asm __volatile(EMMS:::"memory"); + __asm __volatile(SFENCE:::"memory"); + __asm __volatile(EMMS:::"memory"); - if(mmx_size==23) return; //finihsed, was multiple of 8 + if (mmx_size==23) return; //finihsed, was multiple of 8 - src+= src_size; - dst+= src_size; - src_size= 23-mmx_size; - src-= src_size; - dst-= src_size; + src+= src_size; + dst+= src_size; + src_size= 23-mmx_size; + src-= src_size; + dst-= src_size; #endif - for(i=0; i>1; - for(y=0; y>1; + for (y=0; y= 64 - int i; - uint64_t *ldst = (uint64_t *) dst; - const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; - for(i = 0; i < chromWidth; i += 2){ - uint64_t k, l; - k = yc[0] + (uc[0] << 8) + - (yc[1] << 16) + (vc[0] << 24); - l = yc[2] + (uc[1] << 8) + - (yc[3] << 16) + (vc[1] << 24); - *ldst++ = k + (l << 32); - yc += 4; - uc += 2; - vc += 2; - } + int i; + uint64_t *ldst = (uint64_t *) dst; + const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; + for (i = 0; i < chromWidth; i += 2){ + uint64_t k, l; + k = yc[0] + (uc[0] << 8) + + (yc[1] << 16) + (vc[0] << 24); + l = yc[2] + (uc[1] << 8) + + (yc[3] << 16) + (vc[1] << 24); + *ldst++ = k + (l << 32); + yc += 4; + uc += 2; + vc += 2; + } #else - int i, *idst = (int32_t *) dst; - const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; - for(i = 0; i < chromWidth; i++){ + int i, *idst = (int32_t *) dst; + const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; + for (i = 0; i < chromWidth; i++){ #ifdef WORDS_BIGENDIAN - *idst++ = (yc[0] << 24)+ (uc[0] << 16) + - (yc[1] << 8) + (vc[0] << 0); + *idst++ = (yc[0] << 24)+ (uc[0] << 16) + + (yc[1] << 8) + (vc[0] << 0); #else - *idst++ = yc[0] + (uc[0] << 8) + - (yc[1] << 16) + (vc[0] << 24); + *idst++ = yc[0] + (uc[0] << 8) + + (yc[1] << 16) + (vc[0] << 24); #endif - yc += 2; - uc++; - vc++; - } + yc += 2; + uc++; + vc++; + } #endif #endif - if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) - { - usrc += 
chromStride; - vsrc += chromStride; - } - ysrc += lumStride; - dst += dstStride; - } + if ((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) + { + usrc += chromStride; + vsrc += chromStride; + } + ysrc += lumStride; + dst += dstStride; + } #ifdef HAVE_MMX -asm( EMMS" \n\t" - SFENCE" \n\t" +asm( EMMS" \n\t" + SFENCE" \n\t" :::"memory"); #endif } @@ -1645,103 +1644,103 @@ asm( EMMS" \n\t" * problem for anyone then tell me, and ill fix it) */ static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, - long width, long height, - long lumStride, long chromStride, long dstStride) + long width, long height, + long lumStride, long chromStride, long dstStride) { - //FIXME interpolate chroma - RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); + //FIXME interpolate chroma + RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); } static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, - long width, long height, - long lumStride, long chromStride, long dstStride, long vertLumPerChroma) + long width, long height, + long lumStride, long chromStride, long dstStride, long vertLumPerChroma) { - long y; - const long chromWidth= width>>1; - for(y=0; y>1; + for (y=0; yyuy2 #if __WORDSIZE >= 64 - int i; - uint64_t *ldst = (uint64_t *) dst; - const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; - for(i = 0; i < chromWidth; i += 2){ - uint64_t k, l; - k = uc[0] + (yc[0] << 8) + - (vc[0] << 16) + (yc[1] << 24); - l = uc[1] + (yc[2] << 8) + - (vc[1] << 16) + (yc[3] << 24); - *ldst++ = k + (l << 32); - yc += 4; - uc += 2; - vc += 2; - } + int i; + uint64_t *ldst = (uint64_t *) dst; + const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; + for (i = 0; i < chromWidth; i += 2){ + uint64_t k, l; + k = uc[0] + (yc[0] << 8) + + (vc[0] << 16) + (yc[1] << 24); + l = uc[1] + (yc[2] << 8) + + (vc[1] << 16) + (yc[3] << 24); + *ldst++ = k + (l << 32); + yc += 4; + uc += 2; + vc += 2; + } #else - int i, *idst = (int32_t *) dst; - const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; - for(i = 0; i < chromWidth; i++){ + int i, *idst = (int32_t *) dst; + const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; + for (i = 0; i < chromWidth; i++){ #ifdef WORDS_BIGENDIAN - *idst++ = (uc[0] << 24)+ (yc[0] << 16) + - (vc[0] << 8) + (yc[1] << 0); + *idst++ = (uc[0] << 24)+ (yc[0] << 16) + + (vc[0] << 8) + (yc[1] << 0); #else - *idst++ = uc[0] + (yc[0] << 8) + - (vc[0] << 16) + (yc[1] << 24); + *idst++ = uc[0] + (yc[0] << 8) + + (vc[0] << 16) + (yc[1] << 24); #endif - yc += 2; - uc++; - vc++; - } + yc += 2; + uc++; + vc++; + } #endif #endif - if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) - { - usrc += chromStride; - vsrc += chromStride; - } - ysrc += lumStride; - dst += dstStride; - } + if ((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) + { + usrc += chromStride; + vsrc += chromStride; + } + ysrc += lumStride; + dst += dstStride; + } #ifdef HAVE_MMX -asm( EMMS" \n\t" - SFENCE" \n\t" +asm( EMMS" \n\t" + SFENCE" \n\t" :::"memory"); #endif } @@ -1752,11 +1751,11 @@ asm( EMMS" \n\t" * problem for anyone then tell me, and ill fix it) */ static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, - long width, long height, - long lumStride, long chromStride, long dstStride) + long width, long height, + long lumStride, long chromStride, long dstStride) { - //FIXME interpolate chroma - 
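yuvPlanartouyvy differs only in byte order, U Y0 V Y1, and both packers advance the chroma pointers once every vertLumPerChroma luma lines; that is what the (y&(vertLumPerChroma-1))==(vertLumPerChroma-1) test implements, so 2 handles 4:2:0 input and 1 handles 4:2:2. The UYVY pair, sketched the same way:

#include <stdint.h>

static uint32_t pack_uyvy(uint8_t y0, uint8_t u, uint8_t y1, uint8_t v)
{
    /* matches "uc[0] + (yc[0] << 8) + (vc[0] << 16) + (yc[1] << 24)" above */
    return u | ((uint32_t)y0 << 8) | ((uint32_t)v << 16) | ((uint32_t)y1 << 24);
}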
RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); + //FIXME interpolate chroma + RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); } /** @@ -1764,10 +1763,10 @@ static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, * width should be a multiple of 16 */ static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, - long width, long height, - long lumStride, long chromStride, long dstStride) + long width, long height, + long lumStride, long chromStride, long dstStride) { - RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1); + RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1); } /** @@ -1776,234 +1775,234 @@ static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usr * problem for anyone then tell me, and ill fix it) */ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, - long width, long height, - long lumStride, long chromStride, long srcStride) + long width, long height, + long lumStride, long chromStride, long srcStride) { - long y; - const long chromWidth= width>>1; - for(y=0; y>1; + for (y=0; y>2; - dst[2*x+2]= ( src[x] + 3*src[x+1])>>2; - } - dst[2*srcWidth-1]= src[srcWidth-1]; + // first line + for (x=0; x>2; + dst[2*x+2]= ( src[x] + 3*src[x+1])>>2; + } + dst[2*srcWidth-1]= src[srcWidth-1]; dst+= dstStride; - for(y=1; y>2; - dst[dstStride]= ( src[0] + 3*src[srcStride])>>2; + dst[0 ]= (3*src[0] + src[srcStride])>>2; + dst[dstStride]= ( src[0] + 3*src[srcStride])>>2; - for(x=mmxSize-1; x>2; - dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2; - dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2; - dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2; - } - dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2; - dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2; + for (x=mmxSize-1; x>2; + dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2; + dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2; + dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2; + } + dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2; + dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2; - dst+=dstStride*2; - src+=srcStride; - } + dst+=dstStride*2; + src+=srcStride; + } - // last line + // last line #if 1 - dst[0]= src[0]; + dst[0]= src[0]; - for(x=0; x>2; - dst[2*x+2]= ( src[x] + 3*src[x+1])>>2; - } - dst[2*srcWidth-1]= src[srcWidth-1]; + for (x=0; x>2; + dst[2*x+2]= ( src[x] + 3*src[x+1])>>2; + } + dst[2*srcWidth-1]= src[srcWidth-1]; #else - for(x=0; x>1; - for(y=0; y>1; + for (y=0; y>1; + long y; + const long chromWidth= width>>1; #ifdef HAVE_MMX - for(y=0; y>RGB2YUV_SHIFT) + 16; - unsigned int V = ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128; - unsigned int U = ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128; + unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; + unsigned int V = ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128; + unsigned int U = ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128; - udst[i] = U; - vdst[i] = V; - ydst[2*i] = Y; + udst[i] = U; + vdst[i] = V; + ydst[2*i] = Y; - b= src[6*i+3]; - g= src[6*i+4]; - r= src[6*i+5]; + b = src[6*i+3]; + g = src[6*i+4]; + r = src[6*i+5]; - Y = ((RY*r + GY*g + 
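planar2x doubles each plane with fixed 3:1 taps, so the two outputs between src[x] and src[x+1] land at the 1/4 and 3/4 positions. The shift truncates (there is no rounding term); a worked sketch with src[x] = 100 and src[x+1] = 200:

static void doubled_pair(int s0, int s1, int *lo, int *hi)
{
    *lo = (3*s0 + s1) >> 2; /* (300 + 200) >> 2 = 125, the 1/4 sample */
    *hi = (s0 + 3*s1) >> 2; /* (100 + 600) >> 2 = 175, the 3/4 sample */
}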
BY*b)>>RGB2YUV_SHIFT) + 16; - ydst[2*i+1] = Y; - } - ydst += lumStride; - src += srcStride; + Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; + ydst[2*i+1] = Y; + } + ydst += lumStride; + src += srcStride; - for(i=0; i>RGB2YUV_SHIFT) + 16; + unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; - ydst[2*i] = Y; + ydst[2*i] = Y; - b= src[6*i+3]; - g= src[6*i+4]; - r= src[6*i+5]; + b = src[6*i+3]; + g = src[6*i+4]; + r = src[6*i+5]; - Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; - ydst[2*i+1] = Y; - } - udst += chromStride; - vdst += chromStride; - ydst += lumStride; - src += srcStride; - } + Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16; + ydst[2*i+1] = Y; + } + udst += chromStride; + vdst += chromStride; + ydst += lumStride; + src += srcStride; + } } void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest, - long width, long height, long src1Stride, - long src2Stride, long dstStride){ - long h; + long width, long height, long src1Stride, + long src2Stride, long dstStride){ + long h; - for(h=0; h < height; h++) - { - long w; + for (h=0; h < height; h++) + { + long w; #ifdef HAVE_MMX #ifdef HAVE_SSE2 - asm( - "xor %%"REG_a", %%"REG_a" \n\t" - "1: \n\t" - PREFETCH" 64(%1, %%"REG_a") \n\t" - PREFETCH" 64(%2, %%"REG_a") \n\t" - "movdqa (%1, %%"REG_a"), %%xmm0 \n\t" - "movdqa (%1, %%"REG_a"), %%xmm1 \n\t" - "movdqa (%2, %%"REG_a"), %%xmm2 \n\t" - "punpcklbw %%xmm2, %%xmm0 \n\t" - "punpckhbw %%xmm2, %%xmm1 \n\t" - "movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t" - "movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t" - "add $16, %%"REG_a" \n\t" - "cmp %3, %%"REG_a" \n\t" - " jb 1b \n\t" - ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15) - : "memory", "%"REG_a"" - ); + asm( + "xor %%"REG_a", %%"REG_a" \n\t" + "1: \n\t" + PREFETCH" 64(%1, %%"REG_a") \n\t" + PREFETCH" 64(%2, %%"REG_a") \n\t" + "movdqa (%1, %%"REG_a"), %%xmm0 \n\t" + "movdqa (%1, %%"REG_a"), %%xmm1 \n\t" + "movdqa (%2, %%"REG_a"), %%xmm2 \n\t" + "punpcklbw %%xmm2, %%xmm0 \n\t" + "punpckhbw %%xmm2, %%xmm1 \n\t" + "movntdq %%xmm0, (%0, %%"REG_a", 2) \n\t" + "movntdq %%xmm1, 16(%0, %%"REG_a", 2) \n\t" + "add $16, %%"REG_a" \n\t" + "cmp %3, %%"REG_a" \n\t" + " jb 1b \n\t" + ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15) + : "memory", "%"REG_a"" + ); #else - asm( - "xor %%"REG_a", %%"REG_a" \n\t" - "1: \n\t" - PREFETCH" 64(%1, %%"REG_a") \n\t" - PREFETCH" 64(%2, %%"REG_a") \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq 8(%1, %%"REG_a"), %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "movq (%2, %%"REG_a"), %%mm4 \n\t" - "movq 8(%2, %%"REG_a"), %%mm5 \n\t" - "punpcklbw %%mm4, %%mm0 \n\t" - "punpckhbw %%mm4, %%mm1 \n\t" - "punpcklbw %%mm5, %%mm2 \n\t" - "punpckhbw %%mm5, %%mm3 \n\t" - MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t" - MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t" - MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t" - MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t" - "add $16, %%"REG_a" \n\t" - "cmp %3, %%"REG_a" \n\t" - " jb 1b \n\t" - ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15) - : "memory", "%"REG_a - ); + asm( + "xor %%"REG_a", %%"REG_a" \n\t" + "1: \n\t" + PREFETCH" 64(%1, %%"REG_a") \n\t" + PREFETCH" 64(%2, %%"REG_a") \n\t" + "movq (%1, %%"REG_a"), %%mm0 \n\t" + "movq 8(%1, %%"REG_a"), %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "movq (%2, %%"REG_a"), %%mm4 \n\t" + "movq 8(%2, %%"REG_a"), %%mm5 \n\t" + "punpcklbw %%mm4, %%mm0 \n\t" + "punpckhbw %%mm4, %%mm1 \n\t" + "punpcklbw %%mm5, %%mm2 \n\t" + "punpckhbw %%mm5, %%mm3 \n\t" + MOVNTQ" %%mm0, (%0, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm1, 
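interleaveBytes leans on punpcklbw/punpckhbw (and their SSE2 forms) to zip the two byte streams. A scalar model of what "punpcklbw %%mm4, %%mm0" computes, with mm0 supplying the even output bytes:

#include <stdint.h>

static uint64_t punpcklbw_model(uint64_t a, uint64_t b)
{
    uint64_t r = 0;
    int i;
    for (i = 0; i < 4; i++) {
        r |= ((a >> (8*i)) & 0xFF) << (16*i);     /* low bytes of a -> even slots */
        r |= ((b >> (8*i)) & 0xFF) << (16*i + 8); /* low bytes of b -> odd slots  */
    }
    return r;
}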
8(%0, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2) \n\t" + MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2) \n\t" + "add $16, %%"REG_a" \n\t" + "cmp %3, %%"REG_a" \n\t" + " jb 1b \n\t" + ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15) + : "memory", "%"REG_a + ); #endif - for(w= (width&(~15)); w < width; w++) - { - dest[2*w+0] = src1[w]; - dest[2*w+1] = src2[w]; - } + for (w= (width&(~15)); w < width; w++) + { + dest[2*w+0] = src1[w]; + dest[2*w+1] = src2[w]; + } #else - for(w=0; w < width; w++) - { - dest[2*w+0] = src1[w]; - dest[2*w+1] = src2[w]; - } + for (w=0; w < width; w++) + { + dest[2*w+0] = src1[w]; + dest[2*w+1] = src2[w]; + } #endif - dest += dstStride; + dest += dstStride; src1 += src1Stride; src2 += src2Stride; - } + } #ifdef HAVE_MMX - asm( - EMMS" \n\t" - SFENCE" \n\t" - ::: "memory" - ); + asm( + EMMS" \n\t" + SFENCE" \n\t" + ::: "memory" + ); #endif } static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2, - uint8_t *dst1, uint8_t *dst2, - long width, long height, - long srcStride1, long srcStride2, - long dstStride1, long dstStride2) + uint8_t *dst1, uint8_t *dst2, + long width, long height, + long srcStride1, long srcStride2, + long dstStride1, long dstStride2) { long y,x,w,h; w=width/2; h=height/2; #ifdef HAVE_MMX asm volatile( - PREFETCH" %0\n\t" - PREFETCH" %1\n\t" - ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory"); + PREFETCH" %0 \n\t" + PREFETCH" %1 \n\t" + ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory"); #endif - for(y=0;y>1); - uint8_t* d=dst1+dstStride1*y; - x=0; + for (y=0;y>1); + uint8_t* d=dst1+dstStride1*y; + x=0; #ifdef HAVE_MMX - for(;x>1); - uint8_t* d=dst2+dstStride2*y; - x=0; -#ifdef HAVE_MMX - for(;x>1); + uint8_t* d=dst2+dstStride2*y; + x=0; +#ifdef HAVE_MMX + for (;x>2); - const uint8_t* vp=src3+srcStride3*(y>>2); - uint8_t* d=dst+dstStride*y; - x=0; + for (y=0;y>2); + const uint8_t* vp=src3+srcStride3*(y>>2); + uint8_t* d=dst+dstStride*y; + x=0; #ifdef HAVE_MMX - for(;x> 19; - dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t); - } - perm1 = vec_lvsl(i << 2, val); - v1 = vec_ld(i << 2, val); - for ( ; i < (dstW - 15); i+=16) { - int offset = i << 2; - vector signed int v2 = vec_ld(offset + 16, val); - vector signed int v3 = vec_ld(offset + 32, val); - vector signed int v4 = vec_ld(offset + 48, val); - vector signed int v5 = vec_ld(offset + 64, val); - vector signed int v12 = vec_perm(v1,v2,perm1); - vector signed int v23 = vec_perm(v2,v3,perm1); - vector signed int v34 = vec_perm(v3,v4,perm1); - vector signed int v45 = vec_perm(v4,v5,perm1); + register int i; + vector unsigned int altivec_vectorShiftInt19 = + vec_add(vec_splat_u32(10),vec_splat_u32(9)); + if ((unsigned long)dest % 16) { + /* badly aligned store, we force store alignement */ + /* and will handle load misalignement on val w/ vec_perm */ + vector unsigned char perm1; + vector signed int v1; + for (i = 0 ; (i < dstW) && + (((unsigned long)dest + i) % 16) ; i++) { + int t = val[i] >> 19; + dest[i] = (t < 0) ? 0 : ((t > 255) ? 
255 : t); + } + perm1 = vec_lvsl(i << 2, val); + v1 = vec_ld(i << 2, val); + for ( ; i < (dstW - 15); i+=16) { + int offset = i << 2; + vector signed int v2 = vec_ld(offset + 16, val); + vector signed int v3 = vec_ld(offset + 32, val); + vector signed int v4 = vec_ld(offset + 48, val); + vector signed int v5 = vec_ld(offset + 64, val); + vector signed int v12 = vec_perm(v1,v2,perm1); + vector signed int v23 = vec_perm(v2,v3,perm1); + vector signed int v34 = vec_perm(v3,v4,perm1); + vector signed int v45 = vec_perm(v4,v5,perm1); - vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19); - vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19); - vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19); - vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19); - vector unsigned short vs1 = vec_packsu(vA, vB); - vector unsigned short vs2 = vec_packsu(vC, vD); - vector unsigned char vf = vec_packsu(vs1, vs2); - vec_st(vf, i, dest); - v1 = v5; + vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19); + vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19); + vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19); + vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19); + vector unsigned short vs1 = vec_packsu(vA, vB); + vector unsigned short vs2 = vec_packsu(vC, vD); + vector unsigned char vf = vec_packsu(vs1, vs2); + vec_st(vf, i, dest); + v1 = v5; + } + } else { // dest is properly aligned, great + for (i = 0; i < (dstW - 15); i+=16) { + int offset = i << 2; + vector signed int v1 = vec_ld(offset, val); + vector signed int v2 = vec_ld(offset + 16, val); + vector signed int v3 = vec_ld(offset + 32, val); + vector signed int v4 = vec_ld(offset + 48, val); + vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19); + vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19); + vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19); + vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19); + vector unsigned short vs1 = vec_packsu(v5, v6); + vector unsigned short vs2 = vec_packsu(v7, v8); + vector unsigned char vf = vec_packsu(vs1, vs2); + vec_st(vf, i, dest); + } } - } else { // dest is properly aligned, great - for (i = 0; i < (dstW - 15); i+=16) { - int offset = i << 2; - vector signed int v1 = vec_ld(offset, val); - vector signed int v2 = vec_ld(offset + 16, val); - vector signed int v3 = vec_ld(offset + 32, val); - vector signed int v4 = vec_ld(offset + 48, val); - vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19); - vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19); - vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19); - vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19); - vector unsigned short vs1 = vec_packsu(v5, v6); - vector unsigned short vs2 = vec_packsu(v7, v8); - vector unsigned char vf = vec_packsu(vs1, vs2); - vec_st(vf, i, dest); + for ( ; i < dstW ; i++) { + int t = val[i] >> 19; + dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t); } - } - for ( ; i < dstW ; i++) { - int t = val[i] >> 19; - dest[i] = (t < 0) ? 0 : ((t > 255) ? 
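The vector body reaches the same values without explicit compares: vec_sra shifts, then two vec_packsu passes saturate 32->16 and then 16->8 bits. A per-lane scalar model of why the double saturating pack equals the clamp above:

#include <stdint.h>

static uint8_t packsu_twice(int32_t v)
{
    int32_t  t = v >> 19;                             /* vec_sra            */
    uint16_t s = t < 0 ? 0 : (t > 65535 ? 65535 : t); /* vec_packsu, 32->16 */
    return s > 255 ? 255 : (uint8_t)s;                /* vec_packsu, 16->8  */
}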
255 : t); - } } static inline void yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, - int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, - uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW) + int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, + uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW) { - const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)}; - register int i, j; - { - int __attribute__ ((aligned (16))) val[dstW]; + const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)}; + register int i, j; + { + int __attribute__ ((aligned (16))) val[dstW]; - for (i = 0; i < (dstW -7); i+=4) { - vec_st(vini, i << 2, val); + for (i = 0; i < (dstW -7); i+=4) { + vec_st(vini, i << 2, val); + } + for (; i < dstW; i++) { + val[i] = (1 << 18); + } + + for (j = 0; j < lumFilterSize; j++) { + vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter); + vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter); + vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0); + vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter + + perm = vec_lvsl(0, lumSrc[j]); + l1 = vec_ld(0, lumSrc[j]); + + for (i = 0; i < (dstW - 7); i+=8) { + int offset = i << 2; + vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]); + + vector signed int v1 = vec_ld(offset, val); + vector signed int v2 = vec_ld(offset + 16, val); + + vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7] + + vector signed int i1 = vec_mule(vLumFilter, ls); + vector signed int i2 = vec_mulo(vLumFilter, ls); + + vector signed int vf1 = vec_mergeh(i1, i2); + vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j] + + vector signed int vo1 = vec_add(v1, vf1); + vector signed int vo2 = vec_add(v2, vf2); + + vec_st(vo1, offset, val); + vec_st(vo2, offset + 16, val); + + l1 = l2; + } + for ( ; i < dstW; i++) { + val[i] += lumSrc[j][i] * lumFilter[j]; + } + } + altivec_packIntArrayToCharArray(val,dest,dstW); } - for (; i < dstW; i++) { - val[i] = (1 << 18); + if (uDest != 0) { + int __attribute__ ((aligned (16))) u[chrDstW]; + int __attribute__ ((aligned (16))) v[chrDstW]; + + for (i = 0; i < (chrDstW -7); i+=4) { + vec_st(vini, i << 2, u); + vec_st(vini, i << 2, v); + } + for (; i < chrDstW; i++) { + u[i] = (1 << 18); + v[i] = (1 << 18); + } + + for (j = 0; j < chrFilterSize; j++) { + vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter); + vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter); + vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0); + vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter + + perm = vec_lvsl(0, chrSrc[j]); + l1 = vec_ld(0, chrSrc[j]); + l1_V = vec_ld(2048 << 1, chrSrc[j]); + + for (i = 0; i < (chrDstW - 7); i+=8) { + int offset = i << 2; + vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]); + vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]); + + vector signed int v1 = vec_ld(offset, u); + vector signed int v2 = vec_ld(offset + 16, u); + vector signed int v1_V = vec_ld(offset, v); + vector signed int v2_V = vec_ld(offset + 16, v); + + vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7] + vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... 
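Stripped of the vectorization, the luma half of yuv2yuvX_altivec_real is a fixed-point FIR: seed each accumulator with the rounding constant 1<<18, add lumSrc[j][i]*lumFilter[j] over all taps, then shift by 19 and clamp (the job of altivec_packIntArrayToCharArray). A plain-C restatement under the same names:

#include <stdint.h>

static void yuv2yuvX_luma_ref(int16_t **lumSrc, int16_t *lumFilter,
                              int lumFilterSize, uint8_t *dest, int dstW)
{
    int i, j;
    for (i = 0; i < dstW; i++) {
        int acc = 1 << 18;                   /* rounding term for the >>19 */
        for (j = 0; j < lumFilterSize; j++)
            acc += lumSrc[j][i] * lumFilter[j];
        acc >>= 19;
        dest[i] = acc < 0 ? 0 : (acc > 255 ? 255 : acc);
    }
}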
chrSrc[j][i+2055] + + vector signed int i1 = vec_mule(vChrFilter, ls); + vector signed int i2 = vec_mulo(vChrFilter, ls); + vector signed int i1_V = vec_mule(vChrFilter, ls_V); + vector signed int i2_V = vec_mulo(vChrFilter, ls_V); + + vector signed int vf1 = vec_mergeh(i1, i2); + vector signed int vf2 = vec_mergel(i1, i2); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j] + vector signed int vf1_V = vec_mergeh(i1_V, i2_V); + vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j] + + vector signed int vo1 = vec_add(v1, vf1); + vector signed int vo2 = vec_add(v2, vf2); + vector signed int vo1_V = vec_add(v1_V, vf1_V); + vector signed int vo2_V = vec_add(v2_V, vf2_V); + + vec_st(vo1, offset, u); + vec_st(vo2, offset + 16, u); + vec_st(vo1_V, offset, v); + vec_st(vo2_V, offset + 16, v); + + l1 = l2; + l1_V = l2_V; + } + for ( ; i < chrDstW; i++) { + u[i] += chrSrc[j][i] * chrFilter[j]; + v[i] += chrSrc[j][i + 2048] * chrFilter[j]; + } + } + altivec_packIntArrayToCharArray(u,uDest,chrDstW); + altivec_packIntArrayToCharArray(v,vDest,chrDstW); } - - for (j = 0; j < lumFilterSize; j++) { - vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter); - vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter); - vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0); - vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter - - perm = vec_lvsl(0, lumSrc[j]); - l1 = vec_ld(0, lumSrc[j]); - - for (i = 0; i < (dstW - 7); i+=8) { - int offset = i << 2; - vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]); - - vector signed int v1 = vec_ld(offset, val); - vector signed int v2 = vec_ld(offset + 16, val); - - vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7] - - vector signed int i1 = vec_mule(vLumFilter, ls); - vector signed int i2 = vec_mulo(vLumFilter, ls); - - vector signed int vf1 = vec_mergeh(i1, i2); - vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... 
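Two details of the chroma half worth noting: vec_mule/vec_mulo return the even- and odd-lane 16x16->32 products in separate vectors, so the vec_mergeh/vec_mergel pair exists purely to restore source order; and the V samples sit at a fixed +2048 offset inside the same chrSrc lines, which is why the scalar tail reads

    u[i] += chrSrc[j][i]        * chrFilter[j];
    v[i] += chrSrc[j][i + 2048] * chrFilter[j];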
lumSrc[j][i+7] * lumFilter[j] - - vector signed int vo1 = vec_add(v1, vf1); - vector signed int vo2 = vec_add(v2, vf2); - - vec_st(vo1, offset, val); - vec_st(vo2, offset + 16, val); - - l1 = l2; - } - for ( ; i < dstW; i++) { - val[i] += lumSrc[j][i] * lumFilter[j]; - } - } - altivec_packIntArrayToCharArray(val,dest,dstW); - } - if (uDest != 0) { - int __attribute__ ((aligned (16))) u[chrDstW]; - int __attribute__ ((aligned (16))) v[chrDstW]; - - for (i = 0; i < (chrDstW -7); i+=4) { - vec_st(vini, i << 2, u); - vec_st(vini, i << 2, v); - } - for (; i < chrDstW; i++) { - u[i] = (1 << 18); - v[i] = (1 << 18); - } - - for (j = 0; j < chrFilterSize; j++) { - vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter); - vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter); - vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0); - vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter - - perm = vec_lvsl(0, chrSrc[j]); - l1 = vec_ld(0, chrSrc[j]); - l1_V = vec_ld(2048 << 1, chrSrc[j]); - - for (i = 0; i < (chrDstW - 7); i+=8) { - int offset = i << 2; - vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]); - vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]); - - vector signed int v1 = vec_ld(offset, u); - vector signed int v2 = vec_ld(offset + 16, u); - vector signed int v1_V = vec_ld(offset, v); - vector signed int v2_V = vec_ld(offset + 16, v); - - vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7] - vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055] - - vector signed int i1 = vec_mule(vChrFilter, ls); - vector signed int i2 = vec_mulo(vChrFilter, ls); - vector signed int i1_V = vec_mule(vChrFilter, ls_V); - vector signed int i2_V = vec_mulo(vChrFilter, ls_V); - - vector signed int vf1 = vec_mergeh(i1, i2); - vector signed int vf2 = vec_mergel(i1, i2); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j] - vector signed int vf1_V = vec_mergeh(i1_V, i2_V); - vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i] * chrFilter[j] ... 
chrSrc[j][i+7] * chrFilter[j] - - vector signed int vo1 = vec_add(v1, vf1); - vector signed int vo2 = vec_add(v2, vf2); - vector signed int vo1_V = vec_add(v1_V, vf1_V); - vector signed int vo2_V = vec_add(v2_V, vf2_V); - - vec_st(vo1, offset, u); - vec_st(vo2, offset + 16, u); - vec_st(vo1_V, offset, v); - vec_st(vo2_V, offset + 16, v); - - l1 = l2; - l1_V = l2_V; - } - for ( ; i < chrDstW; i++) { - u[i] += chrSrc[j][i] * chrFilter[j]; - v[i] += chrSrc[j][i + 2048] * chrFilter[j]; - } - } - altivec_packIntArrayToCharArray(u,uDest,chrDstW); - altivec_packIntArrayToCharArray(v,vDest,chrDstW); - } } static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, int16_t *filter, int16_t *filterPos, int filterSize) { - register int i; - int __attribute__ ((aligned (16))) tempo[4]; + register int i; + int __attribute__ ((aligned (16))) tempo[4]; - if (filterSize % 4) { - for(i=0; i>7, 0, (1<<15)-1); + if (filterSize % 4) { + for (i=0; i>7, 0, (1<<15)-1); + } } - } - else - switch (filterSize) { - case 4: + else + switch (filterSize) { + case 4: { - for(i=0; i 12) { - src_v1 = vec_ld(srcPos + 16, src); - } - src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); + vector unsigned char src_v0 = vec_ld(srcPos, src); + vector unsigned char src_v1, src_vF; + vector signed short src_v, filter_v; + vector signed int val_vEven, val_s; + if ((((int)src + srcPos)% 16) > 12) { + src_v1 = vec_ld(srcPos + 16, src); + } + src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); - src_v = // vec_unpackh sign-extends... - (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); - // now put our elements in the even slots - src_v = vec_mergeh(src_v, (vector signed short)vzero); + src_v = // vec_unpackh sign-extends... + (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); + // now put our elements in the even slots + src_v = vec_mergeh(src_v, (vector signed short)vzero); - filter_v = vec_ld(i << 3, filter); + filter_v = vec_ld(i << 3, filter); // the 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2) // the neat trick : we only care for half the elements, // high or low depending on (i<<3)%16 (it's 0 or 8 here), // and we're going to use vec_mule, so we chose // carefully how to "unpack" the elements into the even slots - if ((i << 3) % 16) - filter_v = vec_mergel(filter_v,(vector signed short)vzero); - else - filter_v = vec_mergeh(filter_v,(vector signed short)vzero); + if ((i << 3) % 16) + filter_v = vec_mergel(filter_v,(vector signed short)vzero); + else + filter_v = vec_mergeh(filter_v,(vector signed short)vzero); - val_vEven = vec_mule(src_v, filter_v); - val_s = vec_sums(val_vEven, vzero); - vec_st(val_s, 0, tempo); - dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1); - } + val_vEven = vec_mule(src_v, filter_v); + val_s = vec_sums(val_vEven, vzero); + vec_st(val_s, 0, tempo); + dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1); + } } break; - case 8: + case 8: { - for(i=0; i 8) { - src_v1 = vec_ld(srcPos + 16, src); - } - src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); + vector unsigned char src_v0 = vec_ld(srcPos, src); + vector unsigned char src_v1, src_vF; + vector signed short src_v, filter_v; + vector signed int val_v, val_s; + if ((((int)src + srcPos)% 16) > 8) { + src_v1 = vec_ld(srcPos + 16, src); + } + src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); - src_v = // vec_unpackh sign-extends... 
- (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); - filter_v = vec_ld(i << 4, filter); + src_v = // vec_unpackh sign-extends... + (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); + filter_v = vec_ld(i << 4, filter); // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2) - val_v = vec_msums(src_v, filter_v, (vector signed int)vzero); - val_s = vec_sums(val_v, vzero); - vec_st(val_s, 0, tempo); - dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1); - } + val_v = vec_msums(src_v, filter_v, (vector signed int)vzero); + val_s = vec_sums(val_v, vzero); + vec_st(val_s, 0, tempo); + dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1); + } } break; - case 16: + case 16: { - for(i=0; i>7, 0, (1<<15)-1); - } + vec_st(val_s, 0, tempo); + dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1); + } } break; - default: + default: { - for(i=0; i 8) { - src_v1 = vec_ld(srcPos + j + 16, src); - } - src_vF = vec_perm(src_v0, src_v1, permS); + // loading src_v0 is useless, it's already done above + //vector unsigned char src_v0 = vec_ld(srcPos + j, src); + vector unsigned char src_v1, src_vF; + vector signed short src_v, filter_v1R, filter_v; + if ((((int)src + srcPos)% 16) > 8) { + src_v1 = vec_ld(srcPos + j + 16, src); + } + src_vF = vec_perm(src_v0, src_v1, permS); - src_v = // vec_unpackh sign-extends... - (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); - // loading filter_v0R is useless, it's already done above - //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter); - filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter); - filter_v = vec_perm(filter_v0R, filter_v1R, permF); + src_v = // vec_unpackh sign-extends... + (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); + // loading filter_v0R is useless, it's already done above + //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter); + filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter); + filter_v = vec_perm(filter_v0R, filter_v1R, permF); - val_v = vec_msums(src_v, filter_v, val_v); + val_v = vec_msums(src_v, filter_v, val_v); } val_s = vec_sums(val_v, vzero); vec_st(val_s, 0, tempo); dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1); - } + } } - } + } } static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) { - uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY; - // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] ); - uint8_t *ysrc = src[0]; - uint8_t *usrc = src[1]; - uint8_t *vsrc = src[2]; - const int width = c->srcW; - const int height = srcSliceH; - const int lumStride = srcStride[0]; - const int chromStride = srcStride[1]; - const int dstStride = dstStride_a[0]; - const vector unsigned char yperm = vec_lvsl(0, ysrc); - const int vertLumPerChroma = 2; - register unsigned int y; + int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) { + uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY; + // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] ); + uint8_t *ysrc = src[0]; + uint8_t *usrc = src[1]; + uint8_t *vsrc = src[2]; + const int width = c->srcW; + const int height = srcSliceH; + const int lumStride = srcStride[0]; + const int chromStride = srcStride[1]; + const int dstStride = dstStride_a[0]; + const vector unsigned char yperm = vec_lvsl(0, ysrc); + const int vertLumPerChroma = 2; + register unsigned int y; - 
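All the specialized cases (4-, 8- and 16-tap, plus the generic default) accelerate one and the same scalar horizontal scaler, the loop the filterSize%4 fallback spells out: a filterSize-tap dot product starting at filterPos[i], renormalized by 7 bits and clamped to the 15-bit intermediate range (names as in the function above):

for (i = 0; i < dstW; i++) {
    int j, srcPos = filterPos[i], val = 0;
    for (j = 0; j < filterSize; j++)
        val += ((int)src[srcPos + j]) * filter[filterSize*i + j];
    dst[i] = av_clip(val >> 7, 0, (1<<15) - 1);
}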
if(width&15){ - yv12toyuy2( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride); - return srcSliceH; - } - - /* this code assume: - - 1) dst is 16 bytes-aligned - 2) dstStride is a multiple of 16 - 3) width is a multiple of 16 - 4) lum&chrom stride are multiple of 8 - */ - - for(y=0; y> 1; - vector unsigned char v_yA = vec_ld(i, ysrc); - vector unsigned char v_yB = vec_ld(i + 16, ysrc); - vector unsigned char v_yC = vec_ld(i + 32, ysrc); - vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm); - vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm); - vector unsigned char v_uA = vec_ld(j, usrc); - vector unsigned char v_uB = vec_ld(j + 16, usrc); - vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc)); - vector unsigned char v_vA = vec_ld(j, vsrc); - vector unsigned char v_vB = vec_ld(j + 16, vsrc); - vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc)); - vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); - vector unsigned char v_uv_b = vec_mergel(v_u, v_v); - vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a); - vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a); - vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b); - vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b); - vec_st(v_yuy2_0, (i << 1), dst); - vec_st(v_yuy2_1, (i << 1) + 16, dst); - vec_st(v_yuy2_2, (i << 1) + 32, dst); - vec_st(v_yuy2_3, (i << 1) + 48, dst); - } - if (i < width) { - const unsigned int j = i >> 1; - vector unsigned char v_y1 = vec_ld(i, ysrc); - vector unsigned char v_u = vec_ld(j, usrc); - vector unsigned char v_v = vec_ld(j, vsrc); - vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); - vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a); - vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a); - vec_st(v_yuy2_0, (i << 1), dst); - vec_st(v_yuy2_1, (i << 1) + 16, dst); - } - if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) - { - usrc += chromStride; - vsrc += chromStride; - } - ysrc += lumStride; - dst += dstStride; + if (width&15) { + yv12toyuy2( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride); + return srcSliceH; } - return srcSliceH; + /* this code assume: + + 1) dst is 16 bytes-aligned + 2) dstStride is a multiple of 16 + 3) width is a multiple of 16 + 4) lum&chrom stride are multiple of 8 + */ + + for (y=0; y> 1; + vector unsigned char v_yA = vec_ld(i, ysrc); + vector unsigned char v_yB = vec_ld(i + 16, ysrc); + vector unsigned char v_yC = vec_ld(i + 32, ysrc); + vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm); + vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm); + vector unsigned char v_uA = vec_ld(j, usrc); + vector unsigned char v_uB = vec_ld(j + 16, usrc); + vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc)); + vector unsigned char v_vA = vec_ld(j, vsrc); + vector unsigned char v_vB = vec_ld(j + 16, vsrc); + vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc)); + vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); + vector unsigned char v_uv_b = vec_mergel(v_u, v_v); + vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a); + vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a); + vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b); + vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b); + vec_st(v_yuy2_0, (i << 1), dst); + vec_st(v_yuy2_1, (i << 1) + 16, dst); + vec_st(v_yuy2_2, (i << 1) + 32, dst); + vec_st(v_yuy2_3, (i << 1) + 48, dst); + } + if (i < width) { + const unsigned int j = i >> 1; + vector unsigned char v_y1 = 
vec_ld(i, ysrc); + vector unsigned char v_u = vec_ld(j, usrc); + vector unsigned char v_v = vec_ld(j, vsrc); + vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); + vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a); + vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a); + vec_st(v_yuy2_0, (i << 1), dst); + vec_st(v_yuy2_1, (i << 1) + 16, dst); + } + if ( (y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) { + usrc += chromStride; + vsrc += chromStride; + } + ysrc += lumStride; + dst += dstStride; + } + + return srcSliceH; } static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, - int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) { - uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY; - // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] ); - uint8_t *ysrc = src[0]; - uint8_t *usrc = src[1]; - uint8_t *vsrc = src[2]; - const int width = c->srcW; - const int height = srcSliceH; - const int lumStride = srcStride[0]; - const int chromStride = srcStride[1]; - const int dstStride = dstStride_a[0]; - const int vertLumPerChroma = 2; - const vector unsigned char yperm = vec_lvsl(0, ysrc); - register unsigned int y; + int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) { + uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY; + // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] ); + uint8_t *ysrc = src[0]; + uint8_t *usrc = src[1]; + uint8_t *vsrc = src[2]; + const int width = c->srcW; + const int height = srcSliceH; + const int lumStride = srcStride[0]; + const int chromStride = srcStride[1]; + const int dstStride = dstStride_a[0]; + const int vertLumPerChroma = 2; + const vector unsigned char yperm = vec_lvsl(0, ysrc); + register unsigned int y; - if(width&15){ - yv12touyvy( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride); - return srcSliceH; - } - - /* this code assume: - - 1) dst is 16 bytes-aligned - 2) dstStride is a multiple of 16 - 3) width is a multiple of 16 - 4) lum&chrom stride are multiple of 8 - */ - - for(y=0; y> 1; - vector unsigned char v_yA = vec_ld(i, ysrc); - vector unsigned char v_yB = vec_ld(i + 16, ysrc); - vector unsigned char v_yC = vec_ld(i + 32, ysrc); - vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm); - vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm); - vector unsigned char v_uA = vec_ld(j, usrc); - vector unsigned char v_uB = vec_ld(j + 16, usrc); - vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc)); - vector unsigned char v_vA = vec_ld(j, vsrc); - vector unsigned char v_vB = vec_ld(j + 16, vsrc); - vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc)); - vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); - vector unsigned char v_uv_b = vec_mergel(v_u, v_v); - vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1); - vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1); - vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2); - vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2); - vec_st(v_uyvy_0, (i << 1), dst); - vec_st(v_uyvy_1, (i << 1) + 16, dst); - vec_st(v_uyvy_2, (i << 1) + 32, dst); - vec_st(v_uyvy_3, (i << 1) + 48, dst); - } - if (i < width) { - const unsigned int j = i >> 1; - vector unsigned char v_y1 = vec_ld(i, ysrc); - vector unsigned char v_u = vec_ld(j, usrc); - vector unsigned char v_v = vec_ld(j, vsrc); - vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); - vector unsigned char v_uyvy_0 = 
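The store sequence of yv12toyuy2_unscaled_altivec builds YUY2 from byte merges alone: vec_mergeh(v_u, v_v) produces U0 V0 U1 V1 ..., and merging luma against that stream places a Y before every chroma byte. A byte-level model of the first four outputs:

#include <stdint.h>

static void yuy2_first_pair(const uint8_t *y, const uint8_t *u,
                            const uint8_t *v, uint8_t *dst)
{
    dst[0] = y[0]; /* mergeh(y, uv) puts luma in the even slots */
    dst[1] = u[0]; /* uv = mergeh(u, v) = U0 V0 U1 V1 ...       */
    dst[2] = y[1];
    dst[3] = v[0];
}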
vec_mergeh(v_uv_a, v_y1); - vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1); - vec_st(v_uyvy_0, (i << 1), dst); - vec_st(v_uyvy_1, (i << 1) + 16, dst); - } - if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) - { - usrc += chromStride; - vsrc += chromStride; - } - ysrc += lumStride; - dst += dstStride; + if (width&15) { + yv12touyvy( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride); + return srcSliceH; } - return srcSliceH; + + /* this code assume: + + 1) dst is 16 bytes-aligned + 2) dstStride is a multiple of 16 + 3) width is a multiple of 16 + 4) lum&chrom stride are multiple of 8 + */ + + for (y=0; y> 1; + vector unsigned char v_yA = vec_ld(i, ysrc); + vector unsigned char v_yB = vec_ld(i + 16, ysrc); + vector unsigned char v_yC = vec_ld(i + 32, ysrc); + vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm); + vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm); + vector unsigned char v_uA = vec_ld(j, usrc); + vector unsigned char v_uB = vec_ld(j + 16, usrc); + vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc)); + vector unsigned char v_vA = vec_ld(j, vsrc); + vector unsigned char v_vB = vec_ld(j + 16, vsrc); + vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc)); + vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); + vector unsigned char v_uv_b = vec_mergel(v_u, v_v); + vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1); + vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1); + vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2); + vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2); + vec_st(v_uyvy_0, (i << 1), dst); + vec_st(v_uyvy_1, (i << 1) + 16, dst); + vec_st(v_uyvy_2, (i << 1) + 32, dst); + vec_st(v_uyvy_3, (i << 1) + 48, dst); + } + if (i < width) { + const unsigned int j = i >> 1; + vector unsigned char v_y1 = vec_ld(i, ysrc); + vector unsigned char v_u = vec_ld(j, usrc); + vector unsigned char v_v = vec_ld(j, vsrc); + vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); + vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1); + vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1); + vec_st(v_uyvy_0, (i << 1), dst); + vec_st(v_uyvy_1, (i << 1) + 16, dst); + } + if ( (y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) { + usrc += chromStride; + vsrc += chromStride; + } + ysrc += lumStride; + dst += dstStride; + } + return srcSliceH; }
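yv12touyvy_unscaled_altivec is the same routine with the merge operands flipped, vec_mergeh(v_uv_a, v_y1), so the chroma stream takes the even bytes and the result is UYVY. The equivalent byte-level sketch:

#include <stdint.h>

static void uyvy_first_pair(const uint8_t *y, const uint8_t *u,
                            const uint8_t *v, uint8_t *dst)
{
    dst[0] = u[0]; /* mergeh(uv, y) puts chroma in the even slots */
    dst[1] = y[0];
    dst[2] = v[0];
    dst[3] = y[1];
}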