From ac6a2e4550d27bfd23c0c096b1ee10b1d76de213 Mon Sep 17 00:00:00 2001
From: Michael Niedermayer
Date: Sat, 9 Feb 2002 17:03:53 +0000
Subject: [PATCH] bgr24toY in MMX

Originally committed as revision 4613 to svn://svn.mplayerhq.hu/mplayer/trunk/postproc
---
 postproc/swscale.c          | 10 ++++++
 postproc/swscale_template.c | 71 ++++++++++++++++++++++++++++++++++++-
 2 files changed, 80 insertions(+), 1 deletion(-)

diff --git a/postproc/swscale.c b/postproc/swscale.c
index 511b10c317..dbd381c943 100644
--- a/postproc/swscale.c
+++ b/postproc/swscale.c
@@ -68,6 +68,8 @@ untested special converters
 //#undef ARCH_X86
 #define DITHER1XBPP
 
+#define FAST_BGR2YV12 // use 7 bit coeffs instead of 15bit
+
 #define RET 0xC3 //near return opcode for X86
 
 #ifdef MP_DEBUG
@@ -178,6 +180,14 @@ static uint64_t __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
 static uint64_t __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
 static uint64_t __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
 
+#ifdef FAST_BGR2YV12
+static const uint64_t bgr2YCoeff  __attribute__((aligned(8))) = 0x000000210041000DULL;
+#else
+static const uint64_t bgr2YCoeff  __attribute__((aligned(8))) = 0x000020E540830C8BULL;
+#endif
+static const uint64_t bgr2YOffset __attribute__((aligned(8))) = 0x1010101010101010ULL;
+static const uint64_t w1111       __attribute__((aligned(8))) = 0x0001000100010001ULL;
+
 // FIXME remove
 static uint64_t __attribute__((aligned(8))) asm_yalpha1;
 static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
diff --git a/postproc/swscale_template.c b/postproc/swscale_template.c
index 26e23fe350..bea137796d 100644
--- a/postproc/swscale_template.c
+++ b/postproc/swscale_template.c
@@ -1635,7 +1635,76 @@ static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1
 
 static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
 {
-#ifdef HAVE_MMXFIXME
+#ifdef HAVE_MMX
+	asm volatile(
+		"movl %2, %%eax			\n\t"
+		"movq bgr2YCoeff, %%mm6		\n\t"
+		"movq w1111, %%mm5		\n\t"
+		"pxor %%mm7, %%mm7		\n\t"
+		"leal (%%eax, %%eax, 2), %%ebx	\n\t"
+		".balign 16			\n\t"
+		"1:				\n\t"
+		PREFETCH" 64(%0, %%ebx)		\n\t"
+		"movd (%0, %%ebx), %%mm0	\n\t"
+		"movd 3(%0, %%ebx), %%mm1	\n\t"
+		"punpcklbw %%mm7, %%mm0		\n\t"
+		"punpcklbw %%mm7, %%mm1		\n\t"
+		"movd 6(%0, %%ebx), %%mm2	\n\t"
+		"movd 9(%0, %%ebx), %%mm3	\n\t"
+		"punpcklbw %%mm7, %%mm2		\n\t"
+		"punpcklbw %%mm7, %%mm3		\n\t"
+		"pmaddwd %%mm6, %%mm0		\n\t"
+		"pmaddwd %%mm6, %%mm1		\n\t"
+		"pmaddwd %%mm6, %%mm2		\n\t"
+		"pmaddwd %%mm6, %%mm3		\n\t"
+#ifndef FAST_BGR2YV12
+		"psrad $8, %%mm0		\n\t"
+		"psrad $8, %%mm1		\n\t"
+		"psrad $8, %%mm2		\n\t"
+		"psrad $8, %%mm3		\n\t"
+#endif
+		"packssdw %%mm1, %%mm0		\n\t"
+		"packssdw %%mm3, %%mm2		\n\t"
+		"pmaddwd %%mm5, %%mm0		\n\t"
+		"pmaddwd %%mm5, %%mm2		\n\t"
+		"packssdw %%mm2, %%mm0		\n\t"
+		"psraw $7, %%mm0		\n\t"
+
+		"movd 12(%0, %%ebx), %%mm4	\n\t"
+		"movd 15(%0, %%ebx), %%mm1	\n\t"
+		"punpcklbw %%mm7, %%mm4		\n\t"
+		"punpcklbw %%mm7, %%mm1		\n\t"
+		"movd 18(%0, %%ebx), %%mm2	\n\t"
+		"movd 21(%0, %%ebx), %%mm3	\n\t"
+		"punpcklbw %%mm7, %%mm2		\n\t"
+		"punpcklbw %%mm7, %%mm3		\n\t"
+		"pmaddwd %%mm6, %%mm4		\n\t"
+		"pmaddwd %%mm6, %%mm1		\n\t"
+		"pmaddwd %%mm6, %%mm2		\n\t"
+		"pmaddwd %%mm6, %%mm3		\n\t"
+#ifndef FAST_BGR2YV12
+		"psrad $8, %%mm4		\n\t"
+		"psrad $8, %%mm1		\n\t"
+		"psrad $8, %%mm2		\n\t"
+		"psrad $8, %%mm3		\n\t"
+#endif
+		"packssdw %%mm1, %%mm4		\n\t"
+		"packssdw %%mm3, %%mm2		\n\t"
+		"pmaddwd %%mm5, %%mm4		\n\t"
+		"pmaddwd %%mm5, %%mm2		\n\t"
+		"addl $24, %%ebx		\n\t"
+		"packssdw %%mm2, %%mm4		\n\t"
+		"psraw $7, %%mm4		\n\t"
+
"packuswb %%mm4, %%mm0 \n\t" + "paddusb bgr2YOffset, %%mm0 \n\t" + + MOVNTQ(%%mm0, (%1, %%eax)) + "addl $8, %%eax \n\t" + " js 1b \n\t" + : : "r" (src+width*3), "r" (dst+width), "g" (-width) + : "%eax", "%ebx" + ); #else int i; for(i=0; i