#ifndef __MPLAYER_MEMCPY
#define __MPLAYER_MEMCPY

/*
 * This code was taken from Linux 2.4.3 and slightly modified for the
 * MMX2 instruction set. I did this because Linux works on page-aligned
 * blocks, while MPlayer copies weakly ordered data, so the original
 * routines give no speedup here. Only prefetchnta and movntq used
 * together have an effect!
 * If you have questions, please contact me: Nick Kurshev <nickols_k@mail.ru>.
 */

// 3dnow memcpy support from kernel 2.4.2
// by Pontscho/fresh!mindworkz

#if defined( HAVE_MMX2 ) || defined( HAVE_3DNOW )

/* For small memory blocks (< 256 bytes) this version is faster. */
#define small_memcpy(to,from,n)\
{\
register unsigned long int dummy;\
__asm__ __volatile__(\
	"rep ; movsb\n"\
	/* rep movsb modifies edi, esi and ecx, so they must appear as */\
	/* outputs, not just inputs, or the compiler may reuse them. */\
	:"=&D" (to), "=&S" (from), "=&c" (dummy)\
	:"0" (to), "1" (from), "2" (n)\
	: "memory");\
}
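
/*
 * For reference, a plain-C equivalent of what small_memcpy's
 * "rep movsb" performs (a hypothetical sketch, kept out of the
 * build on purpose; the function name is not part of this header):
 */
#if 0
static void small_memcpy_c(void *to, const void *from, unsigned n)
{
	unsigned char *d = to;
	const unsigned char *s = from;
	while (n--)
		*d++ = *s++;	/* one byte per step, like rep movsb */
}
#endif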

inline static void * fast_memcpy(void * to, const void * from, unsigned len)
{
	void *p = to;	/* like memcpy(), return the destination pointer */
	int i;

	if(len >= 0x200) /* take the SIMD path only for blocks of 512+ bytes */
	{
		i = len >> 6;	/* number of 64-byte chunks */
		len &= 63;	/* tail, copied by small_memcpy below */

		/* Pre-warm the first five cache lines of the source. */
		__asm__ __volatile__ (
#if defined( HAVE_3DNOW ) && !defined( HAVE_MMX2 )
			"prefetch (%0)\n"
			"prefetch 64(%0)\n"
			"prefetch 128(%0)\n"
			"prefetch 192(%0)\n"
			"prefetch 256(%0)\n"
#else
			"prefetchnta (%0)\n"
			"prefetchnta 64(%0)\n"
			"prefetchnta 128(%0)\n"
			"prefetchnta 192(%0)\n"
			"prefetchnta 256(%0)\n"
#endif
			: : "r" (from) );
		/*
		 * This algorithm is most effective when the code consecutively
		 * reads and writes blocks the size of a cache line. The cache
		 * line size is processor-dependent, but it is at least 32 bytes
		 * on all of these processors. Ideally the number of load/store
		 * instructions would be a multiple of the number of the
		 * processor's decoders, but that is not always possible.
		 */
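		/*
		 * Illustrative only: in portable C terms, the SIMD loop below
		 * does roughly the following (prefetch64 and copy64_nt are
		 * hypothetical helpers standing in for the prefetch and
		 * movq/movups/movntq lines):
		 *
		 *	for (; i > 0; i--) {
		 *		prefetch64(from + 320);	// stay five lines ahead
		 *		copy64_nt(to, from);	// 64 bytes, stores bypass cache
		 *		from += 64;
		 *		to += 64;
		 *	}
		 */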
		for(; i>0; i--)
		{
			__asm__ __volatile__ (
#if defined( HAVE_3DNOW ) && !defined( HAVE_MMX2 )
			"prefetch 320(%0)\n"
#else
			"prefetchnta 320(%0)\n"
#endif
#ifdef HAVE_SSE /* Only P3 (may be Cyrix3) */
			"movups (%0), %%xmm0\n"
			"movups 16(%0), %%xmm1\n"
			"movntps %%xmm0, (%1)\n"
			"movntps %%xmm1, 16(%1)\n"
			"movups 32(%0), %%xmm0\n"
			"movups 48(%0), %%xmm1\n"
			"movntps %%xmm0, 32(%1)\n"
			"movntps %%xmm1, 48(%1)\n"
#else /* Only K7 (may be other) */
#if defined( HAVE_3DNOW ) && !defined( HAVE_MMX2 )
			"movq (%0), %%mm0\n"
			"movq 8(%0), %%mm1\n"
			"movq 16(%0), %%mm2\n"
			"movq 24(%0), %%mm3\n"
			"movq %%mm0, (%1)\n"
			"movq %%mm1, 8(%1)\n"
			"movq %%mm2, 16(%1)\n"
			"movq %%mm3, 24(%1)\n"
			"movq 32(%0), %%mm0\n"
			"movq 40(%0), %%mm1\n"
			"movq 48(%0), %%mm2\n"
			"movq 56(%0), %%mm3\n"
			"movq %%mm0, 32(%1)\n"
			"movq %%mm1, 40(%1)\n"
			"movq %%mm2, 48(%1)\n"
			"movq %%mm3, 56(%1)\n"
#else
			"movq (%0), %%mm0\n"
			"movq 8(%0), %%mm1\n"
			"movq 16(%0), %%mm2\n"
			"movq 24(%0), %%mm3\n"
			"movntq %%mm0, (%1)\n"
			"movntq %%mm1, 8(%1)\n"
			"movntq %%mm2, 16(%1)\n"
			"movntq %%mm3, 24(%1)\n"
			"movq 32(%0), %%mm0\n"
			"movq 40(%0), %%mm1\n"
			"movq 48(%0), %%mm2\n"
			"movq 56(%0), %%mm3\n"
			"movntq %%mm0, 32(%1)\n"
			"movntq %%mm1, 40(%1)\n"
			"movntq %%mm2, 48(%1)\n"
			"movntq %%mm3, 56(%1)\n"
#endif
#endif
			:: "r" (from), "r" (to) : "memory");
			from = (const unsigned char *)from + 64;
			to = (unsigned char *)to + 64;
		}
#if defined( HAVE_3DNOW ) && !defined( HAVE_MMX2 )
		/* femms: the faster 3DNow! way to leave MMX state */
		__asm__ __volatile__ ("femms":::"memory");
#else
		/* emms: re-enable FPU code after using MMX registers */
		__asm__ __volatile__ ("emms":::"memory");
#endif
	}
	/*
	 * Now do the tail of the block.
	 */
	small_memcpy(to, from, len);
	return p;
}

#define memcpy(a,b,c) fast_memcpy(a,b,c)

#undef small_memcpy
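
/*
 * Usage sketch (hypothetical caller, not part of this header): a file
 * compiled with HAVE_MMX2 or HAVE_3DNOW that includes this header has
 * its memcpy() calls redirected to fast_memcpy(). Kept out of the
 * build; "fastmemcpy.h" is an assumed name for this header.
 */
#if 0
#include "fastmemcpy.h"

static void copy_frame(unsigned char *dst, const unsigned char *src,
                       unsigned bytes)
{
	memcpy(dst, src, bytes);	/* expands to fast_memcpy(dst, src, bytes) */
}
#endif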

#endif /* HAVE_MMX2 || HAVE_3DNOW */

#endif /* __MPLAYER_MEMCPY */