/*
 * aclib - advanced C library ;)
 * functions which improve and expand the standard C library
 *
 * This file is part of MPlayer.
 *
 * MPlayer is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * MPlayer is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with MPlayer; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#if !HAVE_SSE2
/*
   The P3 processor has only one SSE decoder, so it can execute only one SSE
   instruction per CPU clock, but it has three MMX decoders (including the
   load/store unit) and can execute three MMX instructions per CPU clock.
   The P4 might fare better, but after reading
   http://www.emulators.com/pentium4.htm
   I have doubts. In any case, an SSE2 version of this code could be written better.
*/
#undef HAVE_SSE
#define HAVE_SSE 0
#endif

/*
  This part of the code was taken by me from Linux 2.4.3 and slightly modified
  for the MMX, MMX2 and SSE instruction sets. I did this because Linux uses
  page-aligned blocks, whereas MPlayer works with weakly-ordered data, which
  the original sources cannot speed up. Only using PREFETCHNTA and MOVNTQ
  together has an effect!

  From the IA-32 Intel Architecture Software Developer's Manual Volume 1,
  Order Number 245470:
  "10.4.6. Cacheability Control, Prefetch, and Memory Ordering Instructions"

  Data referenced by a program can be temporal (data will be used again) or
  non-temporal (data will be referenced once and not reused in the immediate
  future). To make efficient use of the processor's caches, it is generally
  desirable to cache temporal data and not cache non-temporal data. Overloading
  the processor's caches with non-temporal data is sometimes referred to as
  "polluting the caches".
  The non-temporal data is written to memory with Write-Combining semantics.

  The PREFETCHh instruction permits a program to load data into the processor
  at a suggested cache level, so that it is closer to the processor's load and
  store unit when it is needed. If the data is already present in a level of
  the cache hierarchy that is closer to the processor, the PREFETCHh instruction
  will not result in any data movement.
  But we should use PREFETCHNTA: it fetches non-temporal data into a location
  close to the processor, minimizing cache pollution.

  The MOVNTQ (store quadword using non-temporal hint) instruction stores
  packed integer data from an MMX register to memory, using a non-temporal hint.
  The MOVNTPS (store packed single-precision floating-point values using
  non-temporal hint) instruction stores packed floating-point data from an
  XMM register to memory, using a non-temporal hint.

  The SFENCE (Store Fence) instruction controls write ordering by creating a
  fence for memory store operations. This instruction guarantees that the results
  of every store instruction that precedes the store fence in program order are
  globally visible before any store instruction that follows the fence. The
  SFENCE instruction provides an efficient way of ensuring ordering between
  procedures that produce weakly-ordered data and procedures that consume that
  data.

  If you have questions, please contact me: Nick Kurshev: nickols_k@mail.ru.
*/
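
/*
  To make the above concrete, here is the same PREFETCHNTA / MOVNTPS / SFENCE
  pattern in miniature, written with SSE compiler intrinsics instead of inline
  assembly. This is an illustrative sketch only (excluded from the build, not
  used by MPlayer): it assumes <xmmintrin.h> is available, that the destination
  is 16-byte aligned, and that len is a multiple of 64 bytes.
*/
#if 0
#include <stddef.h>
#include <xmmintrin.h>

static void nt_copy_sketch(void *dst, const void *src, size_t len)
{
    const float *s = (const float *)src;
    float       *d = (float *)dst;          /* assumed 16-byte aligned */
    size_t       n;
    for (n = len / 64; n > 0; n--) {
        _mm_prefetch((const char *)s + 320, _MM_HINT_NTA); /* PREFETCHNTA: pull ahead, minimize cache pollution */
        __m128 a = _mm_loadu_ps(s +  0);
        __m128 b = _mm_loadu_ps(s +  4);
        __m128 c = _mm_loadu_ps(s +  8);
        __m128 e = _mm_loadu_ps(s + 12);
        _mm_stream_ps(d +  0, a);            /* MOVNTPS: non-temporal (streaming) stores */
        _mm_stream_ps(d +  4, b);
        _mm_stream_ps(d +  8, c);
        _mm_stream_ps(d + 12, e);
        s += 16;
        d += 16;
    }
    _mm_sfence();                            /* SFENCE: make the streamed stores globally visible */
}
#endif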

// 3dnow memcpy support from kernel 2.4.2
//  by Pontscho/fresh!mindworkz

#undef HAVE_ONLY_MMX1
#if HAVE_MMX && !HAVE_MMX2 && !HAVE_AMD3DNOW && !HAVE_SSE
/*  means: MMX v.1. Note: Since we added alignment of the destination, it speeds
    up memory copying on PentMMX, Celeron-1 and P2 by up to 12% versus the
    standard (non-MMX-optimized) version.
    Note: on K6-2+ it speeds up memory copying by up to 25%, and
    on K7 and P3 by about 500% (5 times). */
#define HAVE_ONLY_MMX1
#endif

#undef HAVE_K6_2PLUS
#if !HAVE_MMX2 && HAVE_AMD3DNOW
#define HAVE_K6_2PLUS
#endif

/* for small memory blocks (<256 bytes) this version is faster */
#define small_memcpy(to,from,n)\
{\
register x86_reg dummy;\
__asm__ volatile(\
    "rep; movsb"\
    :"=&D"(to), "=&S"(from), "=&c"(dummy)\
/* It's the most portable way to notify the compiler */\
/* that edi, esi and ecx are clobbered in the asm block. */\
/* Thanks to A'rpi for the hint! */\
    :"0" (to), "1" (from),"2" (n)\
    : "memory");\
}
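
/*
  For reference, small_memcpy() copies exactly like the byte loop sketched
  below; the "rep; movsb" form is simply cheaper for tiny blocks. One extra
  property of the macro that the loop does not show: because "to" and "from"
  are tied to the EDI/ESI outputs, the macro leaves both pointers advanced past
  the copied block, which the callers below rely on. Illustrative sketch only,
  excluded from the build.
*/
#if 0
static void small_memcpy_sketch(unsigned char *to, const unsigned char *from, size_t n)
{
    while (n--)
        *to++ = *from++;   /* what "rep; movsb" does: one byte per iteration */
}
#endif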

#undef MMREG_SIZE
#if HAVE_SSE
#define MMREG_SIZE 16
#else
#define MMREG_SIZE 64 //8
#endif

#undef PREFETCH
#undef EMMS

#if HAVE_MMX2
#define PREFETCH "prefetchnta"
#elif HAVE_AMD3DNOW
#define PREFETCH "prefetch"
#else
#define PREFETCH " # nop"
#endif

/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#if HAVE_AMD3DNOW
#define EMMS "femms"
#else
#define EMMS "emms"
#endif

#undef MOVNTQ
#if HAVE_MMX2
#define MOVNTQ "movntq"
#else
#define MOVNTQ "movq"
#endif

#undef MIN_LEN
#ifdef HAVE_ONLY_MMX1
#define MIN_LEN 0x800 /* 2K blocks */
#else
#define MIN_LEN 0x40 /* 64-byte blocks */
#endif

static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
{
    void *retval;
    size_t i;
    retval = to;
#ifdef STATISTICS
    {
        static int freq[33];
        static int t=0;
        int i;
        for(i=0; len>(1<<i); i++);
        freq[i]++;
        t++;
        if(1024*1024*1024 % t == 0)
            for(i=0; i<32; i++)
                printf("freq < %8d %4d\n", 1<<i, freq[i]);
    }
#endif
#ifndef HAVE_ONLY_MMX1
    /* PREFETCH has effect even for MOVSB instruction ;) */
    __asm__ volatile (
        PREFETCH" (%0)\n"
        PREFETCH" 64(%0)\n"
        PREFETCH" 128(%0)\n"
        PREFETCH" 192(%0)\n"
        PREFETCH" 256(%0)\n"
        : : "r" (from) );
#endif
    if(len >= MIN_LEN)
    {
        register x86_reg delta;
        /* Align destination to MMREG_SIZE boundary */
        delta = ((intptr_t)to)&(MMREG_SIZE-1);
        if(delta)
        {
            delta=MMREG_SIZE-delta;
            len -= delta;
            small_memcpy(to, from, delta);
        }
        i = len >> 6; /* len/64 */
        len&=63;
        /*
           This algorithm is most effective when the code consecutively
           reads and writes blocks that are the size of a cache line.
           The cache line size is processor-dependent,
           but it will be at least 32 bytes on any processor.
           It would be better for the number of read and write instructions
           to be a multiple of the number of the processor's decoders,
           but that is not always possible.
        */
#if HAVE_SSE /* Only P3 (may be Cyrix3) */
        if(((intptr_t)from) & 15)
        /* if SRC is misaligned */
        for(; i>0; i--)
        {
            __asm__ volatile (
            PREFETCH" 320(%0)\n"
            "movups (%0), %%xmm0\n"
            "movups 16(%0), %%xmm1\n"
            "movups 32(%0), %%xmm2\n"
            "movups 48(%0), %%xmm3\n"
            "movntps %%xmm0, (%1)\n"
            "movntps %%xmm1, 16(%1)\n"
            "movntps %%xmm2, 32(%1)\n"
            "movntps %%xmm3, 48(%1)\n"
            :: "r" (from), "r" (to) : "memory");
            from=((const unsigned char *)from)+64;
            to=((unsigned char *)to)+64;
        }
        else
        /*
           Only if SRC is aligned on a 16-byte boundary.
           This allows the use of movaps instead of movups, which requires the
           data to be aligned, or a general-protection exception (#GP) is generated.
        */
        for(; i>0; i--)
        {
            __asm__ volatile (
            PREFETCH" 320(%0)\n"
            "movaps (%0), %%xmm0\n"
            "movaps 16(%0), %%xmm1\n"
            "movaps 32(%0), %%xmm2\n"
            "movaps 48(%0), %%xmm3\n"
            "movntps %%xmm0, (%1)\n"
            "movntps %%xmm1, 16(%1)\n"
            "movntps %%xmm2, 32(%1)\n"
            "movntps %%xmm3, 48(%1)\n"
            :: "r" (from), "r" (to) : "memory");
            from=((const unsigned char *)from)+64;
            to=((unsigned char *)to)+64;
        }
#else
        // Align destination at BLOCK_SIZE boundary
        for(; ((intptr_t)to & (BLOCK_SIZE-1)) && i>0; i--)
        {
            __asm__ volatile (
#ifndef HAVE_ONLY_MMX1
            PREFETCH" 320(%0)\n"
#endif
            "movq (%0), %%mm0\n"
            "movq 8(%0), %%mm1\n"
            "movq 16(%0), %%mm2\n"
            "movq 24(%0), %%mm3\n"
            "movq 32(%0), %%mm4\n"
            "movq 40(%0), %%mm5\n"
            "movq 48(%0), %%mm6\n"
            "movq 56(%0), %%mm7\n"
            MOVNTQ" %%mm0, (%1)\n"
            MOVNTQ" %%mm1, 8(%1)\n"
            MOVNTQ" %%mm2, 16(%1)\n"
            MOVNTQ" %%mm3, 24(%1)\n"
            MOVNTQ" %%mm4, 32(%1)\n"
            MOVNTQ" %%mm5, 40(%1)\n"
            MOVNTQ" %%mm6, 48(%1)\n"
            MOVNTQ" %%mm7, 56(%1)\n"
            :: "r" (from), "r" (to) : "memory");
            from=((const unsigned char *)from)+64;
            to=((unsigned char *)to)+64;
        }

        //    printf(" %d %d\n", (int)from&1023, (int)to&1023);
        // Pure assembly because gcc is a bit unpredictable ;)
        if(i>=BLOCK_SIZE/64)
            __asm__ volatile(
                "xor %%"REG_a", %%"REG_a"       \n\t"
                ASMALIGN(4)
                "1:                             \n\t"
                "movl (%0, %%"REG_a"), %%ecx    \n\t"
                "movl 32(%0, %%"REG_a"), %%ecx  \n\t"
                "movl 64(%0, %%"REG_a"), %%ecx  \n\t"
                "movl 96(%0, %%"REG_a"), %%ecx  \n\t"
                "add $128, %%"REG_a"            \n\t"
                "cmp %3, %%"REG_a"              \n\t"
                " jb 1b                         \n\t"

                "xor %%"REG_a", %%"REG_a"       \n\t"

                ASMALIGN(4)
                "2:                             \n\t"
                "movq (%0, %%"REG_a"), %%mm0\n"
                "movq 8(%0, %%"REG_a"), %%mm1\n"
                "movq 16(%0, %%"REG_a"), %%mm2\n"
                "movq 24(%0, %%"REG_a"), %%mm3\n"
                "movq 32(%0, %%"REG_a"), %%mm4\n"
                "movq 40(%0, %%"REG_a"), %%mm5\n"
                "movq 48(%0, %%"REG_a"), %%mm6\n"
                "movq 56(%0, %%"REG_a"), %%mm7\n"
                MOVNTQ" %%mm0, (%1, %%"REG_a")\n"
                MOVNTQ" %%mm1, 8(%1, %%"REG_a")\n"
                MOVNTQ" %%mm2, 16(%1, %%"REG_a")\n"
                MOVNTQ" %%mm3, 24(%1, %%"REG_a")\n"
                MOVNTQ" %%mm4, 32(%1, %%"REG_a")\n"
                MOVNTQ" %%mm5, 40(%1, %%"REG_a")\n"
                MOVNTQ" %%mm6, 48(%1, %%"REG_a")\n"
                MOVNTQ" %%mm7, 56(%1, %%"REG_a")\n"
                "add $64, %%"REG_a"             \n\t"
                "cmp %3, %%"REG_a"              \n\t"
                "jb 2b                          \n\t"

#if CONFUSION_FACTOR > 0
                // a few percent speedup on out of order executing CPUs
                "mov %5, %%"REG_a"              \n\t"
                "2:                             \n\t"
                "movl (%0), %%ecx               \n\t"
                "movl (%0), %%ecx               \n\t"
                "movl (%0), %%ecx               \n\t"
                "movl (%0), %%ecx               \n\t"
                "dec %%"REG_a"                  \n\t"
                " jnz 2b                        \n\t"
#endif

                "xor %%"REG_a", %%"REG_a"       \n\t"
                "add %3, %0                     \n\t"
                "add %3, %1                     \n\t"
                "sub %4, %2                     \n\t"
                "cmp %4, %2                     \n\t"
                " jae 1b                        \n\t"
                : "+r" (from), "+r" (to), "+r" (i)
                : "r" ((x86_reg)BLOCK_SIZE), "i" (BLOCK_SIZE/64), "i" ((x86_reg)CONFUSION_FACTOR)
                : "%"REG_a, "%ecx"
            );

        for(; i>0; i--)
        {
            __asm__ volatile (
#ifndef HAVE_ONLY_MMX1
            PREFETCH" 320(%0)\n"
#endif
            "movq (%0), %%mm0\n"
            "movq 8(%0), %%mm1\n"
            "movq 16(%0), %%mm2\n"
            "movq 24(%0), %%mm3\n"
            "movq 32(%0), %%mm4\n"
            "movq 40(%0), %%mm5\n"
            "movq 48(%0), %%mm6\n"
            "movq 56(%0), %%mm7\n"
            MOVNTQ" %%mm0, (%1)\n"
            MOVNTQ" %%mm1, 8(%1)\n"
            MOVNTQ" %%mm2, 16(%1)\n"
            MOVNTQ" %%mm3, 24(%1)\n"
            MOVNTQ" %%mm4, 32(%1)\n"
            MOVNTQ" %%mm5, 40(%1)\n"
            MOVNTQ" %%mm6, 48(%1)\n"
            MOVNTQ" %%mm7, 56(%1)\n"
            :: "r" (from), "r" (to) : "memory");
            from=((const unsigned char *)from)+64;
            to=((unsigned char *)to)+64;
        }

#endif /* Have SSE */
#if HAVE_MMX2
        /* since movntq is weakly-ordered, a "sfence"
         * is needed to become ordered again. */
        __asm__ volatile ("sfence":::"memory");
#endif
#if !HAVE_SSE
        /* enables the FPU to be used again */
        __asm__ volatile (EMMS:::"memory");
#endif
    }
    /*
     * Now do the tail of the block
     */
    if(len) small_memcpy(to, from, len);
    return retval;
}
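
/*
  A sketch of how this template is typically consumed: the including file
  (aclib.c in MPlayer) defines RENAME() and the HAVE_* / BLOCK_SIZE /
  CONFUSION_FACTOR macros, includes this template once per instruction set,
  and a runtime dispatcher then selects the fastest variant for the host CPU.
  The names below (fast_memcpy_MMX2, fast_memcpy_ptr, init_fast_memcpy,
  has_mmx2) are hypothetical and purely illustrative. Excluded from the build.
*/
#if 0
#include <string.h>

#define RENAME(a) a ## _MMX2
#include "aclib_template.c"   /* emits fast_memcpy_MMX2() and mem2agpcpy_MMX2() */
#undef RENAME

static void *(*fast_memcpy_ptr)(void *to, const void *from, size_t len);

static void init_fast_memcpy(int has_mmx2)
{
    /* fall back to the libc memcpy when no suitable variant is available */
    fast_memcpy_ptr = has_mmx2 ? fast_memcpy_MMX2 : memcpy;
}
#endif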

/**
 * special copy routine for mem -> agp/pci copy (based upon fast_memcpy)
 */
static void * RENAME(mem2agpcpy)(void * to, const void * from, size_t len)
{
    void *retval;
    size_t i;
    retval = to;
#ifdef STATISTICS
    {
        static int freq[33];
        static int t=0;
        int i;
        for(i=0; len>(1<<i); i++);
        freq[i]++;
        t++;
        if(1024*1024*1024 % t == 0)
            for(i=0; i<32; i++)
                printf("mem2agp freq < %8d %4d\n", 1<<i, freq[i]);
    }
#endif
    if(len >= MIN_LEN)
    {
        register x86_reg delta;
        /* Align destination to an 8-byte boundary */
        delta = ((intptr_t)to)&7;
        if(delta)
        {
            delta=8-delta;
            len -= delta;
            small_memcpy(to, from, delta);
        }
        i = len >> 6; /* len/64 */
        len &= 63;
        /*
           This algorithm is most effective when the code consecutively
           reads and writes blocks that are the size of a cache line.
           The cache line size is processor-dependent,
           but it will be at least 32 bytes on any processor.
           It would be better for the number of read and write instructions
           to be a multiple of the number of the processor's decoders,
           but that is not always possible.
        */
        for(; i>0; i--)
        {
            __asm__ volatile (
            PREFETCH" 320(%0)\n"
            "movq (%0), %%mm0\n"
            "movq 8(%0), %%mm1\n"
            "movq 16(%0), %%mm2\n"
            "movq 24(%0), %%mm3\n"
            "movq 32(%0), %%mm4\n"
            "movq 40(%0), %%mm5\n"
            "movq 48(%0), %%mm6\n"
            "movq 56(%0), %%mm7\n"
            MOVNTQ" %%mm0, (%1)\n"
            MOVNTQ" %%mm1, 8(%1)\n"
            MOVNTQ" %%mm2, 16(%1)\n"
            MOVNTQ" %%mm3, 24(%1)\n"
            MOVNTQ" %%mm4, 32(%1)\n"
            MOVNTQ" %%mm5, 40(%1)\n"
            MOVNTQ" %%mm6, 48(%1)\n"
            MOVNTQ" %%mm7, 56(%1)\n"
            :: "r" (from), "r" (to) : "memory");
            from=((const unsigned char *)from)+64;
            to=((unsigned char *)to)+64;
        }
#if HAVE_MMX2
        /* since movntq is weakly-ordered, a "sfence"
         * is needed to become ordered again. */
        __asm__ volatile ("sfence":::"memory");
#endif
        /* enables the FPU to be used again */
        __asm__ volatile (EMMS:::"memory");
    }
    /*
     * Now do the tail of the block
     */
    if(len) small_memcpy(to, from, len);
    return retval;
}
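
/*
  Typical use of the mem2agpcpy() variants (illustrative sketch only, excluded
  from the build): copying decoded video lines into an AGP/PCI framebuffer
  mapping, i.e. write-combining memory that should only be written, never read.
  copy_frame_to_fb, fb, image and the stride parameters are hypothetical names;
  mem2agpcpy_MMX2 stands for whichever RENAME()d variant the dispatcher picked.
*/
#if 0
static void copy_frame_to_fb(unsigned char *fb, const unsigned char *image,
                             int fb_stride, int img_stride,
                             int height, int bytes_per_line)
{
    int y;
    for (y = 0; y < height; y++)
        mem2agpcpy_MMX2(fb    + y * fb_stride,   /* dst: AGP/PCI (write-combining) memory */
                        image + y * img_stride,  /* src: ordinary cacheable system memory */
                        bytes_per_line);
}
#endif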