diff --git a/libavcodec/x86/fft_mmx.asm b/libavcodec/x86/fft_mmx.asm
index 7d046df18e..f1b7f2a515 100644
--- a/libavcodec/x86/fft_mmx.asm
+++ b/libavcodec/x86/fft_mmx.asm
@@ -641,19 +641,21 @@ cglobal fft_permute, 2,7,1
 %if ARCH_X86_64
     mov     r0, r1
     mov     r1, r5
+%endif
+%if WIN64
+    sub     rsp, 8
+    call    memcpy
+    add     rsp, 8
+    RET
+%elif ARCH_X86_64
+    jmp     memcpy
 %else
     push    r2
     push    r5
     push    r1
-%endif
-%if ARCH_X86_64 && WIN64 == 0
-    jmp     memcpy
-%else
     call    memcpy
-%if ARCH_X86_32
     add     esp, 12
-%endif
-    REP_RET
+    RET
 %endif
 
 cglobal imdct_calc, 3,5,3
diff --git a/libavresample/x86/audio_convert_init.c b/libavresample/x86/audio_convert_init.c
index f41d974445..637fd2fb14 100644
--- a/libavresample/x86/audio_convert_init.c
+++ b/libavresample/x86/audio_convert_init.c
@@ -53,14 +53,6 @@ av_cold void ff_audio_convert_init_x86(AudioConvert *ac)
         ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
                                   6, 1, 4, "MMX", ff_conv_fltp_to_flt_6ch_mmx);
     }
-    if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE) {
-        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
-                                  6, 16, 4, "SSE4", ff_conv_fltp_to_flt_6ch_sse4);
-    }
-    if (mm_flags & AV_CPU_FLAG_AVX && HAVE_AVX) {
-        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
-                                  6, 16, 4, "AVX", ff_conv_fltp_to_flt_6ch_avx);
-    }
     if (mm_flags & AV_CPU_FLAG_SSE2 && HAVE_SSE) {
         if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
             ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32,
@@ -80,12 +72,16 @@ av_cold void ff_audio_convert_init_x86(AudioConvert *ac)
     if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE) {
         ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16,
                                   0, 16, 8, "SSE4", ff_conv_s16_to_flt_sse4);
+        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
+                                  6, 16, 4, "SSE4", ff_conv_fltp_to_flt_6ch_sse4);
     }
     if (mm_flags & AV_CPU_FLAG_AVX && HAVE_AVX) {
         ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32,
                                   0, 32, 16, "AVX", ff_conv_s32_to_flt_avx);
         ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT,
                                   0, 32, 32, "AVX", ff_conv_flt_to_s32_avx);
+        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
+                                  6, 16, 4, "AVX", ff_conv_fltp_to_flt_6ch_avx);
     }
 #endif
 }
diff --git a/libavutil/log.c b/libavutil/log.c
index 155276c769..a641682b97 100644
--- a/libavutil/log.c
+++ b/libavutil/log.c
@@ -40,6 +40,7 @@ static int flags;
 
 #if defined(_WIN32) && !defined(__MINGW32CE__)
 #include <windows.h>
+#include
 static const uint8_t color[16 + AV_CLASS_CATEGORY_NB] = {
     [AV_LOG_PANIC  /8] = 12,
     [AV_LOG_FATAL  /8] = 12,
diff --git a/libavutil/x86/cpu.c b/libavutil/x86/cpu.c
index 5782ff73cf..d77f0fcc15 100644
--- a/libavutil/x86/cpu.c
+++ b/libavutil/x86/cpu.c
@@ -26,16 +26,15 @@
 #include "libavutil/cpu.h"
 
 /* ebx saving is necessary for PIC. gcc seems unable to see it alone */
-#define cpuid(index,eax,ebx,ecx,edx)\
-    __asm__ volatile\
-        ("mov %%"REG_b", %%"REG_S"\n\t"\
-         "cpuid\n\t"\
-         "xchg %%"REG_b", %%"REG_S\
-         : "=a" (eax), "=S" (ebx),\
-           "=c" (ecx), "=d" (edx)\
-         : "0" (index));
+#define cpuid(index, eax, ebx, ecx, edx)                 \
+    __asm__ volatile (                                   \
+        "mov    %%"REG_b", %%"REG_S" \n\t"               \
+        "cpuid                       \n\t"               \
+        "xchg   %%"REG_b", %%"REG_S                      \
+        : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx) \
+        : "0" (index))
 
-#define xgetbv(index,eax,edx) \
+#define xgetbv(index, eax, edx)                          \
     __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))
 
 /* Function to test if multimedia instructions are supported... */
@@ -43,8 +42,8 @@ int ff_get_cpu_flags_x86(void)
 {
     int rval = 0;
     int eax, ebx, ecx, edx;
-    int max_std_level, max_ext_level, std_caps=0, ext_caps=0;
-    int family=0, model=0;
+    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
+    int family = 0, model = 0;
     union { int i[3]; char c[12]; } vendor;
 
 #if ARCH_X86_32
@@ -79,19 +78,20 @@ int ff_get_cpu_flags_x86(void)
     vendor.i[1] = edx;
     vendor.i[2] = ecx;
 
-    if(max_std_level >= 1){
+    if (max_std_level >= 1) {
         cpuid(1, eax, ebx, ecx, std_caps);
-        family = ((eax>>8)&0xf) + ((eax>>20)&0xff);
-        model  = ((eax>>4)&0xf) + ((eax>>12)&0xf0);
+        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+        model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
         if (std_caps & (1 << 15))
             rval |= AV_CPU_FLAG_CMOV;
-        if (std_caps & (1<<23))
+        if (std_caps & (1 << 23))
             rval |= AV_CPU_FLAG_MMX;
-        if (std_caps & (1<<25))
-            rval |= AV_CPU_FLAG_MMX2
+        if (std_caps & (1 << 25))
+            rval |= AV_CPU_FLAG_MMX2;
 #if HAVE_SSE
-                  | AV_CPU_FLAG_SSE;
-        if (std_caps & (1<<26))
+        if (std_caps & (1 << 25))
+            rval |= AV_CPU_FLAG_SSE;
+        if (std_caps & (1 << 26))
             rval |= AV_CPU_FLAG_SSE2;
         if (ecx & 1)
             rval |= AV_CPU_FLAG_SSE3;
@@ -111,20 +111,19 @@ int ff_get_cpu_flags_x86(void)
         }
 #endif
 #endif
-                  ;
     }
 
     cpuid(0x80000000, max_ext_level, ebx, ecx, edx);
 
-    if(max_ext_level >= 0x80000001){
+    if (max_ext_level >= 0x80000001) {
         cpuid(0x80000001, eax, ebx, ecx, ext_caps);
-        if (ext_caps & (1U<<31))
+        if (ext_caps & (1U << 31))
             rval |= AV_CPU_FLAG_3DNOW;
-        if (ext_caps & (1<<30))
+        if (ext_caps & (1 << 30))
             rval |= AV_CPU_FLAG_3DNOWEXT;
-        if (ext_caps & (1<<23))
+        if (ext_caps & (1 << 23))
             rval |= AV_CPU_FLAG_MMX;
-        if (ext_caps & (1<<22))
+        if (ext_caps & (1 << 22))
             rval |= AV_CPU_FLAG_MMX2;
 
         /* Allow for selectively disabling SSE2 functions on AMD processors
@@ -151,14 +150,17 @@ int ff_get_cpu_flags_x86(void)
 
     if (!strncmp(vendor.c, "GenuineIntel", 12)) {
         if (family == 6 && (model == 9 || model == 13 || model == 14)) {
-            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and 6/14 (core1 "yonah")
-             * theoretically support sse2, but it's usually slower than mmx,
-             * so let's just pretend they don't. AV_CPU_FLAG_SSE2 is disabled and
-             * AV_CPU_FLAG_SSE2SLOW is enabled so that SSE2 is not used unless
-             * explicitly enabled by checking AV_CPU_FLAG_SSE2SLOW. The same
-             * situation applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
-            if (rval & AV_CPU_FLAG_SSE2) rval ^= AV_CPU_FLAG_SSE2SLOW|AV_CPU_FLAG_SSE2;
-            if (rval & AV_CPU_FLAG_SSE3) rval ^= AV_CPU_FLAG_SSE3SLOW|AV_CPU_FLAG_SSE3;
+            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
+             * 6/14 (core1 "yonah") theoretically support sse2, but it's
+             * usually slower than mmx, so let's just pretend they don't.
+             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
+             * enabled so that SSE2 is not used unless explicitly enabled
+             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
+             * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
+            if (rval & AV_CPU_FLAG_SSE2)
+                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
+            if (rval & AV_CPU_FLAG_SSE3)
+                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
         }
         /* The Atom processor has SSSE3 support, which is useful in many cases,
          * but sometimes the SSSE3 version is slower than the SSE2 equivalent