Mirror of https://git.ffmpeg.org/ffmpeg.git (synced 2024-12-12 18:25:03 +00:00)
avutil/mem_internal: local align should always work
Signed-off-by: Zhao Zhili <zhilizhao@tencent.com>
commit 59057aa807
parent 57861911a3
Changed files:
configure (7 changes, vendored)
libavutil/mem_internal.h
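For context on what the touched macros do: LOCAL_ALIGNED_n(type, name, sizes...) declares a stack array with n-byte alignment plus a pointer through which it is accessed. A minimal usage sketch, assuming it is compiled inside the FFmpeg source tree; the function and buffer names are illustrative, not taken from this commit:

    #include <stdint.h>
    #include <string.h>
    #include "libavutil/mem_internal.h"

    static void clear_blocks(void)
    {
        /* Expands (via LOCAL_ALIGNED_D) to a 16-byte aligned stack array
         * "la_block[4][16]" and a pointer "uint8_t (*block)[16] = la_block". */
        LOCAL_ALIGNED_16(uint8_t, block, [4], [16]);

        for (int i = 0; i < 4; i++)
            memset(block[i], 0, 16);   /* each 16-byte row starts aligned */
    }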
configure

@@ -2246,7 +2246,6 @@ ARCH_FEATURES="
     fast_clz
     fast_cmov
     fast_float16
-    local_aligned
     simd_align_16
     simd_align_32
     simd_align_64
@@ -5382,7 +5381,6 @@ elif enabled arm; then
 
 elif enabled loongarch; then
 
-    enable local_aligned
     enable simd_align_32
     enable fast_64bit
     enable fast_clz
@@ -5449,7 +5447,6 @@ elif enabled mips; then
             ;;
         # Cores from Loongson
         loongson2e|loongson2f|loongson3*)
-            enable local_aligned
             enable simd_align_16
             enable fast_64bit
             enable fast_clz
@@ -6373,8 +6370,6 @@ elif enabled parisc; then
 
 elif enabled ppc; then
 
-    enable local_aligned
-
     check_inline_asm dcbzl '"dcbzl 0, %0" :: "r"(0)'
     check_inline_asm ibm_asm '"add 0, 0, 0"'
     check_inline_asm ppc4xx '"maclhw r10, r11, r12"'
@@ -6413,8 +6408,6 @@ elif enabled x86; then
     check_builtin rdtsc intrin.h "__rdtsc()"
     check_builtin mm_empty mmintrin.h "_mm_empty()"
 
-    enable local_aligned
-
     # check whether EBP is available on x86
     # As 'i' is stored on the stack, this program will crash
     # if the base pointer is used to access it because the
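Each entry in ARCH_FEATURES that configure enables ends up as a HAVE_* macro in the generated config.h; with local_aligned dropped from the list, HAVE_LOCAL_ALIGNED is no longer emitted. An illustrative excerpt of such a generated header (the values are hypothetical, not taken from any particular build):

    /* config.h (generated by configure) -- illustrative values only */
    #define HAVE_SIMD_ALIGN_16 1
    #define HAVE_SIMD_ALIGN_32 1
    #define HAVE_SIMD_ALIGN_64 0
    /* HAVE_LOCAL_ALIGNED used to be defined 0 or 1 here; after this commit
     * it no longer appears, so nothing may test it any more. */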
libavutil/mem_internal.h

@@ -111,38 +111,18 @@
 // to be forced to tokenize __VA_ARGS__
 #define E1(x) x
 
-#define LOCAL_ALIGNED_A(a, t, v, s, o, ...)             \
-    uint8_t la_##v[sizeof(t s o) + (a)];                \
-    t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)
-
 #define LOCAL_ALIGNED_D(a, t, v, s, o, ...)             \
     DECLARE_ALIGNED(a, t, la_##v) s o;                  \
     t (*v) o = la_##v
 
 #define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
 
-#if HAVE_LOCAL_ALIGNED
-#   define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
-#else
-#   define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
-#endif
+#define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
 
-#if HAVE_LOCAL_ALIGNED
-#   define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
-#else
-#   define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
-#endif
+#define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
 
-#if HAVE_LOCAL_ALIGNED
-#   define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
-#else
-#   define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
-#endif
+#define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
 
-#if HAVE_LOCAL_ALIGNED
-#   define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
-#else
-#   define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
-#endif
+#define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
 
 #endif /* AVUTIL_MEM_INTERNAL_H */
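For reference, the kept and removed strategies differ only in how the alignment is obtained. Hand-written approximations of what LOCAL_ALIGNED_16(uint8_t, buf, [16]) expands to under each macro (not actual preprocessor output):

    /* Kept path, LOCAL_ALIGNED_D: the compiler aligns the object itself. */
    DECLARE_ALIGNED(16, uint8_t, la_buf) [16];
    uint8_t *buf = la_buf;

    /* Removed path, LOCAL_ALIGNED_A: over-allocate by the alignment and
     * round the pointer up by hand with FFALIGN. */
    uint8_t la_buf[sizeof(uint8_t [16]) + 16];
    uint8_t *buf = (void *)FFALIGN((uintptr_t)la_buf, 16);

Per the commit message, DECLARE_ALIGNED on a local object is expected to always work, so the manual-rounding fallback and the HAVE_LOCAL_ALIGNED probe in configure become dead weight and are removed.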