/*
 * default memory allocator for libavutil
 * Copyright (c) 2002 Fabrice Bellard
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * default memory allocator for libavutil
 */

#include "config.h"

#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#if HAVE_MALLOC_H
#include <malloc.h>
#endif

#include "avutil.h"
#include "intreadwrite.h"
#include "mem.h"

#ifdef MALLOC_PREFIX

#define malloc         AV_JOIN(MALLOC_PREFIX, malloc)
#define memalign       AV_JOIN(MALLOC_PREFIX, memalign)
#define posix_memalign AV_JOIN(MALLOC_PREFIX, posix_memalign)
#define realloc        AV_JOIN(MALLOC_PREFIX, realloc)
#define free           AV_JOIN(MALLOC_PREFIX, free)

void *malloc(size_t size);
void *memalign(size_t align, size_t size);
int   posix_memalign(void **ptr, size_t align, size_t size);
void *realloc(void *ptr, size_t size);
void  free(void *ptr);

#endif /* MALLOC_PREFIX */

/* You can redefine av_malloc and av_free in your project to use your
 * memory allocator. You do not need to suppress this file because the
 * linker will do it automatically. */
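
/* An illustrative sketch (not part of the upstream file) of such an override:
 * a project can simply link its own definitions, e.g.
 *
 *     void *av_malloc(size_t size) { return my_aligned_alloc(32, size); }
 *     void  av_free(void *ptr)     { my_aligned_free(ptr); }
 *
 * where my_aligned_alloc()/my_aligned_free() are hypothetical project helpers
 * returning suitably aligned memory; the definitions below are then dropped
 * by the linker. */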

void *av_malloc(size_t size)
{
    void *ptr = NULL;
#if CONFIG_MEMALIGN_HACK
    long diff;
#endif

    /* let's disallow possibly ambiguous cases */
    if (size > (INT_MAX - 32) || !size)
        return NULL;

#if CONFIG_MEMALIGN_HACK
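    /* Over-allocate by 32 bytes, advance the returned pointer to the next
     * 32-byte boundary and stash the offset in the byte just before it, so
     * av_free()/av_realloc() can recover the original malloc() pointer. */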
    ptr = malloc(size + 32);
    if (!ptr)
        return ptr;
    diff              = ((-(long)ptr - 1) & 31) + 1;
    ptr               = (char *)ptr + diff;
    ((char *)ptr)[-1] = diff;
#elif HAVE_POSIX_MEMALIGN
    if (posix_memalign(&ptr, 32, size))
        ptr = NULL;
#elif HAVE_ALIGNED_MALLOC
    ptr = _aligned_malloc(size, 32);
#elif HAVE_MEMALIGN
    ptr = memalign(32, size);
    /* Why 64?
     * Indeed, we should align it:
     *   on  4 for 386
     *   on 16 for 486
     *   on 32 for 586, PPro - K6-III
     *   on 64 for K7 (maybe for P3 too).
     * Because L1 and L2 caches are aligned on those values.
     * But I don't want to code such logic here!
     */
    /* Why 32?
     * For AVX ASM. SSE / NEON needs only 16.
     * Why not larger? Because I did not see a difference in benchmarks ...
     */
    /* benchmarks with P3
     * memalign(64) + 1    3071, 3051, 3032
     * memalign(64) + 2    3051, 3032, 3041
     * memalign(64) + 4    2911, 2896, 2915
     * memalign(64) + 8    2545, 2554, 2550
     * memalign(64) + 16   2543, 2572, 2563
     * memalign(64) + 32   2546, 2545, 2571
     * memalign(64) + 64   2570, 2533, 2558
     *
     * BTW, malloc seems to do 8-byte alignment by default here.
     */
#else
    ptr = malloc(size);
#endif
    return ptr;
}

void *av_realloc(void *ptr, size_t size)
{
#if CONFIG_MEMALIGN_HACK
    int diff;
#endif

    /* let's disallow possibly ambiguous cases */
    if (size > (INT_MAX - 16))
        return NULL;

#if CONFIG_MEMALIGN_HACK
    //FIXME this isn't aligned correctly, though it probably isn't needed
    if (!ptr)
        return av_malloc(size);
    diff = ((char *)ptr)[-1];
    return (char *)realloc((char *)ptr - diff, size + diff) + diff;
#elif HAVE_ALIGNED_MALLOC
    return _aligned_realloc(ptr, size, 32);
#else
    return realloc(ptr, size);
#endif
}
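
/* Like av_realloc(), but operates through a pointer to a pointer: a size of
 * zero frees the buffer, and on allocation failure the old buffer is freed,
 * the pointed-to pointer is set to NULL and AVERROR(ENOMEM) is returned. */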
int av_reallocp(void *ptr, size_t size)
{
    void **ptrptr = ptr;
    void *ret;

    if (!size) {
        av_freep(ptr);
        return 0;
    }

    ret = av_realloc(*ptrptr, size);

    if (!ret) {
        av_freep(ptr);
        return AVERROR(ENOMEM);
    }

    *ptrptr = ret;
    return 0;
}
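
/* Array variants: reject element counts whose product with the element size
 * could overflow before handing nmemb * size to the underlying allocator. */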
void *av_realloc_array(void *ptr, size_t nmemb, size_t size)
{
    if (!size || nmemb >= INT_MAX / size)
        return NULL;
    return av_realloc(ptr, nmemb * size);
}

int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
{
    void **ptrptr = ptr;
    void *ret;
    if (!size || nmemb >= INT_MAX / size)
        return AVERROR(ENOMEM);
    if (!nmemb) {
        av_freep(ptr);
        return 0;
    }
    ret = av_realloc(*ptrptr, nmemb * size);
    if (!ret) {
        av_freep(ptr);
        return AVERROR(ENOMEM);
    }
    *ptrptr = ret;
    return 0;
}
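
/* Memory obtained from av_malloc()/av_realloc() must be released with
 * av_free(): depending on the build it may have been over-allocated for the
 * alignment hack or come from _aligned_malloc(), so plain free() is wrong. */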
void av_free(void *ptr)
{
#if CONFIG_MEMALIGN_HACK
    if (ptr)
        free((char *)ptr - ((char *)ptr)[-1]);
#elif HAVE_ALIGNED_MALLOC
    _aligned_free(ptr);
#else
    free(ptr);
#endif
}

void av_freep(void *arg)
{
    void **ptr = (void **)arg;
    av_free(*ptr);
    *ptr = NULL;
}

void *av_mallocz(size_t size)
{
    void *ptr = av_malloc(size);
    if (ptr)
        memset(ptr, 0, size);
    return ptr;
}

char *av_strdup(const char *s)
{
    char *ptr = NULL;
    if (s) {
        int len = strlen(s) + 1;
        ptr = av_malloc(len);
        if (ptr)
            memcpy(ptr, s, len);
    }
    return ptr;
}
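
/* fill16/fill24/fill32 replicate the 2-, 3- or 4-byte pattern stored just
 * before dst across the next len bytes, using 32-bit stores where possible;
 * av_memcpy_backptr() uses them for small back offsets. */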
static void fill16(uint8_t *dst, int len)
{
    uint32_t v = AV_RN16(dst - 2);

    v |= v << 16;

    while (len >= 4) {
        AV_WN32(dst, v);
        dst += 4;
        len -= 4;
    }

    while (len--) {
        *dst = dst[-2];
        dst++;
    }
}

static void fill24(uint8_t *dst, int len)
{
#if HAVE_BIGENDIAN
    uint32_t v = AV_RB24(dst - 3);
    uint32_t a = v << 8  | v >> 16;
    uint32_t b = v << 16 | v >> 8;
    uint32_t c = v << 24 | v;
#else
    uint32_t v = AV_RL24(dst - 3);
    uint32_t a = v       | v << 24;
    uint32_t b = v >> 8  | v << 16;
    uint32_t c = v >> 16 | v << 8;
#endif

    while (len >= 12) {
        AV_WN32(dst,     a);
        AV_WN32(dst + 4, b);
        AV_WN32(dst + 8, c);
        dst += 12;
        len -= 12;
    }

    if (len >= 4) {
        AV_WN32(dst, a);
        dst += 4;
        len -= 4;
    }

    if (len >= 4) {
        AV_WN32(dst, b);
        dst += 4;
        len -= 4;
    }

    while (len--) {
        *dst = dst[-3];
        dst++;
    }
}

static void fill32(uint8_t *dst, int len)
{
    uint32_t v = AV_RN32(dst - 4);

    while (len >= 4) {
        AV_WN32(dst, v);
        dst += 4;
        len -= 4;
    }

    while (len--) {
        *dst = dst[-4];
        dst++;
    }
}
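
/* Copy cnt bytes to dst from dst - back; the regions may overlap. Back
 * offsets of 1-4 bytes use memset() or the pattern fillers above; larger
 * offsets use word-sized copies, or memcpy() blocks that double in size for
 * longer runs so already-written output serves as the source. */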
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
{
    const uint8_t *src = &dst[-back];
    if (!back)
        return;

    if (back == 1) {
        memset(dst, *src, cnt);
    } else if (back == 2) {
        fill16(dst, cnt);
    } else if (back == 3) {
        fill24(dst, cnt);
    } else if (back == 4) {
        fill32(dst, cnt);
    } else {
        if (cnt >= 16) {
            int blocklen = back;
            while (cnt > blocklen) {
                memcpy(dst, src, blocklen);
                dst      += blocklen;
                cnt      -= blocklen;
                blocklen <<= 1;
            }
            memcpy(dst, src, cnt);
            return;
        }
        if (cnt >= 8) {
            AV_COPY32U(dst,     src);
            AV_COPY32U(dst + 4, src + 4);
            src += 8;
            dst += 8;
            cnt -= 8;
        }
        if (cnt >= 4) {
            AV_COPY32U(dst, src);
            src += 4;
            dst += 4;
            cnt -= 4;
        }
        if (cnt >= 2) {
            AV_COPY16U(dst, src);
            src += 2;
            dst += 2;
            cnt -= 2;
        }
        if (cnt)
            *dst = *src;
    }
}