/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <string.h>

#include "atomic.h"
#include "buffer_internal.h"
#include "common.h"
#include "mem.h"
#include "thread.h"
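
/* Wrap existing, caller-allocated memory in a refcounted AVBufferRef.
 * The new buffer starts with a refcount of 1; if no free() callback is
 * supplied, av_buffer_default_free() (a plain av_free() of the data)
 * is used. */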
AVBufferRef *av_buffer_create(uint8_t *data, int size,
                              void (*free)(void *opaque, uint8_t *data),
                              void *opaque, int flags)
{
    AVBufferRef *ref = NULL;
    AVBuffer    *buf = NULL;

    buf = av_mallocz(sizeof(*buf));
    if (!buf)
        return NULL;

    buf->data     = data;
    buf->size     = size;
    buf->free     = free ? free : av_buffer_default_free;
    buf->opaque   = opaque;
    buf->refcount = 1;

    if (flags & AV_BUFFER_FLAG_READONLY)
        buf->flags |= BUFFER_FLAG_READONLY;

    ref = av_mallocz(sizeof(*ref));
    if (!ref) {
        av_freep(&buf);
        return NULL;
    }

    ref->buffer = buf;
    ref->data   = data;
    ref->size   = size;

    return ref;
}
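
/*
 * Illustrative sketch (not part of this file): wrapping caller-allocated
 * memory in a read-only reference; passing NULL as the free callback
 * selects av_buffer_default_free():
 *
 *     uint8_t *mem = av_malloc(128);
 *     AVBufferRef *ref = mem ? av_buffer_create(mem, 128, NULL, NULL,
 *                                               AV_BUFFER_FLAG_READONLY)
 *                            : NULL;
 *     if (mem && !ref)
 *         av_freep(&mem);   // creation failed; the memory is still ours
 */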

void av_buffer_default_free(void *opaque, uint8_t *data)
{
    av_free(data);
}

AVBufferRef *av_buffer_alloc(int size)
{
    AVBufferRef *ret = NULL;
    uint8_t    *data = NULL;

    data = av_malloc(size);
    if (!data)
        return NULL;

    ret = av_buffer_create(data, size, av_buffer_default_free, NULL, 0);
    if (!ret)
        av_freep(&data);

    return ret;
}

AVBufferRef *av_buffer_allocz(int size)
{
    AVBufferRef *ret = av_buffer_alloc(size);
    if (!ret)
        return NULL;

    memset(ret->data, 0, size);
    return ret;
}

AVBufferRef *av_buffer_ref(AVBufferRef *buf)
{
    AVBufferRef *ret = av_mallocz(sizeof(*ret));

    if (!ret)
        return NULL;

    *ret = *buf;

    avpriv_atomic_int_add_and_fetch(&buf->buffer->refcount, 1);

    return ret;
}
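
/* Drop the reference *dst and, if src is given, move the reference *src
 * into it; the underlying AVBuffer is freed once its refcount reaches
 * zero. */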
static void buffer_replace(AVBufferRef **dst, AVBufferRef **src)
{
    AVBuffer *b;

    b = (*dst)->buffer;

    if (src) {
        **dst = **src;
        av_freep(src);
    } else
        av_freep(dst);

    if (!avpriv_atomic_int_add_and_fetch(&b->refcount, -1)) {
        b->free(b->opaque, b->data);
        av_freep(&b);
    }
}

void av_buffer_unref(AVBufferRef **buf)
{
    if (!buf || !*buf)
        return;

    buffer_replace(buf, NULL);
}
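
/* A buffer is writable only if it is not marked read-only and ours is
 * the sole reference to it. */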
int av_buffer_is_writable(const AVBufferRef *buf)
{
    if (buf->buffer->flags & AV_BUFFER_FLAG_READONLY)
        return 0;

    return avpriv_atomic_int_get(&buf->buffer->refcount) == 1;
}

void *av_buffer_get_opaque(const AVBufferRef *buf)
{
    return buf->buffer->opaque;
}

int av_buffer_get_ref_count(const AVBufferRef *buf)
{
    return buf->buffer->refcount;
}
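
/* Copy-on-write: if the buffer is shared or read-only, replace it with
 * a freshly allocated private copy of the data. */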
int av_buffer_make_writable(AVBufferRef **pbuf)
{
    AVBufferRef *newbuf, *buf = *pbuf;

    if (av_buffer_is_writable(buf))
        return 0;

    newbuf = av_buffer_alloc(buf->size);
    if (!newbuf)
        return AVERROR(ENOMEM);

    memcpy(newbuf->data, buf->data, buf->size);

    buffer_replace(pbuf, &newbuf);

    return 0;
}
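
/*
 * Illustrative sketch (not part of this file): ensuring private, mutable
 * data before an in-place edit, assuming buf is a valid AVBufferRef *
 * the caller owns:
 *
 *     int ret = av_buffer_make_writable(&buf);
 *     if (ret < 0)
 *         return ret;
 *     buf->data[0] = 0;   // no other reference can observe this write
 */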

int av_buffer_realloc(AVBufferRef **pbuf, int size)
{
    AVBufferRef *buf = *pbuf;
    uint8_t *tmp;

    if (!buf) {
        /* allocate a new buffer with av_realloc(), so it will be
         * reallocatable later */
        uint8_t *data = av_realloc(NULL, size);
        if (!data)
            return AVERROR(ENOMEM);

        buf = av_buffer_create(data, size, av_buffer_default_free, NULL, 0);
        if (!buf) {
            av_freep(&data);
            return AVERROR(ENOMEM);
        }

        buf->buffer->flags |= BUFFER_FLAG_REALLOCATABLE;
        *pbuf = buf;

        return 0;
    } else if (buf->size == size)
        return 0;

    if (!(buf->buffer->flags & BUFFER_FLAG_REALLOCATABLE) ||
        !av_buffer_is_writable(buf)) {
        /* cannot realloc in place; allocate a new reallocatable buffer
         * and copy the data */
        AVBufferRef *new = NULL;

        av_buffer_realloc(&new, size);
        if (!new)
            return AVERROR(ENOMEM);

        memcpy(new->data, buf->data, FFMIN(size, buf->size));

        buffer_replace(pbuf, &new);
        return 0;
    }

    tmp = av_realloc(buf->buffer->data, size);
    if (!tmp)
        return AVERROR(ENOMEM);

    buf->buffer->data = buf->data = tmp;
    buf->buffer->size = buf->size = size;
    return 0;
}
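
/* Create a pool whose allocator receives a caller-supplied opaque
 * pointer; pool_free, if given, runs once when the pool itself is
 * freed. */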
AVBufferPool *av_buffer_pool_init2(int size, void *opaque,
                                   AVBufferRef* (*alloc)(void *opaque, int size),
                                   void (*pool_free)(void *opaque))
{
    AVBufferPool *pool = av_mallocz(sizeof(*pool));
    if (!pool)
        return NULL;

    ff_mutex_init(&pool->mutex, NULL);

    pool->size      = size;
    pool->opaque    = opaque;
    pool->alloc2    = alloc;
    pool->pool_free = pool_free;

    avpriv_atomic_int_set(&pool->refcount, 1);

    return pool;
}

AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size))
{
    AVBufferPool *pool = av_mallocz(sizeof(*pool));
    if (!pool)
        return NULL;

    ff_mutex_init(&pool->mutex, NULL);

    pool->size  = size;
    pool->alloc = alloc ? alloc : av_buffer_alloc;

    avpriv_atomic_int_set(&pool->refcount, 1);

    return pool;
}
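
/*
 * Illustrative usage sketch (not part of this file):
 *
 *     AVBufferPool *pool = av_buffer_pool_init(4096, NULL);
 *     AVBufferRef  *buf  = pool ? av_buffer_pool_get(pool) : NULL;
 *     // ... use buf->data ...
 *     av_buffer_unref(&buf);        // hands the entry back to the pool
 *     av_buffer_pool_uninit(&pool); // freed once all buffers are returned
 */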

/*
 * This function gets called when the pool has been uninited and
 * all the buffers have been returned to it.
 */
static void buffer_pool_free(AVBufferPool *pool)
{
    while (pool->pool) {
        BufferPoolEntry *buf = pool->pool;
        pool->pool = buf->next;

        buf->free(buf->opaque, buf->data);
        av_freep(&buf);
    }

    ff_mutex_destroy(&pool->mutex);

    if (pool->pool_free)
        pool->pool_free(pool->opaque);

    av_freep(&pool);
}

void av_buffer_pool_uninit(AVBufferPool **ppool)
{
    AVBufferPool *pool;

    if (!ppool || !*ppool)
        return;
    pool   = *ppool;
    *ppool = NULL;

    if (!avpriv_atomic_int_add_and_fetch(&pool->refcount, -1))
        buffer_pool_free(pool);
}
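
/* Legacy lock-free path, kept under USE_ATOMICS: the available-buffer
 * list is detached wholesale with a CAS, worked on, then merged back.
 * It was superseded by the mutex-based path because buffers allocated by
 * other threads while the list was detached accumulated in the pool and
 * behaved like a memory leak. */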
#if USE_ATOMICS
/* remove the whole buffer list from the pool and return it */
static BufferPoolEntry *get_pool(AVBufferPool *pool)
{
    BufferPoolEntry *cur = *(void * volatile *)&pool->pool, *last = NULL;

    while (cur != last) {
        last = cur;
        cur = avpriv_atomic_ptr_cas((void * volatile *)&pool->pool, last, NULL);
        if (!cur)
            return NULL;
    }

    return cur;
}

static void add_to_pool(BufferPoolEntry *buf)
{
    AVBufferPool *pool;
    BufferPoolEntry *cur, *end = buf;

    if (!buf)
        return;
    pool = buf->pool;

    while (end->next)
        end = end->next;

    while (avpriv_atomic_ptr_cas((void * volatile *)&pool->pool, NULL, buf)) {
        /* pool is not empty, retrieve it and append it to our list */
        cur = get_pool(pool);
        end->next = cur;
        while (end->next)
            end = end->next;
    }
}
#endif

static void pool_release_buffer(void *opaque, uint8_t *data)
{
    BufferPoolEntry *buf = opaque;
    AVBufferPool *pool = buf->pool;

    if (CONFIG_MEMORY_POISONING)
        memset(buf->data, FF_MEMORY_POISON, pool->size);

#if USE_ATOMICS
    add_to_pool(buf);
#else
    ff_mutex_lock(&pool->mutex);
    buf->next  = pool->pool;
    pool->pool = buf;
    ff_mutex_unlock(&pool->mutex);
#endif

    if (!avpriv_atomic_int_add_and_fetch(&pool->refcount, -1))
        buffer_pool_free(pool);
}

/* allocate a new buffer and override its free() callback so that
 * it is returned to the pool on free */
static AVBufferRef *pool_alloc_buffer(AVBufferPool *pool)
{
    BufferPoolEntry *buf;
    AVBufferRef     *ret;

    ret = pool->alloc2 ? pool->alloc2(pool->opaque, pool->size) :
                         pool->alloc(pool->size);
    if (!ret)
        return NULL;

    buf = av_mallocz(sizeof(*buf));
    if (!buf) {
        av_buffer_unref(&ret);
        return NULL;
    }

    buf->data   = ret->buffer->data;
    buf->opaque = ret->buffer->opaque;
    buf->free   = ret->buffer->free;
    buf->pool   = pool;

    ret->buffer->opaque = buf;
    ret->buffer->free   = pool_release_buffer;

#if USE_ATOMICS
    avpriv_atomic_int_add_and_fetch(&pool->refcount, 1);
    avpriv_atomic_int_add_and_fetch(&pool->nb_allocated, 1);
#endif

    return ret;
}

AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
{
    AVBufferRef *ret;
    BufferPoolEntry *buf;

#if USE_ATOMICS
    /* check whether the pool is empty */
    buf = get_pool(pool);
    if (!buf && pool->refcount <= pool->nb_allocated) {
        av_log(NULL, AV_LOG_DEBUG, "Pool race detected, spinning to avoid overallocation and eventual OOM\n");
        while (!buf && avpriv_atomic_int_get(&pool->refcount) <= avpriv_atomic_int_get(&pool->nb_allocated))
            buf = get_pool(pool);
    }

    if (!buf)
        return pool_alloc_buffer(pool);

    /* keep the first entry, return the rest of the list to the pool */
    add_to_pool(buf->next);
    buf->next = NULL;

    ret = av_buffer_create(buf->data, pool->size, pool_release_buffer,
                           buf, 0);
    if (!ret) {
        add_to_pool(buf);
        return NULL;
    }
#else
    ff_mutex_lock(&pool->mutex);
    buf = pool->pool;
    if (buf) {
        ret = av_buffer_create(buf->data, pool->size, pool_release_buffer,
                               buf, 0);
        if (ret) {
            pool->pool = buf->next;
            buf->next  = NULL;
        }
    } else {
        ret = pool_alloc_buffer(pool);
    }
    ff_mutex_unlock(&pool->mutex);
#endif

    if (ret)
        avpriv_atomic_int_add_and_fetch(&pool->refcount, 1);

    return ret;
}