2018-05-06 14:17:47 +00:00
|
|
|
/* Copyright (C) 2018 the mpv developers
|
2017-04-01 18:32:01 +00:00
|
|
|
*
|
2018-05-06 14:17:47 +00:00
|
|
|
* Permission to use, copy, modify, and/or distribute this software for any
|
|
|
|
* purpose with or without fee is hereby granted, provided that the above
|
|
|
|
* copyright notice and this permission notice appear in all copies.
|
2017-04-01 18:32:01 +00:00
|
|
|
*
|
2018-05-06 14:17:47 +00:00
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
|
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
|
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
|
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
|
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
|
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
|
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
2017-04-01 18:32:01 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <pthread.h>
|
|
|
|
|
|
|
|
#include "common/common.h"
|
2018-05-10 18:18:09 +00:00
|
|
|
#include "osdep/threads.h"
|
2018-05-06 14:17:47 +00:00
|
|
|
#include "osdep/timer.h"
|
2017-04-01 18:32:01 +00:00
|
|
|
|
|
|
|
#include "thread_pool.h"
|
|
|
|
|
2018-05-06 14:17:47 +00:00
|
|
|
// Threads destroy themselves after this many seconds, if there's no new work
// and the thread count is above the configured minimum.
#define DESTROY_TIMEOUT 10
|
|
|
|
|
2017-04-01 18:32:01 +00:00
|
|
|
// A single queued work item.
struct work {
    void (*fn)(void *ctx);  // callback to run on a worker thread
    void *fn_ctx;           // opaque user pointer passed to fn
};
|
|
|
|
|
|
|
|
struct mp_thread_pool {
    // Thread count bounds; set at creation and not changed afterwards.
    int min_threads, max_threads;

    pthread_mutex_t lock;
    pthread_cond_t wakeup;

    // --- the following fields are protected by lock

    pthread_t *threads;     // all currently existing worker threads
    int num_threads;

    // Number of threads which have taken up work and are still processing it.
    int busy_threads;

    bool terminate;         // set by the destructor to shut down all workers

    // Pending work items. New items are inserted at index 0; workers pop from
    // the end, so the queue is FIFO.
    struct work *work;
    int num_work;
};
|
|
|
|
|
|
|
|
// Main loop of a pool worker thread. Pops work items and runs them; idles on
// the condition variable when the queue is empty. A thread above the minimum
// count exits after DESTROY_TIMEOUT seconds without work and removes itself
// from the pool (nobody joins it, so it detaches itself).
static void *worker_thread(void *arg)
{
    struct mp_thread_pool *pool = arg;

    mpthread_set_name("worker");

    pthread_mutex_lock(&pool->lock);

    // ts is the absolute deadline for the idle timeout; all-zero means
    // "no deadline armed yet".
    struct timespec ts = {0};
    bool got_timeout = false;
    while (1) {
        // Pop the most recently appended work item, if any (the queue is
        // filled at index 0, so this takes the oldest item => FIFO).
        struct work work = {0};
        if (pool->num_work > 0) {
            work = pool->work[pool->num_work - 1];
            pool->num_work -= 1;
        }

        if (!work.fn) {
            // No work: either exit (timeout/termination) or go to sleep.
            if (got_timeout || pool->terminate)
                break;

            if (pool->num_threads > pool->min_threads) {
                // Excess thread: sleep with a deadline. Arm the deadline only
                // once so repeated spurious wakeups don't extend it.
                if (!ts.tv_sec && !ts.tv_nsec)
                    ts = mp_rel_time_to_timespec(DESTROY_TIMEOUT);
                // Any non-zero return (ETIMEDOUT or other errors) counts as a
                // timeout, but only actually exit if we're still above the
                // minimum thread count at this point.
                if (pthread_cond_timedwait(&pool->wakeup, &pool->lock, &ts))
                    got_timeout = pool->num_threads > pool->min_threads;
            } else {
                // At or below the minimum: wait indefinitely.
                pthread_cond_wait(&pool->wakeup, &pool->lock);
            }
            continue;
        }

        // Run the work item with the lock released.
        pool->busy_threads += 1;
        pthread_mutex_unlock(&pool->lock);

        work.fn(work.fn_ctx);

        pthread_mutex_lock(&pool->lock);
        pool->busy_threads -= 1;

        // We did work, so reset the idle-timeout state.
        ts = (struct timespec){0};
        got_timeout = false;
    }

    // If no termination signal was given, it must mean we died because of a
    // timeout, and nobody is waiting for us. We have to remove ourselves.
    if (!pool->terminate) {
        for (int n = 0; n < pool->num_threads; n++) {
            if (pthread_equal(pool->threads[n], pthread_self())) {
                // Nobody will join us; detach so resources are reclaimed.
                pthread_detach(pthread_self());
                MP_TARRAY_REMOVE_AT(pool->threads, pool->num_threads, n);
                pthread_mutex_unlock(&pool->lock);
                return NULL;
            }
        }
        assert(0); // every live worker must be in pool->threads
    }

    pthread_mutex_unlock(&pool->lock);
    return NULL;
}
|
|
|
|
|
|
|
|
// talloc destructor: signals all workers to terminate, joins them, and
// destroys the synchronization primitives. All queued work must have been
// drained by then (asserted below).
static void thread_pool_dtor(void *ctx)
{
    struct mp_thread_pool *pool = ctx;

    pthread_mutex_lock(&pool->lock);

    pool->terminate = true;
    pthread_cond_broadcast(&pool->wakeup);

    // Steal the thread array under the lock so terminating workers (which
    // only self-remove when !terminate) don't race with the join loop.
    pthread_t *threads = pool->threads;
    int num_threads = pool->num_threads;

    pool->threads = NULL;
    pool->num_threads = 0;

    pthread_mutex_unlock(&pool->lock);

    // Join outside the lock; workers need the lock to finish their loop.
    for (int n = 0; n < num_threads; n++)
        pthread_join(threads[n], NULL);

    assert(pool->num_work == 0);
    assert(pool->num_threads == 0);
    pthread_cond_destroy(&pool->wakeup);
    pthread_mutex_destroy(&pool->lock);
}
|
|
|
|
|
2018-05-18 21:24:17 +00:00
|
|
|
static bool add_thread(struct mp_thread_pool *pool)
|
2018-05-06 14:17:47 +00:00
|
|
|
{
|
|
|
|
pthread_t thread;
|
|
|
|
|
2018-05-18 21:24:17 +00:00
|
|
|
if (pthread_create(&thread, NULL, worker_thread, pool) != 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
MP_TARRAY_APPEND(pool, pool->threads, pool->num_threads, thread);
|
|
|
|
return true;
|
2018-05-06 14:17:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
struct mp_thread_pool *mp_thread_pool_create(void *ta_parent, int init_threads,
|
|
|
|
int min_threads, int max_threads)
|
2017-04-01 18:32:01 +00:00
|
|
|
{
|
2018-05-06 14:17:47 +00:00
|
|
|
assert(min_threads >= 0);
|
|
|
|
assert(init_threads <= min_threads);
|
|
|
|
assert(max_threads > 0 && max_threads >= min_threads);
|
2017-04-01 18:32:01 +00:00
|
|
|
|
|
|
|
struct mp_thread_pool *pool = talloc_zero(ta_parent, struct mp_thread_pool);
|
|
|
|
talloc_set_destructor(pool, thread_pool_dtor);
|
|
|
|
|
|
|
|
pthread_mutex_init(&pool->lock, NULL);
|
|
|
|
pthread_cond_init(&pool->wakeup, NULL);
|
|
|
|
|
2018-05-06 14:17:47 +00:00
|
|
|
pool->min_threads = min_threads;
|
|
|
|
pool->max_threads = max_threads;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&pool->lock);
|
|
|
|
for (int n = 0; n < init_threads; n++)
|
|
|
|
add_thread(pool);
|
|
|
|
bool ok = pool->num_threads >= init_threads;
|
|
|
|
pthread_mutex_unlock(&pool->lock);
|
|
|
|
|
|
|
|
if (!ok)
|
|
|
|
TA_FREEP(&pool);
|
2017-04-01 18:32:01 +00:00
|
|
|
|
|
|
|
return pool;
|
|
|
|
}
|
|
|
|
|
2018-05-18 21:24:17 +00:00
|
|
|
static bool thread_pool_add(struct mp_thread_pool *pool, void (*fn)(void *ctx),
|
|
|
|
void *fn_ctx, bool allow_queue)
|
2017-04-01 18:32:01 +00:00
|
|
|
{
|
2018-05-06 14:17:47 +00:00
|
|
|
bool ok = true;
|
|
|
|
|
|
|
|
assert(fn);
|
|
|
|
|
2017-04-01 18:32:01 +00:00
|
|
|
pthread_mutex_lock(&pool->lock);
|
|
|
|
struct work work = {fn, fn_ctx};
|
2018-05-06 14:17:47 +00:00
|
|
|
|
|
|
|
// If there are not enough threads to process all at once, but we can
|
|
|
|
// create a new thread, then do so. If work is queued quickly, it can
|
|
|
|
// happen that not all available threads have picked up work yet (up to
|
|
|
|
// num_threads - busy_threads threads), which has to be accounted for.
|
|
|
|
if (pool->busy_threads + pool->num_work + 1 > pool->num_threads &&
|
|
|
|
pool->num_threads < pool->max_threads)
|
|
|
|
{
|
2018-05-18 21:24:17 +00:00
|
|
|
if (!add_thread(pool)) {
|
|
|
|
// If we can queue it, it'll get done as long as there is 1 thread.
|
|
|
|
ok = allow_queue && pool->num_threads > 0;
|
|
|
|
}
|
2018-05-06 14:17:47 +00:00
|
|
|
}
|
|
|
|
|
2018-05-18 21:24:17 +00:00
|
|
|
if (ok) {
|
2018-05-06 14:17:47 +00:00
|
|
|
MP_TARRAY_INSERT_AT(pool, pool->work, pool->num_work, 0, work);
|
|
|
|
pthread_cond_signal(&pool->wakeup);
|
|
|
|
}
|
|
|
|
|
2017-04-01 18:32:01 +00:00
|
|
|
pthread_mutex_unlock(&pool->lock);
|
2018-05-06 14:17:47 +00:00
|
|
|
return ok;
|
2017-04-01 18:32:01 +00:00
|
|
|
}
|
2018-05-18 21:24:17 +00:00
|
|
|
|
|
|
|
// Submit work, allowing it to wait in the queue if no thread can be created.
// Thin wrapper around thread_pool_add with allow_queue=true.
bool mp_thread_pool_queue(struct mp_thread_pool *pool, void (*fn)(void *ctx),
                          void *fn_ctx)
{
    bool queued = thread_pool_add(pool, fn, fn_ctx, true);
    return queued;
}
|
|
|
|
|
|
|
|
// Submit work without permitting a queue fallback on thread-creation failure.
// Thin wrapper around thread_pool_add with allow_queue=false.
bool mp_thread_pool_run(struct mp_thread_pool *pool, void (*fn)(void *ctx),
                        void *fn_ctx)
{
    bool started = thread_pool_add(pool, fn, fn_ctx, false);
    return started;
}
|