/* Copyright (C) 2018 the mpv developers
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <assert.h>
#include <errno.h>
#include <stdatomic.h>
#include <string.h>
#include <sys/types.h>

#ifdef _WIN32
#include <windows.h>
#else
#include <poll.h>
#endif

#include "common/common.h"
#include "misc/linked_list.h"
#include "osdep/io.h"
#include "osdep/timer.h"

#include "thread_tools.h"

uintptr_t mp_waiter_wait(struct mp_waiter *waiter)
{
    mp_mutex_lock(&waiter->lock);
    while (!waiter->done)
        mp_cond_wait(&waiter->wakeup, &waiter->lock);
    mp_mutex_unlock(&waiter->lock);

    uintptr_t ret = waiter->value;

    // We document that after mp_waiter_wait() the waiter object becomes
    // invalid. (It strictly returns only after mp_waiter_wakeup() has
    // returned, and the object is "single-shot".) So destroy it here.

    // Normally, we expect that the system uses futexes, in which case the
    // following functions will do nearly nothing. This is true for Windows
    // and Linux. But some lesser OSes still might allocate kernel objects
    // when initializing mutexes, so destroy them here.
    mp_mutex_destroy(&waiter->lock);
    mp_cond_destroy(&waiter->wakeup);

    memset(waiter, 0xCA, sizeof(*waiter)); // for debugging

    return ret;
}

void mp_waiter_wakeup(struct mp_waiter *waiter, uintptr_t value)
{
    mp_mutex_lock(&waiter->lock);
    assert(!waiter->done);
    waiter->done = true;
    waiter->value = value;
    mp_cond_signal(&waiter->wakeup);
    mp_mutex_unlock(&waiter->lock);
}

bool mp_waiter_poll(struct mp_waiter *waiter)
{
    mp_mutex_lock(&waiter->lock);
    bool r = waiter->done;
    mp_mutex_unlock(&waiter->lock);
    return r;
}
}
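
// Typical usage sketch for the mp_waiter functions above. This assumes the
// MP_WAITER_INITIALIZER static initializer declared in thread_tools.h:
//
//   struct mp_waiter waiter = MP_WAITER_INITIALIZER;
//   // ...pass &waiter to another thread, then block on the reply:
//   uintptr_t result = mp_waiter_wait(&waiter);
//   // waiter is invalid (destroyed) from this point on.
//
//   // The other thread completes the request exactly once:
//   mp_waiter_wakeup(&waiter, result_value);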

struct mp_cancel {
    mp_mutex lock;
    mp_cond wakeup;

    // Semaphore state and "mirrors".
    atomic_bool triggered;
    void (*cb)(void *ctx);
    void *cb_ctx;
    int wakeup_pipe[2];
    void *win32_event; // actually HANDLE

    // Slave list. These are automatically notified as well.
    struct {
        struct mp_cancel *head, *tail;
    } slaves;

    // For slaves. Synchronization is managed by parent.lock!
    struct mp_cancel *parent;
    struct {
        struct mp_cancel *next, *prev;
    } siblings;
};
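
// Design note: "triggered" is the authoritative cancellation state; the
// callback, the wakeup pipe, the win32 event, and the slave list are the
// "mirrors" that propagate it to external wait mechanisms (poll() users,
// WaitForMultipleObjects() users, callbacks, and slave mp_cancel objects).
// Every mirror is updated under "lock".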

static void cancel_destroy(void *p)
{
    struct mp_cancel *c = p;

    assert(!c->slaves.head); // API user error

    mp_cancel_set_parent(c, NULL);

    if (c->wakeup_pipe[0] >= 0) {
        close(c->wakeup_pipe[0]);
        close(c->wakeup_pipe[1]);
    }

#ifdef _WIN32
    if (c->win32_event)
        CloseHandle(c->win32_event);
#endif

    mp_mutex_destroy(&c->lock);
    mp_cond_destroy(&c->wakeup);
}

struct mp_cancel *mp_cancel_new(void *talloc_ctx)
{
    struct mp_cancel *c = talloc_ptrtype(talloc_ctx, c);
    talloc_set_destructor(c, cancel_destroy);
    *c = (struct mp_cancel){
        .triggered = false,
        .wakeup_pipe = {-1, -1},
    };
    mp_mutex_init(&c->lock);
    mp_cond_init(&c->wakeup);
    return c;
}
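
// Usage sketch: the returned object is a talloc allocation, so it is freed
// together with talloc_ctx, or explicitly:
//
//   struct mp_cancel *c = mp_cancel_new(NULL);
//   mp_cancel_trigger(c);       // from any thread
//   if (mp_cancel_test(c)) {
//       // ...abort the current operation...
//   }
//   talloc_free(c);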

static void trigger_locked(struct mp_cancel *c)
{
    atomic_store(&c->triggered, true);

    mp_cond_broadcast(&c->wakeup); // condition bound to c->triggered

    if (c->cb)
        c->cb(c->cb_ctx);

    for (struct mp_cancel *sub = c->slaves.head; sub; sub = sub->siblings.next)
        mp_cancel_trigger(sub);

    if (c->wakeup_pipe[1] >= 0)
        (void)write(c->wakeup_pipe[1], &(char){0}, 1);

#ifdef _WIN32
    if (c->win32_event)
        SetEvent(c->win32_event);
#endif
}

void mp_cancel_trigger(struct mp_cancel *c)
{
    mp_mutex_lock(&c->lock);
    trigger_locked(c);
    mp_mutex_unlock(&c->lock);
}

void mp_cancel_reset(struct mp_cancel *c)
{
    mp_mutex_lock(&c->lock);

    atomic_store(&c->triggered, false);

    if (c->wakeup_pipe[0] >= 0) {
        // Flush it fully.
        while (1) {
            int r = read(c->wakeup_pipe[0], &(char[256]){0}, 256);
            if (r <= 0 && !(r < 0 && errno == EINTR))
                break;
        }
    }

#ifdef _WIN32
    if (c->win32_event)
        ResetEvent(c->win32_event);
#endif

    mp_mutex_unlock(&c->lock);
}

bool mp_cancel_test(struct mp_cancel *c)
{
    return c ? atomic_load_explicit(&c->triggered, memory_order_relaxed) : false;
}

bool mp_cancel_wait(struct mp_cancel *c, double timeout)
{
    int64_t wait_until = mp_time_ns_add(mp_time_ns(), timeout);
    mp_mutex_lock(&c->lock);
    while (!mp_cancel_test(c)) {
        if (mp_cond_timedwait_until(&c->wakeup, &c->lock, wait_until))
            break;
    }
    mp_mutex_unlock(&c->lock);

    return mp_cancel_test(c);
}
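
// Sketch: make a blocking retry loop abortable by sleeping on the mp_cancel
// instead of a plain timer (mp_cancel_wait() returns true if cancellation
// was triggered):
//
//   while (!try_connect()) {            // try_connect(): assumed helper
//       if (mp_cancel_wait(c, 0.5))
//           return false;               // canceled -> give up
//   }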

// If a new notification mechanism was added, and the mp_cancel state was
// already triggered, make sure the newly added mechanism is also triggered.
static void retrigger_locked(struct mp_cancel *c)
{
    if (mp_cancel_test(c))
        trigger_locked(c);
}

void mp_cancel_set_cb(struct mp_cancel *c, void (*cb)(void *ctx), void *ctx)
{
    mp_mutex_lock(&c->lock);
    c->cb = cb;
    c->cb_ctx = ctx;
    retrigger_locked(c);
    mp_mutex_unlock(&c->lock);
}
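
// Note that the callback runs with c->lock held, and (via retrigger_locked())
// may be invoked from inside mp_cancel_set_cb() itself if the mp_cancel was
// already triggered. It must therefore be cheap and non-blocking, and must
// not call back into this mp_cancel (the lock is not recursive).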

void mp_cancel_set_parent(struct mp_cancel *slave, struct mp_cancel *parent)
{
    // We can access slave->parent without synchronization, because:
    // - concurrent mp_cancel_set_parent() calls on the same slave are not allowed
    // - slave->parent needs to stay valid as long as the slave exists
    if (slave->parent == parent)
        return;
    if (slave->parent) {
        mp_mutex_lock(&slave->parent->lock);
        LL_REMOVE(siblings, &slave->parent->slaves, slave);
        mp_mutex_unlock(&slave->parent->lock);
    }
    slave->parent = parent;
    if (slave->parent) {
        mp_mutex_lock(&slave->parent->lock);
        LL_APPEND(siblings, &slave->parent->slaves, slave);
        retrigger_locked(slave->parent);
        mp_mutex_unlock(&slave->parent->lock);
    }
}
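
// Sketch: chain a per-operation mp_cancel to a longer-lived one, so that
// triggering the parent also triggers the slave:
//
//   struct mp_cancel *op = mp_cancel_new(NULL);
//   mp_cancel_set_parent(op, global_cancel);  // global_cancel: assumed to exist
//   // ...run the cancellable operation with "op"...
//   talloc_free(op);  // cancel_destroy() detaches it from the parent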

int mp_cancel_get_fd(struct mp_cancel *c)
{
    mp_mutex_lock(&c->lock);
    if (c->wakeup_pipe[0] < 0) {
#if defined(__GNUC__) && !defined(__clang__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wstringop-overflow="
#endif
        mp_make_wakeup_pipe(c->wakeup_pipe);
#if defined(__GNUC__) && !defined(__clang__)
# pragma GCC diagnostic pop
#endif
        retrigger_locked(c);
    }
    mp_mutex_unlock(&c->lock);

    return c->wakeup_pipe[0];
}
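
// Sketch (POSIX): the returned fd becomes readable once the mp_cancel is
// triggered, so it can be multiplexed with other I/O:
//
//   struct pollfd fds[2] = {
//       {.fd = mp_cancel_get_fd(c), .events = POLLIN},
//       {.fd = data_fd,             .events = POLLIN},  // data_fd: assumed
//   };
//   poll(fds, 2, -1);
//   if (fds[0].revents & POLLIN) {
//       // canceled -> stop waiting on data_fd
//   }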

#ifdef _WIN32
void *mp_cancel_get_event(struct mp_cancel *c)
{
    mp_mutex_lock(&c->lock);
    if (!c->win32_event) {
        c->win32_event = CreateEventW(NULL, TRUE, FALSE, NULL);
        retrigger_locked(c);
    }
    mp_mutex_unlock(&c->lock);

    return c->win32_event;
}
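
// Sketch (win32): the returned manual-reset event is signaled on trigger and
// can be combined with other waitable handles:
//
//   HANDLE h[2] = {mp_cancel_get_event(c), io_event};  // io_event: assumed
//   if (WaitForMultipleObjects(2, h, FALSE, INFINITE) == WAIT_OBJECT_0) {
//       // canceled
//   }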
#endif