/* Copyright (C) 2017 the mpv developers
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <math.h>
#include <assert.h>

#include "common/common.h"
#include "common/global.h"
#include "common/msg.h"
#include "common/msg_control.h"
#include "common/global.h"
#include "input/input.h"
#include "input/cmd.h"
#include "misc/ctype.h"
#include "misc/dispatch.h"
#include "misc/node.h"
#include "misc/rendezvous.h"
#include "misc/thread_tools.h"
#include "options/m_config.h"
#include "options/m_option.h"
#include "options/m_property.h"
#include "options/path.h"
#include "options/parse_configfile.h"
#include "osdep/atomic.h"
#include "osdep/threads.h"
#include "osdep/timer.h"
#include "osdep/io.h"
#include "stream/stream.h"

#include "command.h"
#include "core.h"
#include "client.h"

/*
 * Locking hierarchy:
 *
 *  MPContext > mp_client_api.lock > mpv_handle.lock > * > mpv_handle.wakeup_lock
 *
 * MPContext strictly speaking has no locks, and instead is implicitly managed
 * by MPContext.dispatch, which basically stops the playback thread at defined
 * points in order to let clients access it in a synchronized manner. Since
 * MPContext code accesses the client API, it's on top of the lock hierarchy.
 *
 */
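
/*
 * A minimal sketch of the acquisition order implied above (hypothetical
 * helper, not used anywhere; it merely spells out that the core is "locked"
 * first via the dispatch queue, like lock_core() further below, and that the
 * narrower locks may only be taken afterwards, never the other way around):
 */
#if 0
static void example_lock_order(struct mpv_handle *ctx)
{
    mp_dispatch_lock(ctx->mpctx->dispatch);   // core: playback thread parked
    pthread_mutex_lock(&ctx->clients->lock);  // then mp_client_api.lock
    pthread_mutex_lock(&ctx->lock);           // then mpv_handle.lock
    // ... access state ...
    pthread_mutex_unlock(&ctx->lock);
    pthread_mutex_unlock(&ctx->clients->lock);
    mp_dispatch_unlock(ctx->mpctx->dispatch);
}
#endif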

struct mp_client_api {
    struct MPContext *mpctx;

    pthread_mutex_t lock;

    // -- protected by lock

    struct mpv_handle **clients;
    int num_clients;
    bool shutting_down; // do not allow new clients
    bool have_terminator; // a client took over the role of destroying the core
    bool terminate_core_thread; // make libmpv core thread exit
    // This is incremented whenever the clients[] array above changes. This is
    // used to safely unlock mp_client_api.lock while iterating the list of
    // clients.
    uint64_t clients_list_change_ts;
    int64_t id_alloc;

    struct mp_custom_protocol *custom_protocols;
    int num_custom_protocols;

    struct mpv_render_context *render_context;
    struct mpv_opengl_cb_context *gl_cb_ctx;
};
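
/*
 * A minimal sketch of how clients_list_change_ts is meant to be used
 * (hypothetical helper): snapshot the counter, drop the lock for work that
 * must not hold it, relock, and restart the iteration if the counter moved,
 * since clients[] may have been reallocated in the meantime.
 */
#if 0
static void example_iterate_clients(struct mp_client_api *clients)
{
    pthread_mutex_lock(&clients->lock);
restart:;
    uint64_t ts = clients->clients_list_change_ts;
    for (int n = 0; n < clients->num_clients; n++) {
        pthread_mutex_unlock(&clients->lock);
        // ... work that must not hold clients->lock ...
        pthread_mutex_lock(&clients->lock);
        if (ts != clients->clients_list_change_ts)
            goto restart; // list changed; n and any cached pointers are stale
    }
    pthread_mutex_unlock(&clients->lock);
}
#endif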

struct observe_property {
    // -- immutable
    struct mpv_handle *owner;
    char *name;
    int id;                 // ==mp_get_property_id(name)
    uint64_t event_mask;    // ==mp_get_property_event_mask(name)
    int64_t reply_id;
    mpv_format format;
    const struct m_option *type;
    // -- protected by owner->lock
    size_t refcount;
    uint64_t change_ts;     // logical timestamp incremented on each change
    uint64_t value_ts;      // logical timestamp for value contents
    bool value_valid;
    union m_option_value value;
    uint64_t value_ret_ts;  // logical timestamp of value returned to user
    union m_option_value value_ret;
    bool waiting_for_hook;  // flag for draining old property changes on a hook
};
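
/*
 * A minimal sketch of how the three logical timestamps above relate
 * (hypothetical helpers; the real checks live in the update and event code
 * later in this file): change_ts marks that the property may have changed,
 * value_ts marks which change the cached value corresponds to, and
 * value_ret_ts marks which value the user has already been handed.
 */
#if 0
static bool example_value_is_stale(struct observe_property *prop)
{
    return prop->value_ts != prop->change_ts;       // needs a re-read
}

static bool example_user_update_pending(struct observe_property *prop)
{
    return prop->value_ret_ts != prop->value_ts;    // needs an event
}
#endif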

struct mpv_handle {
    // -- immutable
    char name[MAX_CLIENT_NAME];
    struct mp_log *log;
    struct MPContext *mpctx;
    struct mp_client_api *clients;
    int64_t id;

    // -- not thread-safe
    struct mpv_event *cur_event;
    struct mpv_event_property cur_property_event;
    struct observe_property *cur_property;

    pthread_mutex_t lock;

    pthread_mutex_t wakeup_lock;
    pthread_cond_t wakeup;

    // -- protected by wakeup_lock
    bool need_wakeup;
    void (*wakeup_cb)(void *d);
    void *wakeup_cb_ctx;
    int wakeup_pipe[2];

    // -- protected by lock

    uint64_t event_mask;
    bool queued_wakeup;

    mpv_event *events;      // ringbuffer of max_events entries
    int max_events;         // allocated number of entries in events
    int first_event;        // events[first_event] is the first readable event
    int num_events;         // number of readable events
    int reserved_events;    // number of entries reserved for replies
    size_t async_counter;   // pending other async events
    bool choked;            // recovering from queue overflow
    bool destroying;        // pending destruction; no API accesses allowed
    bool hook_pending;      // hook events are returned after draining properties

    struct observe_property **properties;
    int num_properties;
    bool has_pending_properties; // (maybe) new property events (producer side)
    bool new_property_events; // new property events (consumer side)
    int cur_property_index; // round-robin for property events (consumer side)
    uint64_t property_event_masks; // or-ed together event masks of all properties
    // This is incremented whenever the properties[] array above changes. This
    // is used to safely unlock mpv_handle.lock while reading a property. If
    // the counter didn't change between unlock and relock, then it will assume
    // the array did not change.
    uint64_t properties_change_ts;

    bool fuzzy_initialized; // see scripting.c wait_loaded()
    bool is_weak;           // can not keep core alive on its own
    struct mp_log_buffer *messages;
    int messages_level;
};
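
/*
 * A minimal sketch of the event ring buffer fields above (hypothetical
 * helpers; the real enqueue/dequeue code appears later in this file):
 * events[] is a circular buffer, first_event is the read position, and
 * num_events is the fill level.
 */
#if 0
static void example_enqueue(struct mpv_handle *ctx, mpv_event e)
{
    assert(ctx->num_events < ctx->max_events);
    ctx->events[(ctx->first_event + ctx->num_events) % ctx->max_events] = e;
    ctx->num_events += 1;
}

static mpv_event example_dequeue(struct mpv_handle *ctx)
{
    assert(ctx->num_events > 0);
    mpv_event e = ctx->events[ctx->first_event];
    ctx->first_event = (ctx->first_event + 1) % ctx->max_events;
    ctx->num_events -= 1;
    return e;
}
#endif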

static bool gen_log_message_event(struct mpv_handle *ctx);
static bool gen_property_change_event(struct mpv_handle *ctx);
static void notify_property_events(struct mpv_handle *ctx, int event);

// Must be called with prop->owner->lock held.
static void prop_unref(struct observe_property *prop)
{
    if (!prop)
        return;

    assert(prop->refcount > 0);
    prop->refcount -= 1;
    if (!prop->refcount)
        talloc_free(prop);
}

void mp_clients_init(struct MPContext *mpctx)
{
    mpctx->clients = talloc_ptrtype(NULL, mpctx->clients);
    *mpctx->clients = (struct mp_client_api) {
        .mpctx = mpctx,
    };
    mpctx->global->client_api = mpctx->clients;
    pthread_mutex_init(&mpctx->clients->lock, NULL);
}

void mp_clients_destroy(struct MPContext *mpctx)
{
    if (!mpctx->clients)
        return;
    assert(mpctx->clients->num_clients == 0);

    TA_FREEP(&mpctx->clients->gl_cb_ctx);

    // The API user is supposed to call mpv_render_context_free(). It's simply
    // not allowed not to do this.
    if (mpctx->clients->render_context) {
        MP_FATAL(mpctx, "Broken API use: mpv_render_context_free() not called.\n");
        abort();
    }
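
    // From the API user's perspective, the expected teardown order is
    // (illustrative; "rctx" and "handle" stand for the user's own variables):
    //
    //     mpv_render_context_free(rctx);   // release the render context first
    //     mpv_terminate_destroy(handle);   // only then destroy the core
    //
    // Reaching the check above means the first step was skipped.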

    pthread_mutex_destroy(&mpctx->clients->lock);
    talloc_free(mpctx->clients);
    mpctx->clients = NULL;
}

// Test for "fuzzy" initialization of all clients. That is, all clients have
// called mpv_wait_event() at least once since creation (or have exited).
bool mp_clients_all_initialized(struct MPContext *mpctx)
{
    bool all_ok = true;
    pthread_mutex_lock(&mpctx->clients->lock);
    for (int n = 0; n < mpctx->clients->num_clients; n++) {
        struct mpv_handle *ctx = mpctx->clients->clients[n];
        pthread_mutex_lock(&ctx->lock);
        all_ok &= ctx->fuzzy_initialized;
        pthread_mutex_unlock(&ctx->lock);
    }
    pthread_mutex_unlock(&mpctx->clients->lock);
    return all_ok;
}

static struct mpv_handle *find_client_id(struct mp_client_api *clients, int64_t id)
{
    for (int n = 0; n < clients->num_clients; n++) {
        if (clients->clients[n]->id == id)
            return clients->clients[n];
    }
    return NULL;
}

static struct mpv_handle *find_client(struct mp_client_api *clients,
                                      const char *name)
{
    if (name[0] == '@') {
        char *end;
        errno = 0;
        long long int id = strtoll(name + 1, &end, 10);
        if (errno || end[0])
            return NULL;
        return find_client_id(clients, id);
    }

    for (int n = 0; n < clients->num_clients; n++) {
        if (strcmp(clients->clients[n]->name, name) == 0)
            return clients->clients[n];
    }

    return NULL;
}

bool mp_client_id_exists(struct MPContext *mpctx, int64_t id)
{
    pthread_mutex_lock(&mpctx->clients->lock);
    bool r = find_client_id(mpctx->clients, id);
    pthread_mutex_unlock(&mpctx->clients->lock);
    return r;
}

struct mpv_handle *mp_new_client(struct mp_client_api *clients, const char *name)
{
    pthread_mutex_lock(&clients->lock);

    char nname[MAX_CLIENT_NAME];
    for (int n = 1; n < 1000; n++) {
        if (!name)
            name = "client";
        snprintf(nname, sizeof(nname) - 3, "%s", name); // - space for number
        for (int i = 0; nname[i]; i++)
            nname[i] = mp_isalnum(nname[i]) ? nname[i] : '_';
        if (n > 1)
            mp_snprintf_cat(nname, sizeof(nname), "%d", n);
        if (!find_client(clients, nname))
            break;
        nname[0] = '\0';
    }

    if (!nname[0] || clients->shutting_down) {
        pthread_mutex_unlock(&clients->lock);
        return NULL;
    }

    int num_events = 1000;

    struct mpv_handle *client = talloc_ptrtype(NULL, client);
    *client = (struct mpv_handle){
        .log = mp_log_new(client, clients->mpctx->log, nname),
        .mpctx = clients->mpctx,
        .clients = clients,
        .id = ++(clients->id_alloc),
        .cur_event = talloc_zero(client, struct mpv_event),
        .events = talloc_array(client, mpv_event, num_events),
        .max_events = num_events,
        .event_mask = (1ULL << INTERNAL_EVENT_BASE) - 1, // exclude internal events
        .wakeup_pipe = {-1, -1},
    };
    pthread_mutex_init(&client->lock, NULL);
    pthread_mutex_init(&client->wakeup_lock, NULL);
    pthread_cond_init(&client->wakeup, NULL);

    snprintf(client->name, sizeof(client->name), "%s", nname);

    clients->clients_list_change_ts += 1;
    MP_TARRAY_APPEND(clients, clients->clients, clients->num_clients, client);

    if (clients->num_clients == 1 && !clients->mpctx->is_cli)
        client->fuzzy_initialized = true;

    pthread_mutex_unlock(&clients->lock);

    mpv_request_event(client, MPV_EVENT_TICK, 0);

    return client;
}

void mp_client_set_weak(struct mpv_handle *ctx)
{
    pthread_mutex_lock(&ctx->lock);
    ctx->is_weak = true;
    pthread_mutex_unlock(&ctx->lock);
}

const char *mpv_client_name(mpv_handle *ctx)
{
    return ctx->name;
}

int64_t mpv_client_id(mpv_handle *ctx)
{
    return ctx->id;
}
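
/*
 * A minimal usage sketch of the two accessors above from the public API side
 * (illustrative only; "scripts" is an arbitrary name, error handling omitted):
 */
#if 0
#include <inttypes.h>
#include <stdio.h>
#include <mpv/client.h>

static void example_named_client(void)
{
    mpv_handle *main_h = mpv_create();
    mpv_initialize(main_h);

    // Second handle on the same core; the name is sanitized and made unique
    // by mp_new_client() above if "scripts" is already taken.
    mpv_handle *h = mpv_create_client(main_h, "scripts");
    printf("name=%s id=%" PRId64 "\n", mpv_client_name(h), mpv_client_id(h));

    mpv_destroy(h);
    mpv_terminate_destroy(main_h);
}
#endif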

struct mp_log *mp_client_get_log(struct mpv_handle *ctx)
{
    return ctx->log;
}

struct mpv_global *mp_client_get_global(struct mpv_handle *ctx)
{
    return ctx->mpctx->global;
}

static void wakeup_client(struct mpv_handle *ctx)
{
    pthread_mutex_lock(&ctx->wakeup_lock);
    if (!ctx->need_wakeup) {
        ctx->need_wakeup = true;
        pthread_cond_broadcast(&ctx->wakeup);
        if (ctx->wakeup_cb)
            ctx->wakeup_cb(ctx->wakeup_cb_ctx);
        if (ctx->wakeup_pipe[0] != -1)
            (void)write(ctx->wakeup_pipe[1], &(char){0}, 1);
    }
    pthread_mutex_unlock(&ctx->wakeup_lock);
}

// Note: the caller has to deal with sporadic wakeups.
static int wait_wakeup(struct mpv_handle *ctx, int64_t end)
{
    int r = 0;
    pthread_mutex_unlock(&ctx->lock);
    pthread_mutex_lock(&ctx->wakeup_lock);
    if (!ctx->need_wakeup) {
        struct timespec ts = mp_time_us_to_timespec(end);
        r = pthread_cond_timedwait(&ctx->wakeup, &ctx->wakeup_lock, &ts);
    }
    if (r == 0)
        ctx->need_wakeup = false;
    pthread_mutex_unlock(&ctx->wakeup_lock);
    pthread_mutex_lock(&ctx->lock);
    return r;
}

void mpv_set_wakeup_callback(mpv_handle *ctx, void (*cb)(void *d), void *d)
{
    pthread_mutex_lock(&ctx->wakeup_lock);
    ctx->wakeup_cb = cb;
    ctx->wakeup_cb_ctx = d;
    if (ctx->wakeup_cb)
        ctx->wakeup_cb(ctx->wakeup_cb_ctx);
    pthread_mutex_unlock(&ctx->wakeup_lock);
}
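
/*
 * A minimal sketch of how an API user is expected to consume the wakeup
 * callback (illustrative; per the contract documented in libmpv/client.h the
 * callback should not call mpv_* functions itself, it should only wake up
 * another thread, which then drains the event queue):
 */
#if 0
#include <semaphore.h>
#include <mpv/client.h>

static sem_t wakeup_sem;

static void on_mpv_wakeup(void *d)
{
    (void)d;
    sem_post(&wakeup_sem); // just signal; no mpv_* calls here
}

static void example_event_loop(mpv_handle *h)
{
    sem_init(&wakeup_sem, 0, 0);
    mpv_set_wakeup_callback(h, on_mpv_wakeup, NULL);
    while (1) {
        sem_wait(&wakeup_sem);
        while (1) { // one wakeup may cover many queued events
            mpv_event *e = mpv_wait_event(h, 0);
            if (e->event_id == MPV_EVENT_NONE)
                break;
            if (e->event_id == MPV_EVENT_SHUTDOWN)
                return;
            // ... handle other events ...
        }
    }
}
#endif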

void mpv_suspend(mpv_handle *ctx)
{
    MP_ERR(ctx, "mpv_suspend() is deprecated and does nothing.\n");
}

void mpv_resume(mpv_handle *ctx)
{
}

static void lock_core(mpv_handle *ctx)
{
    mp_dispatch_lock(ctx->mpctx->dispatch);
}

static void unlock_core(mpv_handle *ctx)
{
    mp_dispatch_unlock(ctx->mpctx->dispatch);
}

void mpv_wait_async_requests(mpv_handle *ctx)
{
    pthread_mutex_lock(&ctx->lock);
    while (ctx->reserved_events || ctx->async_counter)
        wait_wakeup(ctx, INT64_MAX);
    pthread_mutex_unlock(&ctx->lock);
}

// Send an abort signal to all matching work items.
// If type==0, abort all work items of the matching ctx.
// If ctx==NULL, abort all work items.
|
|
|
|
static void abort_async(struct MPContext *mpctx, mpv_handle *ctx,
|
|
|
|
int type, uint64_t id)
|
|
|
|
{
|
|
|
|
pthread_mutex_lock(&mpctx->abort_lock);
|
|
|
|
|
|
|
|
// Destroy all => ensure any newly appearing work is aborted immediately.
|
|
|
|
if (ctx == NULL)
|
|
|
|
mpctx->abort_all = true;
|
|
|
|
|
|
|
|
for (int n = 0; n < mpctx->num_abort_list; n++) {
|
|
|
|
struct mp_abort_entry *abort = mpctx->abort_list[n];
|
|
|
|
if (!ctx || (abort->client == ctx && (!type ||
|
|
|
|
(abort->client_work_type == type && abort->client_work_id == id))))
|
|
|
|
{
|
|
|
|
mp_abort_trigger_locked(mpctx, abort);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&mpctx->abort_lock);
|
|
|
|
}
|
|
|
|
|
2018-03-09 03:46:09 +00:00
|
|
|
static void get_thread(void *ptr)
|
|
|
|
{
|
|
|
|
*(pthread_t *)ptr = pthread_self();
|
|
|
|
}

static void mp_destroy_client(mpv_handle *ctx, bool terminate)
{
    if (!ctx)
        return;

    struct MPContext *mpctx = ctx->mpctx;
    struct mp_client_api *clients = ctx->clients;

    MP_DBG(ctx, "Exiting...\n");

    if (terminate)
        mpv_command(ctx, (const char*[]){"quit", NULL});

    pthread_mutex_lock(&ctx->lock);

    ctx->destroying = true;

    for (int n = 0; n < ctx->num_properties; n++)
        prop_unref(ctx->properties[n]);
    ctx->num_properties = 0;
    ctx->properties_change_ts += 1;

    prop_unref(ctx->cur_property);
    ctx->cur_property = NULL;

    pthread_mutex_unlock(&ctx->lock);

    abort_async(mpctx, ctx, 0, 0);

    // reserved_events equals the number of asynchronous requests that weren't
    // yet replied. In order to avoid that trying to reply to a removed client
    // causes a crash, block until all asynchronous requests were served.
    mpv_wait_async_requests(ctx);

    osd_set_external_remove_owner(mpctx->osd, ctx);
    mp_input_remove_sections_by_owner(mpctx->input, ctx->name);

    pthread_mutex_lock(&clients->lock);

    for (int n = 0; n < clients->num_clients; n++) {
        if (clients->clients[n] == ctx) {
            clients->clients_list_change_ts += 1;
            MP_TARRAY_REMOVE_AT(clients->clients, clients->num_clients, n);
            while (ctx->num_events) {
                talloc_free(ctx->events[ctx->first_event].data);
                ctx->first_event = (ctx->first_event + 1) % ctx->max_events;
                ctx->num_events--;
            }
            mp_msg_log_buffer_destroy(ctx->messages);
            pthread_cond_destroy(&ctx->wakeup);
            pthread_mutex_destroy(&ctx->wakeup_lock);
            pthread_mutex_destroy(&ctx->lock);
            if (ctx->wakeup_pipe[0] != -1) {
                close(ctx->wakeup_pipe[0]);
                close(ctx->wakeup_pipe[1]);
            }
            talloc_free(ctx);
            ctx = NULL;
            break;
        }
    }
    assert(!ctx);

    if (mpctx->is_cli) {
        terminate = false;
    } else {
        // If the last strong mpv_handle got destroyed, destroy the core.
        bool has_strong_ref = false;
        for (int n = 0; n < clients->num_clients; n++)
            has_strong_ref |= !clients->clients[n]->is_weak;
        if (!has_strong_ref)
            terminate = true;

        // Reserve the right to destroy mpctx for us.
        if (clients->have_terminator)
            terminate = false;
        clients->have_terminator |= terminate;
    }

    // mp_shutdown_clients() sleeps to avoid wasting CPU.
    // mp_hook_test_completion() also relies on this a bit.
    mp_wakeup_core(mpctx);

    pthread_mutex_unlock(&clients->lock);

    // Note that even if num_clients==0, having set have_terminator keeps mpctx
    // and the core thread alive.
    if (terminate) {
        // Make sure the core stops playing files etc. Being able to lock the
        // dispatch queue requires that the core thread is still active.
        mp_dispatch_lock(mpctx->dispatch);
        mpctx->stop_play = PT_QUIT;
        mp_dispatch_unlock(mpctx->dispatch);

        pthread_t playthread;
        mp_dispatch_run(mpctx->dispatch, get_thread, &playthread);

        // Ask the core thread to stop.
        pthread_mutex_lock(&clients->lock);
        clients->terminate_core_thread = true;
        pthread_mutex_unlock(&clients->lock);
        mp_wakeup_core(mpctx);

        // Blocking wait for all clients and core thread to terminate.
        pthread_join(playthread, NULL);

        mp_destroy(mpctx);
    }
}

void mpv_destroy(mpv_handle *ctx)
{
    mp_destroy_client(ctx, false);
}

void mpv_detach_destroy(mpv_handle *ctx)
{
    mpv_destroy(ctx);
}

void mpv_terminate_destroy(mpv_handle *ctx)
{
    mp_destroy_client(ctx, true);
}
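
/*
 * Illustrative lifecycle sketch (not part of this file): mpv_destroy() only
 * releases one handle, while mpv_terminate_destroy() additionally asks the
 * core to quit and may block until the core thread has exited (see
 * mp_destroy_client() above).
 *
 *     mpv_handle *h = mpv_create();
 *     if (!h)
 *         return -1;
 *     // ... mpv_initialize(), playback, event loop ...
 *     mpv_terminate_destroy(h);   // quits playback and frees the core
 */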

// Can be called on the core thread only. Idempotent.
// Also happens to take care of shutting down any async work.
void mp_shutdown_clients(struct MPContext *mpctx)
{
    struct mp_client_api *clients = mpctx->clients;

    // Forcefully abort async work after 2 seconds of waiting.
    double abort_time = mp_time_sec() + 2;

    pthread_mutex_lock(&clients->lock);

    // Prevent that new clients can appear.
    clients->shutting_down = true;

    // Wait until we can terminate.
    while (clients->num_clients || mpctx->outstanding_async ||
           !(mpctx->is_cli || clients->terminate_core_thread))
    {
        pthread_mutex_unlock(&clients->lock);

        double left = abort_time - mp_time_sec();
        if (left >= 0) {
            mp_set_timeout(mpctx, left);
        } else {
            // Forcefully abort any ongoing async work. This is quite rude and
            // probably not what everyone wants, so it happens only after a
            // timeout.
            abort_async(mpctx, NULL, 0, 0);
        }

        mp_client_broadcast_event(mpctx, MPV_EVENT_SHUTDOWN, NULL);
        mp_wait_events(mpctx);

        pthread_mutex_lock(&clients->lock);
    }

    pthread_mutex_unlock(&clients->lock);
}

bool mp_is_shutting_down(struct MPContext *mpctx)
{
    struct mp_client_api *clients = mpctx->clients;
    pthread_mutex_lock(&clients->lock);
    bool res = clients->shutting_down;
    pthread_mutex_unlock(&clients->lock);
    return res;
}

static void *core_thread(void *p)
{
    struct MPContext *mpctx = p;

    mpthread_set_name("mpv core");

    while (!mpctx->initialized && mpctx->stop_play != PT_QUIT)
        mp_idle(mpctx);

    if (mpctx->initialized)
        mp_play_files(mpctx);

    // This actually waits until all clients are gone before actually
    // destroying mpctx. Actual destruction is done by whatever destroys
    // the last mpv_handle.
    mp_shutdown_clients(mpctx);

    return NULL;
}

mpv_handle *mpv_create(void)
{
    struct MPContext *mpctx = mp_create();
    if (!mpctx)
        return NULL;

    m_config_set_profile(mpctx->mconfig, "libmpv", 0);

    mpv_handle *ctx = mp_new_client(mpctx->clients, "main");
    if (!ctx) {
        mp_destroy(mpctx);
        return NULL;
    }

    pthread_t thread;
    if (pthread_create(&thread, NULL, core_thread, mpctx) != 0) {
        ctx->clients->have_terminator = true; // avoid blocking
        mpv_terminate_destroy(ctx);
        mp_destroy(mpctx);
        return NULL;
    }

    return ctx;
}

mpv_handle *mpv_create_client(mpv_handle *ctx, const char *name)
{
    if (!ctx)
        return mpv_create();
    mpv_handle *new = mp_new_client(ctx->mpctx->clients, name);
    if (new)
        mpv_wait_event(new, 0); // set fuzzy_initialized
    return new;
}

mpv_handle *mpv_create_weak_client(mpv_handle *ctx, const char *name)
{
    mpv_handle *new = mpv_create_client(ctx, name);
    if (new)
        mp_client_set_weak(new);
    return new;
}

int mpv_initialize(mpv_handle *ctx)
{
    lock_core(ctx);
    int res = mp_initialize(ctx->mpctx, NULL) ? MPV_ERROR_INVALID_PARAMETER : 0;
    mp_wakeup_core(ctx->mpctx);
    unlock_core(ctx);
    return res;
}
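
/*
 * Minimal client sketch (illustrative only; the file path and the "vo"
 * value are placeholders): options are set between mpv_create() and
 * mpv_initialize(), after which commands and the event loop become usable.
 *
 *     #include <mpv/client.h>
 *
 *     int play_one_file(void)
 *     {
 *         mpv_handle *h = mpv_create();
 *         if (!h)
 *             return -1;
 *         mpv_set_option_string(h, "vo", "null");       // example option
 *         if (mpv_initialize(h) < 0) {
 *             mpv_terminate_destroy(h);
 *             return -1;
 *         }
 *         const char *cmd[] = {"loadfile", "/tmp/example.mkv", NULL};
 *         mpv_command(h, cmd);
 *         while (1) {
 *             mpv_event *e = mpv_wait_event(h, -1);
 *             if (e->event_id == MPV_EVENT_SHUTDOWN ||
 *                 e->event_id == MPV_EVENT_END_FILE)
 *                 break;
 *         }
 *         mpv_terminate_destroy(h);
 *         return 0;
 *     }
 */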

// set ev->data to a new copy of the original data
// (done only for message types that are broadcast)
static void dup_event_data(struct mpv_event *ev)
{
    switch (ev->event_id) {
    case MPV_EVENT_CLIENT_MESSAGE: {
        struct mpv_event_client_message *src = ev->data;
        struct mpv_event_client_message *msg =
            talloc_zero(NULL, struct mpv_event_client_message);
        for (int n = 0; n < src->num_args; n++) {
            MP_TARRAY_APPEND(msg, msg->args, msg->num_args,
                             talloc_strdup(msg, src->args[n]));
        }
        ev->data = msg;
        break;
    }
    case MPV_EVENT_START_FILE:
        ev->data = talloc_memdup(NULL, ev->data, sizeof(mpv_event_start_file));
        break;
    case MPV_EVENT_END_FILE:
        ev->data = talloc_memdup(NULL, ev->data, sizeof(mpv_event_end_file));
        break;
    default:
        // Doesn't use events with memory allocation.
        if (ev->data)
            abort();
    }
}

// Reserve an entry in the ring buffer. This can be used to guarantee that the
// reply can be made, even if the buffer becomes congested _after_ sending
// the request.
// Returns an error code if the buffer is full.
static int reserve_reply(struct mpv_handle *ctx)
{
    int res = MPV_ERROR_EVENT_QUEUE_FULL;
    pthread_mutex_lock(&ctx->lock);
    if (ctx->reserved_events + ctx->num_events < ctx->max_events && !ctx->choked)
    {
        ctx->reserved_events++;
        res = 0;
    }
    pthread_mutex_unlock(&ctx->lock);
    return res;
}

static int append_event(struct mpv_handle *ctx, struct mpv_event event, bool copy)
{
    if (ctx->num_events + ctx->reserved_events >= ctx->max_events)
        return -1;
    if (copy)
        dup_event_data(&event);
    ctx->events[(ctx->first_event + ctx->num_events) % ctx->max_events] = event;
    ctx->num_events++;
    wakeup_client(ctx);
    if (event.event_id == MPV_EVENT_SHUTDOWN)
        ctx->event_mask &= ctx->event_mask & ~(1ULL << MPV_EVENT_SHUTDOWN);
    return 0;
}

static int send_event(struct mpv_handle *ctx, struct mpv_event *event, bool copy)
{
    pthread_mutex_lock(&ctx->lock);
    uint64_t mask = 1ULL << event->event_id;
    if (ctx->property_event_masks & mask)
        notify_property_events(ctx, event->event_id);
    int r;
    if (!(ctx->event_mask & mask)) {
        r = 0;
    } else if (ctx->choked) {
        r = -1;
    } else {
        r = append_event(ctx, *event, copy);
        if (r < 0) {
            MP_ERR(ctx, "Too many events queued.\n");
            ctx->choked = true;
        }
    }
    pthread_mutex_unlock(&ctx->lock);
    return r;
}

// Send a reply; the reply must have been previously reserved with
// reserve_reply (otherwise, use send_event()).
static void send_reply(struct mpv_handle *ctx, uint64_t userdata,
                       struct mpv_event *event)
{
    event->reply_userdata = userdata;
    pthread_mutex_lock(&ctx->lock);
    // If this fails, reserve_reply() probably wasn't called.
    assert(ctx->reserved_events > 0);
    ctx->reserved_events--;
    if (append_event(ctx, *event, false) < 0)
        abort(); // not reached
    pthread_mutex_unlock(&ctx->lock);
}
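
/*
 * Sketch of how the helpers above pair up for an asynchronous request
 * (illustrative; start_request and the ellipsized work are placeholders).
 * The reservation guarantees that the eventual reply cannot be dropped even
 * if the event ring buffer fills up in the meantime:
 *
 *     int start_request(struct mpv_handle *ctx, uint64_t userdata)
 *     {
 *         int err = reserve_reply(ctx);         // claim a queue slot
 *         if (err < 0)
 *             return err;                       // MPV_ERROR_EVENT_QUEUE_FULL
 *         // ... run the work; its completion callback then builds a
 *         // struct mpv_event reply and calls
 *         // send_reply(ctx, userdata, &reply); // consumes the reservation
 *         return 0;
 *     }
 */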

void mp_client_broadcast_event(struct MPContext *mpctx, int event, void *data)
{
    struct mp_client_api *clients = mpctx->clients;

    pthread_mutex_lock(&clients->lock);

    for (int n = 0; n < clients->num_clients; n++) {
        struct mpv_event event_data = {
            .event_id = event,
            .data = data,
        };
        send_event(clients->clients[n], &event_data, true);
    }

    pthread_mutex_unlock(&clients->lock);
}

// Like mp_client_broadcast_event(), but can be called from any thread.
// Avoid using this.
void mp_client_broadcast_event_external(struct mp_client_api *api, int event,
                                        void *data)
{
    struct MPContext *mpctx = api->mpctx;

    mp_client_broadcast_event(mpctx, event, data);
    mp_wakeup_core(mpctx);
}

// If client_name == NULL, then broadcast and free the event.
int mp_client_send_event(struct MPContext *mpctx, const char *client_name,
                         uint64_t reply_userdata, int event, void *data)
{
    if (!client_name) {
        mp_client_broadcast_event(mpctx, event, data);
        talloc_free(data);
        return 0;
    }

    struct mp_client_api *clients = mpctx->clients;
    int r = 0;

    struct mpv_event event_data = {
        .event_id = event,
        .data = data,
        .reply_userdata = reply_userdata,
    };

    pthread_mutex_lock(&clients->lock);

    struct mpv_handle *ctx = find_client(clients, client_name);
    if (ctx) {
        r = send_event(ctx, &event_data, false);
    } else {
        r = -1;
        talloc_free(data);
    }

    pthread_mutex_unlock(&clients->lock);

    return r;
}

int mp_client_send_event_dup(struct MPContext *mpctx, const char *client_name,
                             int event, void *data)
{
    if (!client_name) {
        mp_client_broadcast_event(mpctx, event, data);
        return 0;
    }

    struct mpv_event event_data = {
        .event_id = event,
        .data = data,
    };

    dup_event_data(&event_data);
    return mp_client_send_event(mpctx, client_name, 0, event, event_data.data);
}

static bool deprecated_events[] = {
    [MPV_EVENT_TRACKS_CHANGED] = true,
    [MPV_EVENT_TRACK_SWITCHED] = true,
    [MPV_EVENT_IDLE] = true,
    [MPV_EVENT_PAUSE] = true,
    [MPV_EVENT_UNPAUSE] = true,
    [MPV_EVENT_TICK] = true,
    [MPV_EVENT_SCRIPT_INPUT_DISPATCH] = true,
    [MPV_EVENT_METADATA_UPDATE] = true,
    [MPV_EVENT_CHAPTER_CHANGE] = true,
};

int mpv_request_event(mpv_handle *ctx, mpv_event_id event, int enable)
{
    if (!mpv_event_name(event) || enable < 0 || enable > 1)
        return MPV_ERROR_INVALID_PARAMETER;
    if (event == MPV_EVENT_SHUTDOWN && !enable)
        return MPV_ERROR_INVALID_PARAMETER;
    assert(event < (int)INTERNAL_EVENT_BASE); // excluded above; they have no name
    pthread_mutex_lock(&ctx->lock);
    uint64_t bit = 1ULL << event;
    ctx->event_mask = enable ? ctx->event_mask | bit : ctx->event_mask & ~bit;
    if (enable && event < MP_ARRAY_SIZE(deprecated_events) &&
        deprecated_events[event])
    {
        MP_WARN(ctx, "The '%s' event is deprecated and will be removed.\n",
                mpv_event_name(event));
    }
    pthread_mutex_unlock(&ctx->lock);
    return 0;
}
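
/*
 * Usage sketch (illustrative): events are enabled by default, so a client
 * that does not care about the deprecated tick/idle notifications can simply
 * turn them off to reduce wakeups.
 *
 *     mpv_request_event(handle, MPV_EVENT_TICK, 0);
 *     mpv_request_event(handle, MPV_EVENT_IDLE, 0);
 *     // MPV_EVENT_SHUTDOWN cannot be disabled (see the check above).
 */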

// Set waiting_for_hook==true for all possibly pending properties.
static void set_wait_for_hook_flags(mpv_handle *ctx)
{
    for (int n = 0; n < ctx->num_properties; n++) {
        struct observe_property *prop = ctx->properties[n];

        if (prop->value_ret_ts != prop->change_ts)
            prop->waiting_for_hook = true;
    }
}

// Return whether any property still has waiting_for_hook set.
static bool check_for_for_hook_flags(mpv_handle *ctx)
{
    for (int n = 0; n < ctx->num_properties; n++) {
        if (ctx->properties[n]->waiting_for_hook)
            return true;
    }
    return false;
}

mpv_event *mpv_wait_event(mpv_handle *ctx, double timeout)
{
    mpv_event *event = ctx->cur_event;

    pthread_mutex_lock(&ctx->lock);

    if (!ctx->fuzzy_initialized)
        mp_wakeup_core(ctx->clients->mpctx);
    ctx->fuzzy_initialized = true;

    if (timeout < 0)
        timeout = 1e20;

    int64_t deadline = mp_add_timeout(mp_time_us(), timeout);

    *event = (mpv_event){0};
    talloc_free_children(event);

    while (1) {
        if (ctx->queued_wakeup)
            deadline = 0;
        // Recover from overflow.
        if (ctx->choked && !ctx->num_events) {
            ctx->choked = false;
            event->event_id = MPV_EVENT_QUEUE_OVERFLOW;
            break;
        }
        struct mpv_event *ev =
            ctx->num_events ? &ctx->events[ctx->first_event] : NULL;
        if (ev && ev->event_id == MPV_EVENT_HOOK) {
            // Give old property notifications priority over hooks. This is a
            // guarantee given to clients to simplify their logic. New property
            // changes after this are treated normally, so only changes that
            // were already pending when the hook was queued delay it.
            if (!ctx->hook_pending) {
                ctx->hook_pending = true;
                set_wait_for_hook_flags(ctx);
            }
            if (check_for_for_hook_flags(ctx)) {
                ev = NULL; // delay
            } else {
                ctx->hook_pending = false;
            }
        }
        if (ev) {
            *event = *ev;
            ctx->first_event = (ctx->first_event + 1) % ctx->max_events;
            ctx->num_events--;
            talloc_steal(event, event->data);
            break;
        }
        // If there's a changed property, generate change event (never queued).
        if (gen_property_change_event(ctx))
            break;
        // Pop item from message queue, and return as event.
        if (gen_log_message_event(ctx))
            break;
        int r = wait_wakeup(ctx, deadline);
        if (r == ETIMEDOUT)
            break;
    }
    ctx->queued_wakeup = false;

    pthread_mutex_unlock(&ctx->lock);

    return event;
}
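
/*
 * Event loop sketch (illustrative): when the ring buffer overflows, queued
 * events are lost and a single MPV_EVENT_QUEUE_OVERFLOW is returned once the
 * queue has drained (see the choked handling above), so a client that mirrors
 * player state should re-query it. handle_event() and resync_state() are
 * placeholders.
 *
 *     while (1) {
 *         mpv_event *e = mpv_wait_event(handle, -1);   // block indefinitely
 *         if (e->event_id == MPV_EVENT_SHUTDOWN)
 *             break;
 *         if (e->event_id == MPV_EVENT_QUEUE_OVERFLOW) {
 *             resync_state();
 *             continue;
 *         }
 *         handle_event(e);
 *     }
 */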

void mpv_wakeup(mpv_handle *ctx)
{
    pthread_mutex_lock(&ctx->lock);
    ctx->queued_wakeup = true;
    wakeup_client(ctx);
    pthread_mutex_unlock(&ctx->lock);
}
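
/*
 * Illustrative use of mpv_wakeup() (not part of this file): another thread
 * can interrupt a blocking mpv_wait_event() call, e.g. to make the event
 * thread re-check an application-side quit flag. app_quit is an assumed
 * application variable.
 *
 *     // thread A
 *     atomic_store(&app_quit, true);
 *     mpv_wakeup(handle);
 *
 *     // thread B: mpv_wait_event() returns (possibly with MPV_EVENT_NONE),
 *     // and the loop can check app_quit before waiting again.
 */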

// map client API types to internal types
static const struct m_option type_conv[] = {
    [MPV_FORMAT_STRING]     = { .type = CONF_TYPE_STRING },
    [MPV_FORMAT_FLAG]       = { .type = CONF_TYPE_FLAG },
    [MPV_FORMAT_INT64]      = { .type = CONF_TYPE_INT64 },
    [MPV_FORMAT_DOUBLE]     = { .type = CONF_TYPE_DOUBLE },
    [MPV_FORMAT_NODE]       = { .type = CONF_TYPE_NODE },
};

static const struct m_option *get_mp_type(mpv_format format)
{
    if ((unsigned)format >= MP_ARRAY_SIZE(type_conv))
        return NULL;
    if (!type_conv[format].type)
        return NULL;
    return &type_conv[format];
}

// for read requests - MPV_FORMAT_OSD_STRING special handling
static const struct m_option *get_mp_type_get(mpv_format format)
{
    if (format == MPV_FORMAT_OSD_STRING)
        format = MPV_FORMAT_STRING; // it's string data, just other semantics
    return get_mp_type(format);
}

// move src->dst, and do implicit conversion if possible (conversions to or
// from strings are handled otherwise)
static bool conv_node_to_format(void *dst, mpv_format dst_fmt, mpv_node *src)
{
    if (dst_fmt == src->format) {
        const struct m_option *type = get_mp_type(dst_fmt);
        memcpy(dst, &src->u, type->type->size);
        return true;
    }
    if (dst_fmt == MPV_FORMAT_DOUBLE && src->format == MPV_FORMAT_INT64) {
        *(double *)dst = src->u.int64;
        return true;
    }
    if (dst_fmt == MPV_FORMAT_INT64 && src->format == MPV_FORMAT_DOUBLE) {
        if (src->u.double_ > (double)INT64_MIN &&
            src->u.double_ < (double)INT64_MAX)
        {
            *(int64_t *)dst = src->u.double_;
            return true;
        }
    }
    return false;
}

void mpv_free_node_contents(mpv_node *node)
{
    static const struct m_option type = { .type = CONF_TYPE_NODE };
    m_option_free(&type, node);
}

int mpv_set_option(mpv_handle *ctx, const char *name, mpv_format format,
                   void *data)
{
    const struct m_option *type = get_mp_type(format);
    if (!type)
        return MPV_ERROR_OPTION_FORMAT;
    struct mpv_node tmp;
    if (format != MPV_FORMAT_NODE) {
        tmp.format = format;
        memcpy(&tmp.u, data, type->type->size);
        data = &tmp;
    }
    lock_core(ctx);
    int err = m_config_set_option_node(ctx->mpctx->mconfig, bstr0(name), data, 0);
    unlock_core(ctx);
    switch (err) {
    case M_OPT_MISSING_PARAM:
    case M_OPT_INVALID:
        return MPV_ERROR_OPTION_ERROR;
    case M_OPT_OUT_OF_RANGE:
        return MPV_ERROR_OPTION_FORMAT;
    case M_OPT_UNKNOWN:
        return MPV_ERROR_OPTION_NOT_FOUND;
    default:
        if (err >= 0)
            return 0;
        return MPV_ERROR_OPTION_ERROR;
    }
}

int mpv_set_option_string(mpv_handle *ctx, const char *name, const char *data)
{
    return mpv_set_option(ctx, name, MPV_FORMAT_STRING, &data);
}
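
/*
 * Usage sketch (illustrative; option values are examples): options can be
 * set as strings or in a typed format, and both paths go through
 * mpv_set_option() above.
 *
 *     mpv_set_option_string(handle, "hwdec", "auto");
 *     int fs = 1;
 *     mpv_set_option(handle, "fullscreen", MPV_FORMAT_FLAG, &fs);
 */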

// Run a command in the playback thread.
static void run_locked(mpv_handle *ctx, void (*fn)(void *fn_data), void *fn_data)
{
    mp_dispatch_lock(ctx->mpctx->dispatch);
    fn(fn_data);
    mp_dispatch_unlock(ctx->mpctx->dispatch);
}

// Run a command asynchronously. It's the responsibility of the caller to
// actually send the reply. This helper merely saves a small part of the
// required boilerplate to do so.
//  fn: callback to execute the request
//  fn_data: opaque caller-defined argument for fn. This will be automatically
//           freed with talloc_free(fn_data).
static int run_async(mpv_handle *ctx, void (*fn)(void *fn_data), void *fn_data)
{
    int err = reserve_reply(ctx);
    if (err < 0) {
        talloc_free(fn_data);
        return err;
    }
    mp_dispatch_enqueue(ctx->mpctx->dispatch, fn, fn_data);
    return 0;
}

struct cmd_request {
    struct MPContext *mpctx;
    struct mp_cmd *cmd;
    int status;
    struct mpv_node *res;
    struct mp_waiter completion;
};

static void cmd_complete(struct mp_cmd_ctx *cmd)
{
    struct cmd_request *req = cmd->on_completion_priv;

    req->status = cmd->success ? 0 : MPV_ERROR_COMMAND;
    if (req->res) {
        *req->res = cmd->result;
        cmd->result = (mpv_node){0};
    }

    // Unblock the waiting thread (especially for async commands).
    mp_waiter_wakeup(&req->completion, 0);
}

static int run_client_command(mpv_handle *ctx, struct mp_cmd *cmd, mpv_node *res)
{
    if (!cmd)
        return MPV_ERROR_INVALID_PARAMETER;
    if (!ctx->mpctx->initialized) {
        talloc_free(cmd);
        return MPV_ERROR_UNINITIALIZED;
    }

    cmd->sender = ctx->name;

    struct cmd_request req = {
        .mpctx = ctx->mpctx,
        .cmd = cmd,
        .res = res,
        .completion = MP_WAITER_INITIALIZER,
    };

    bool async = cmd->flags & MP_ASYNC_CMD;

    lock_core(ctx);
    if (async) {
        run_command(ctx->mpctx, cmd, NULL, NULL, NULL);
    } else {
        struct mp_abort_entry *abort = NULL;
        if (cmd->def->can_abort) {
            abort = talloc_zero(NULL, struct mp_abort_entry);
            abort->client = ctx;
        }
        run_command(ctx->mpctx, cmd, abort, cmd_complete, &req);
    }
    unlock_core(ctx);

    if (!async)
        mp_waiter_wait(&req.completion);

    return req.status;
}

int mpv_command(mpv_handle *ctx, const char **args)
{
    return run_client_command(ctx, mp_input_parse_cmd_strv(ctx->log, args), NULL);
}

int mpv_command_node(mpv_handle *ctx, mpv_node *args, mpv_node *result)
{
    struct mpv_node rn = {.format = MPV_FORMAT_NONE};
    int r = run_client_command(ctx, mp_input_parse_cmd_node(ctx->log, args), &rn);
    if (result && r >= 0)
        *result = rn;
    return r;
}

int mpv_command_ret(mpv_handle *ctx, const char **args, mpv_node *result)
{
    struct mpv_node rn = {.format = MPV_FORMAT_NONE};
    int r = run_client_command(ctx, mp_input_parse_cmd_strv(ctx->log, args), &rn);
    if (result && r >= 0)
        *result = rn;
    return r;
}

int mpv_command_string(mpv_handle *ctx, const char *args)
{
    return run_client_command(ctx,
        mp_input_parse_cmd(ctx->mpctx->input, bstr0((char*)args), ctx->name), NULL);
}
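
/*
 * Command entry points, illustrated (argument values are placeholders): all
 * of these end up in run_client_command() above and only differ in how the
 * command is parsed.
 *
 *     const char *args[] = {"seek", "10", "relative", NULL};
 *     mpv_command(handle, args);                      // argv-style
 *     mpv_command_string(handle, "seek 10 relative"); // input.conf syntax
 *
 *     mpv_node result;
 *     if (mpv_command_ret(handle, args, &result) >= 0)
 *         mpv_free_node_contents(&result);            // caller frees result
 */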

struct async_cmd_request {
    struct MPContext *mpctx;
    struct mp_cmd *cmd;
    struct mpv_handle *reply_ctx;
    uint64_t userdata;
};

static void async_cmd_complete(struct mp_cmd_ctx *cmd)
{
    struct async_cmd_request *req = cmd->on_completion_priv;

    struct mpv_event_command *data = talloc_zero(NULL, struct mpv_event_command);
    data->result = cmd->result;
    cmd->result = (mpv_node){0};
    talloc_steal(data, node_get_alloc(&data->result));

    struct mpv_event reply = {
        .event_id = MPV_EVENT_COMMAND_REPLY,
        .data = data,
        .error = cmd->success ? 0 : MPV_ERROR_COMMAND,
    };
    send_reply(req->reply_ctx, req->userdata, &reply);

    talloc_free(req);
}

static void async_cmd_fn(void *data)
{
    struct async_cmd_request *req = data;

    struct mp_cmd *cmd = req->cmd;
    ta_set_parent(cmd, NULL);
    req->cmd = NULL;

    struct mp_abort_entry *abort = NULL;
    if (cmd->def->can_abort) {
        abort = talloc_zero(NULL, struct mp_abort_entry);
        abort->client = req->reply_ctx;
        abort->client_work_type = MPV_EVENT_COMMAND_REPLY;
        abort->client_work_id = req->userdata;
    }

    // This will synchronously or asynchronously call cmd_complete (depending
    // on the command).
    run_command(req->mpctx, cmd, abort, async_cmd_complete, req);
}

static int run_async_cmd(mpv_handle *ctx, uint64_t ud, struct mp_cmd *cmd)
{
    if (!cmd)
        return MPV_ERROR_INVALID_PARAMETER;
    if (!ctx->mpctx->initialized) {
        talloc_free(cmd);
        return MPV_ERROR_UNINITIALIZED;
    }

    cmd->sender = ctx->name;

    struct async_cmd_request *req = talloc_ptrtype(NULL, req);
    *req = (struct async_cmd_request){
        .mpctx = ctx->mpctx,
        .cmd = talloc_steal(req, cmd),
        .reply_ctx = ctx,
        .userdata = ud,
    };
    return run_async(ctx, async_cmd_fn, req);
}

int mpv_command_async(mpv_handle *ctx, uint64_t ud, const char **args)
{
    return run_async_cmd(ctx, ud, mp_input_parse_cmd_strv(ctx->log, args));
}

int mpv_command_node_async(mpv_handle *ctx, uint64_t ud, mpv_node *args)
{
    return run_async_cmd(ctx, ud, mp_input_parse_cmd_node(ctx->log, args));
}

void mpv_abort_async_command(mpv_handle *ctx, uint64_t reply_userdata)
{
    abort_async(ctx->mpctx, ctx, MPV_EVENT_COMMAND_REPLY, reply_userdata);
}
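
/*
 * Async command sketch (illustrative): the reply later arrives as an
 * MPV_EVENT_COMMAND_REPLY event carrying the same reply_userdata, and a
 * still-running abortable command can be cancelled with
 * mpv_abort_async_command(). The userdata value 123 and the file path are
 * arbitrary placeholders.
 *
 *     const char *args[] = {"loadfile", "/tmp/example.mkv", NULL};
 *     mpv_command_async(handle, 123, args);
 *     // ... later, if the client changes its mind:
 *     mpv_abort_async_command(handle, 123);
 *     // ... in the event loop:
 *     //   if (e->event_id == MPV_EVENT_COMMAND_REPLY &&
 *     //       e->reply_userdata == 123)
 *     //       inspect e->error and ((mpv_event_command *)e->data)->result
 */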

static int translate_property_error(int errc)
{
    switch (errc) {
    case M_PROPERTY_OK:                 return 0;
    case M_PROPERTY_ERROR:              return MPV_ERROR_PROPERTY_ERROR;
    case M_PROPERTY_UNAVAILABLE:        return MPV_ERROR_PROPERTY_UNAVAILABLE;
    case M_PROPERTY_NOT_IMPLEMENTED:    return MPV_ERROR_PROPERTY_ERROR;
    case M_PROPERTY_UNKNOWN:            return MPV_ERROR_PROPERTY_NOT_FOUND;
    case M_PROPERTY_INVALID_FORMAT:     return MPV_ERROR_PROPERTY_FORMAT;
    // shouldn't happen
    default:                            return MPV_ERROR_PROPERTY_ERROR;
    }
}

struct setproperty_request {
    struct MPContext *mpctx;
    const char *name;
    int format;
    void *data;
    int status;
    struct mpv_handle *reply_ctx;
    uint64_t userdata;
};

static void setproperty_fn(void *arg)
{
    struct setproperty_request *req = arg;
    const struct m_option *type = get_mp_type(req->format);

    struct mpv_node *node;
    struct mpv_node tmp;
    if (req->format == MPV_FORMAT_NODE) {
        node = req->data;
    } else {
        tmp.format = req->format;
        memcpy(&tmp.u, req->data, type->type->size);
        node = &tmp;
    }

    int err = mp_property_do(req->name, M_PROPERTY_SET_NODE, node, req->mpctx);

    req->status = translate_property_error(err);

    if (req->reply_ctx) {
        struct mpv_event reply = {
            .event_id = MPV_EVENT_SET_PROPERTY_REPLY,
            .error = req->status,
        };
        send_reply(req->reply_ctx, req->userdata, &reply);
        talloc_free(req);
    }
}

int mpv_set_property(mpv_handle *ctx, const char *name, mpv_format format,
                     void *data)
{
    if (!ctx->mpctx->initialized) {
        int r = mpv_set_option(ctx, name, format, data);
        if (r == MPV_ERROR_OPTION_NOT_FOUND &&
            mp_get_property_id(ctx->mpctx, name) >= 0)
            return MPV_ERROR_PROPERTY_UNAVAILABLE;
        switch (r) {
        case MPV_ERROR_SUCCESS:          return MPV_ERROR_SUCCESS;
        case MPV_ERROR_OPTION_FORMAT:    return MPV_ERROR_PROPERTY_FORMAT;
        case MPV_ERROR_OPTION_NOT_FOUND: return MPV_ERROR_PROPERTY_NOT_FOUND;
        default:                         return MPV_ERROR_PROPERTY_ERROR;
        }
    }
    if (!get_mp_type(format))
        return MPV_ERROR_PROPERTY_FORMAT;

    struct setproperty_request req = {
        .mpctx = ctx->mpctx,
        .name = name,
        .format = format,
        .data = data,
    };
    run_locked(ctx, setproperty_fn, &req);
    return req.status;
}

int mpv_set_property_string(mpv_handle *ctx, const char *name, const char *data)
{
    return mpv_set_property(ctx, name, MPV_FORMAT_STRING, &data);
}
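
/*
 * Property write sketch (illustrative; values are examples): typed and
 * string variants are both routed through setproperty_fn() on the core
 * thread.
 *
 *     int pause = 1;
 *     mpv_set_property(handle, "pause", MPV_FORMAT_FLAG, &pause);
 *     mpv_set_property_string(handle, "speed", "1.5");
 */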

static void free_prop_set_req(void *ptr)
{
    struct setproperty_request *req = ptr;
    const struct m_option *type = get_mp_type(req->format);
    m_option_free(type, req->data);
}

int mpv_set_property_async(mpv_handle *ctx, uint64_t ud, const char *name,
                           mpv_format format, void *data)
{
    const struct m_option *type = get_mp_type(format);
    if (!ctx->mpctx->initialized)
        return MPV_ERROR_UNINITIALIZED;
    if (!type)
        return MPV_ERROR_PROPERTY_FORMAT;

    struct setproperty_request *req = talloc_ptrtype(NULL, req);
    *req = (struct setproperty_request){
        .mpctx = ctx->mpctx,
        .name = talloc_strdup(req, name),
        .format = format,
        .data = talloc_zero_size(req, type->type->size),
        .reply_ctx = ctx,
        .userdata = ud,
    };

    m_option_copy(type, req->data, data);
    talloc_set_destructor(req, free_prop_set_req);

    return run_async(ctx, setproperty_fn, req);
}

struct getproperty_request {
    struct MPContext *mpctx;
    const char *name;
    mpv_format format;
    void *data;
    int status;
    struct mpv_handle *reply_ctx;
    uint64_t userdata;
};

static void free_prop_data(void *ptr)
{
    struct mpv_event_property *prop = ptr;
    const struct m_option *type = get_mp_type_get(prop->format);
    m_option_free(type, prop->data);
}

static void getproperty_fn(void *arg)
{
    struct getproperty_request *req = arg;
    const struct m_option *type = get_mp_type_get(req->format);

    union m_option_value xdata = {0};
    void *data = req->data ? req->data : &xdata;

    int err = -1;
    switch (req->format) {
    case MPV_FORMAT_OSD_STRING:
        err = mp_property_do(req->name, M_PROPERTY_PRINT, data, req->mpctx);
        break;
    case MPV_FORMAT_STRING: {
        char *s = NULL;
        err = mp_property_do(req->name, M_PROPERTY_GET_STRING, &s, req->mpctx);
        if (err == M_PROPERTY_OK)
            *(char **)data = s;
        break;
    }
    case MPV_FORMAT_NODE:
    case MPV_FORMAT_FLAG:
    case MPV_FORMAT_INT64:
    case MPV_FORMAT_DOUBLE: {
        struct mpv_node node = {{0}};
        err = mp_property_do(req->name, M_PROPERTY_GET_NODE, &node, req->mpctx);
        if (err == M_PROPERTY_NOT_IMPLEMENTED) {
            // Go through explicit string conversion. Same reasoning as on the
            // GET code path.
            char *s = NULL;
            err = mp_property_do(req->name, M_PROPERTY_GET_STRING, &s,
                                 req->mpctx);
            if (err != M_PROPERTY_OK)
                break;
            node.format = MPV_FORMAT_STRING;
            node.u.string = s;
        } else if (err <= 0)
            break;
        if (req->format == MPV_FORMAT_NODE) {
            *(struct mpv_node *)data = node;
        } else {
            if (!conv_node_to_format(data, req->format, &node)) {
                err = M_PROPERTY_INVALID_FORMAT;
                mpv_free_node_contents(&node);
            }
        }
        break;
    }
    default:
        abort();
    }

    req->status = translate_property_error(err);

    if (req->reply_ctx) {
        struct mpv_event_property *prop = talloc_ptrtype(NULL, prop);
        *prop = (struct mpv_event_property){
            .name = talloc_steal(prop, (char *)req->name),
            .format = req->format,
            .data = talloc_size(prop, type->type->size),
        };
        // move data
        memcpy(prop->data, &xdata, type->type->size);
        talloc_set_destructor(prop, free_prop_data);
        struct mpv_event reply = {
            .event_id = MPV_EVENT_GET_PROPERTY_REPLY,
            .data = prop,
            .error = req->status,
        };
        send_reply(req->reply_ctx, req->userdata, &reply);
        talloc_free(req);
    }
}
|
|
|
|
|
|
|
|
int mpv_get_property(mpv_handle *ctx, const char *name, mpv_format format,
                     void *data)
{
    if (!ctx->mpctx->initialized)
        return MPV_ERROR_UNINITIALIZED;
    if (!data)
        return MPV_ERROR_INVALID_PARAMETER;
    if (!get_mp_type_get(format))
        return MPV_ERROR_PROPERTY_FORMAT;

    struct getproperty_request req = {
        .mpctx = ctx->mpctx,
        .name = name,
        .format = format,
        .data = data,
    };
    run_locked(ctx, getproperty_fn, &req);
    return req.status;
}

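/*
 * Illustrative sketch: typical synchronous use from a client thread. The
 * "handle" variable and the "time-pos" property name are assumptions for the
 * example only.
 *
 *     double pos = 0;
 *     int err = mpv_get_property(handle, "time-pos", MPV_FORMAT_DOUBLE, &pos);
 *     if (err >= 0)
 *         printf("position: %f s\n", pos);
 *     else
 *         printf("get failed: %s\n", mpv_error_string(err));
 */
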
char *mpv_get_property_string(mpv_handle *ctx, const char *name)
{
    char *str = NULL;
    mpv_get_property(ctx, name, MPV_FORMAT_STRING, &str);
    return str;
}

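/*
 * Illustrative sketch: the string convenience wrapper returns NULL on error,
 * and the caller owns the returned string (release it with mpv_free()). The
 * "handle" variable and the "media-title" property are assumptions for the
 * example only.
 *
 *     char *title = mpv_get_property_string(handle, "media-title");
 *     if (title) {
 *         printf("title: %s\n", title);
 *         mpv_free(title);
 *     }
 */
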
char *mpv_get_property_osd_string(mpv_handle *ctx, const char *name)
{
    char *str = NULL;
    mpv_get_property(ctx, name, MPV_FORMAT_OSD_STRING, &str);
    return str;
}

int mpv_get_property_async(mpv_handle *ctx, uint64_t ud, const char *name,
                           mpv_format format)
{
    if (!ctx->mpctx->initialized)
        return MPV_ERROR_UNINITIALIZED;
    if (!get_mp_type_get(format))
        return MPV_ERROR_PROPERTY_FORMAT;

    struct getproperty_request *req = talloc_ptrtype(NULL, req);
    *req = (struct getproperty_request){
        .mpctx = ctx->mpctx,
        .name = talloc_strdup(req, name),
        .format = format,
        .reply_ctx = ctx,
        .userdata = ud,
    };
    return run_async(ctx, getproperty_fn, req);
}

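/*
 * Illustrative sketch: the async variant returns immediately; the result
 * arrives later as MPV_EVENT_GET_PROPERTY_REPLY carrying the same userdata.
 * The "handle" variable, the userdata value 123, and the "duration" property
 * are assumptions for the example only.
 *
 *     mpv_get_property_async(handle, 123, "duration", MPV_FORMAT_DOUBLE);
 *
 *     // later, in the client's event loop:
 *     mpv_event *ev = mpv_wait_event(handle, -1);
 *     if (ev->event_id == MPV_EVENT_GET_PROPERTY_REPLY &&
 *         ev->reply_userdata == 123 && ev->error >= 0)
 *     {
 *         mpv_event_property *prop = ev->data;
 *         if (prop->format == MPV_FORMAT_DOUBLE)
 *             printf("duration: %f s\n", *(double *)prop->data);
 *     }
 */
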
static void property_free(void *p)
{
    struct observe_property *prop = p;

    assert(prop->refcount == 0);

    if (prop->type) {
        m_option_free(prop->type, &prop->value);
        m_option_free(prop->type, &prop->value_ret);
    }
}

int mpv_observe_property(mpv_handle *ctx, uint64_t userdata,
                         const char *name, mpv_format format)
{
    const struct m_option *type = get_mp_type_get(format);
    if (format != MPV_FORMAT_NONE && !type)
        return MPV_ERROR_PROPERTY_FORMAT;
    // Explicitly disallow this, because it would require a special code path.
    if (format == MPV_FORMAT_OSD_STRING)
        return MPV_ERROR_PROPERTY_FORMAT;

    pthread_mutex_lock(&ctx->lock);
    assert(!ctx->destroying);
    struct observe_property *prop = talloc_ptrtype(ctx, prop);
    talloc_set_destructor(prop, property_free);
    *prop = (struct observe_property){
        .owner = ctx,
        .name = talloc_strdup(prop, name),
        .id = mp_get_property_id(ctx->mpctx, name),
        .event_mask = mp_get_property_event_mask(name),
        .reply_id = userdata,
        .format = format,
        .type = type,
        .change_ts = 1, // force initial event
        .refcount = 1,
    };
    ctx->properties_change_ts += 1;
    MP_TARRAY_APPEND(ctx, ctx->properties, ctx->num_properties, prop);
    ctx->property_event_masks |= prop->event_mask;
    ctx->new_property_events = true;
    ctx->cur_property_index = 0;
    ctx->has_pending_properties = true;
    pthread_mutex_unlock(&ctx->lock);
    mp_wakeup_core(ctx->mpctx);
    return 0;
}

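/*
 * Illustrative sketch: observing a property delivers MPV_EVENT_PROPERTY_CHANGE
 * events carrying the registered userdata; a format of MPV_FORMAT_NONE in the
 * event means the value is currently unavailable. The "handle" variable, the
 * userdata value 1, and the "pause" property are assumptions for the example.
 *
 *     mpv_observe_property(handle, 1, "pause", MPV_FORMAT_FLAG);
 *     for (;;) {
 *         mpv_event *ev = mpv_wait_event(handle, -1);
 *         if (ev->event_id == MPV_EVENT_PROPERTY_CHANGE &&
 *             ev->reply_userdata == 1)
 *         {
 *             mpv_event_property *prop = ev->data;
 *             if (prop->format == MPV_FORMAT_FLAG)
 *                 printf("pause: %d\n", *(int *)prop->data);
 *         }
 *     }
 */
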
int mpv_unobserve_property(mpv_handle *ctx, uint64_t userdata)
{
    pthread_mutex_lock(&ctx->lock);
    int count = 0;
    for (int n = ctx->num_properties - 1; n >= 0; n--) {
        struct observe_property *prop = ctx->properties[n];
        // Perform actual removal of the property lazily to avoid creating
        // dangling pointers and such.
        if (prop->reply_id == userdata) {
            prop_unref(prop);
            ctx->properties_change_ts += 1;
            MP_TARRAY_REMOVE_AT(ctx->properties, ctx->num_properties, n);
            ctx->cur_property_index = 0;
            count++;
        }
    }
    pthread_mutex_unlock(&ctx->lock);
    return count;
}

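/*
 * Illustrative sketch: unobserving goes by the userdata passed at registration
 * time and returns how many observers were removed; the "handle" variable and
 * the userdata value 1 are assumptions for the example.
 *
 *     int removed = mpv_unobserve_property(handle, 1);
 *     printf("removed %d observer(s)\n", removed);
 */
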
// Broadcast that a property has changed.
void mp_client_property_change(struct MPContext *mpctx, const char *name)
{
    struct mp_client_api *clients = mpctx->clients;
    int id = mp_get_property_id(mpctx, name);
    bool any_pending = false;

    pthread_mutex_lock(&clients->lock);

    for (int n = 0; n < clients->num_clients; n++) {
        struct mpv_handle *client = clients->clients[n];
        pthread_mutex_lock(&client->lock);
        for (int i = 0; i < client->num_properties; i++) {
            if (client->properties[i]->id == id) {
                client->properties[i]->change_ts += 1;
                client->has_pending_properties = true;
                any_pending = true;
            }
        }
        pthread_mutex_unlock(&client->lock);
    }

    pthread_mutex_unlock(&clients->lock);

    // If we're inside mp_dispatch_queue_process(), this will cause the playloop
    // to be re-run (to get mp_client_send_property_changes() called). If we're
    // inside the normal playloop, this does nothing, but the latter function
    // will be called at the end of the playloop anyway.
    if (any_pending)
        mp_dispatch_adjust_timeout(mpctx->dispatch, 0);
}

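/*
 * Illustrative sketch of the core side: code that changes player state only
 * flags the change here; the value itself is read later, batched per playloop
 * iteration, by send_client_property_changes() below. The "pause" name is an
 * assumption for the example.
 *
 *     // somewhere in the core, after toggling the pause state:
 *     mp_client_property_change(mpctx, "pause");
 */
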
// Mark properties as changed in reaction to specific events.
// Called with ctx->lock held.
static void notify_property_events(struct mpv_handle *ctx, int event)
{
    uint64_t mask = 1ULL << event;
    for (int i = 0; i < ctx->num_properties; i++) {
        if (ctx->properties[i]->event_mask & mask) {
            ctx->properties[i]->change_ts += 1;
            ctx->has_pending_properties = true;
        }
    }

    // Same as in mp_client_property_change().
    if (ctx->has_pending_properties)
        mp_dispatch_adjust_timeout(ctx->mpctx->dispatch, 0);
}

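/*
 * Illustrative sketch: event_mask is a bitmask indexed by event ID, so a
 * single core event can flag many observed properties at once. The concrete
 * event used here is an assumption for the example.
 *
 *     // e.g. when a seek-related event is queued for this client:
 *     notify_property_events(ctx, MPV_EVENT_SEEK);
 */
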
// Call with ctx->lock held (only). May temporarily drop the lock.
|
|
|
|
static void send_client_property_changes(struct mpv_handle *ctx)
|
2014-04-05 21:54:21 +00:00
|
|
|
{
|
client API: rewrite property observation (again)
I intend to rewrite this code approximately every 2 months.
Last time, I did this in commit d66eb93e5d4 (and 065c307e8e7 and
b2006eeb74f). It was intended to remove the roundabout synchronous
thread "ping pong" when observing properties. At first, the original
async. code was replaced with some nice mostly synchronous code. But
then an async. code path had to be added for vo_libmpv, and finally the
sync. code was dropped because it broke in other obscure cases (like the
Objective-C Cocoa backend).
Try again. This time, update properties entirely on the main thread.
Updates get batched out on every playloop iteration. (At first I wanted
it to make it every time the player goes to sleep, but that might starve
API clients if the playloop get saturated.) One nice thing is that
clients only get woken up once all changed events have been sent, which
might reduce overhead.
While this sounds simple, it's not. The main problem is that reading
properties must not block the client API, i.e. no client API locks can
be held while reading the property. Maybe eventually we can avoid this
requirement, but currently it's just a fact. This means we have to
iterate over all clients and then over all properties (of each client),
all while releasing all locks when updating a property. Solve this by
rechecking on each iteration whether the list changed, and if so,
aborting the iteration and redo it "next time".
High risk change, expect bugs such as crashes and missing property
updates.
2019-12-19 10:11:51 +00:00
|
|
|
uint64_t cur_ts = ctx->properties_change_ts;
|
2014-04-05 21:54:21 +00:00
|
|
|
|
client API: rewrite property observation (again)
I intend to rewrite this code approximately every 2 months.
Last time, I did this in commit d66eb93e5d4 (and 065c307e8e7 and
b2006eeb74f). It was intended to remove the roundabout synchronous
thread "ping pong" when observing properties. At first, the original
async. code was replaced with some nice mostly synchronous code. But
then an async. code path had to be added for vo_libmpv, and finally the
sync. code was dropped because it broke in other obscure cases (like the
Objective-C Cocoa backend).
Try again. This time, update properties entirely on the main thread.
Updates get batched out on every playloop iteration. (At first I wanted
it to make it every time the player goes to sleep, but that might starve
API clients if the playloop get saturated.) One nice thing is that
clients only get woken up once all changed events have been sent, which
might reduce overhead.
While this sounds simple, it's not. The main problem is that reading
properties must not block the client API, i.e. no client API locks can
be held while reading the property. Maybe eventually we can avoid this
requirement, but currently it's just a fact. This means we have to
iterate over all clients and then over all properties (of each client),
all while releasing all locks when updating a property. Solve this by
rechecking on each iteration whether the list changed, and if so,
aborting the iteration and redo it "next time".
High risk change, expect bugs such as crashes and missing property
updates.
2019-12-19 10:11:51 +00:00
|
|
|
ctx->has_pending_properties = false;
|
2014-04-05 21:54:21 +00:00
|
|
|
|
client API: rewrite property observation (again)
I intend to rewrite this code approximately every 2 months.
Last time, I did this in commit d66eb93e5d4 (and 065c307e8e7 and
b2006eeb74f). It was intended to remove the roundabout synchronous
thread "ping pong" when observing properties. At first, the original
async. code was replaced with some nice mostly synchronous code. But
then an async. code path had to be added for vo_libmpv, and finally the
sync. code was dropped because it broke in other obscure cases (like the
Objective-C Cocoa backend).
Try again. This time, update properties entirely on the main thread.
Updates get batched out on every playloop iteration. (At first I wanted
it to make it every time the player goes to sleep, but that might starve
API clients if the playloop get saturated.) One nice thing is that
clients only get woken up once all changed events have been sent, which
might reduce overhead.
While this sounds simple, it's not. The main problem is that reading
properties must not block the client API, i.e. no client API locks can
be held while reading the property. Maybe eventually we can avoid this
requirement, but currently it's just a fact. This means we have to
iterate over all clients and then over all properties (of each client),
all while releasing all locks when updating a property. Solve this by
rechecking on each iteration whether the list changed, and if so,
aborting the iteration and redo it "next time".
High risk change, expect bugs such as crashes and missing property
updates.
2019-12-19 10:11:51 +00:00
|
|
|
for (int n = 0; n < ctx->num_properties; n++) {
|
|
|
|
struct observe_property *prop = ctx->properties[n];
|
2014-04-05 21:54:21 +00:00
|
|
|
|
client API: rewrite property observation (again)
I intend to rewrite this code approximately every 2 months.
Last time, I did this in commit d66eb93e5d4 (and 065c307e8e7 and
b2006eeb74f). It was intended to remove the roundabout synchronous
thread "ping pong" when observing properties. At first, the original
async. code was replaced with some nice mostly synchronous code. But
then an async. code path had to be added for vo_libmpv, and finally the
sync. code was dropped because it broke in other obscure cases (like the
Objective-C Cocoa backend).
Try again. This time, update properties entirely on the main thread.
Updates get batched out on every playloop iteration. (At first I wanted
it to make it every time the player goes to sleep, but that might starve
API clients if the playloop get saturated.) One nice thing is that
clients only get woken up once all changed events have been sent, which
might reduce overhead.
While this sounds simple, it's not. The main problem is that reading
properties must not block the client API, i.e. no client API locks can
be held while reading the property. Maybe eventually we can avoid this
requirement, but currently it's just a fact. This means we have to
iterate over all clients and then over all properties (of each client),
all while releasing all locks when updating a property. Solve this by
rechecking on each iteration whether the list changed, and if so,
aborting the iteration and redo it "next time".
High risk change, expect bugs such as crashes and missing property
updates.
2019-12-19 10:11:51 +00:00
|
|
|
if (prop->value_ts == prop->change_ts)
|
|
|
|
continue;
|
client API: simplify (?) property change notification generation
Property change notification works by having the mpv core wake up all
clients observing a property when the property potentially changes. The
clients then read the property's value, and determine if there was an
actual change. (The latter part depends what the property returned for
the previous change notification, so it depends on the client, and
cannot be generated by the core itself.)
Until now, reading the property value was done in a pseudo-async way by
queuing a callback back to the core, running it there, and then waking
up the client thread again. I cannot comprehend why this was done in
such a complicated, fragile way. Maybe it's a leftover from times when
client.c had to do this (in short, because properties could access
vo_opengl, which has thread-local state).
One past idea was to make the implementation of true async properties
easier (for which you would need such a state machine anyway). But they
don't exist yet, and I doubt the current mess would be really helpful
when actually implementing them.
Simplify this, and run the update in the client's thread directly. In
addition to the fundamental change, many roundabout things can be
removed as a consequence.
Unfortunately, I noticed that lock order issues force you to release
ctx->lock before doing so, which makes things more complex due to
possible concurrent mpv_unobserve_property() calls. Solve this by
removing properties lazily, which means you may have to do multiple
mpv_wait_event() calls before the property entry is actually destroyed.
This should not matter in practice, and does not affect the semantics.
It could also cause "leaks" by observing/unobserving properties in a
loop, without ever calling mpv_wait_event(). Just don't do this, duh.
(I considered making this dependent on whether the previous
mpv_wait_event() call returned the property being removed, but a
separate code path seemed too complicated. I also considered copying the
name and property data when returning a MPV_EVENT_PROPERTY_CHANGE, but
actually this doesn't solve the problem of update_prop() being
interrupted by mpv_unobserve_property(); there are ways around it, but I
just said no.)
This was made using the cowboy coding software engineering methodology.
If you find any bugs, keep them yourself.
2019-10-24 15:01:53 +00:00
|
|
|
|
client API: rewrite property observation (again)
I intend to rewrite this code approximately every 2 months.
Last time, I did this in commit d66eb93e5d4 (and 065c307e8e7 and
b2006eeb74f). It was intended to remove the roundabout synchronous
thread "ping pong" when observing properties. At first, the original
async. code was replaced with some nice mostly synchronous code. But
then an async. code path had to be added for vo_libmpv, and finally the
sync. code was dropped because it broke in other obscure cases (like the
Objective-C Cocoa backend).
Try again. This time, update properties entirely on the main thread.
Updates get batched out on every playloop iteration. (At first I wanted
it to make it every time the player goes to sleep, but that might starve
API clients if the playloop get saturated.) One nice thing is that
clients only get woken up once all changed events have been sent, which
might reduce overhead.
While this sounds simple, it's not. The main problem is that reading
properties must not block the client API, i.e. no client API locks can
be held while reading the property. Maybe eventually we can avoid this
requirement, but currently it's just a fact. This means we have to
iterate over all clients and then over all properties (of each client),
all while releasing all locks when updating a property. Solve this by
rechecking on each iteration whether the list changed, and if so,
aborting the iteration and redo it "next time".
High risk change, expect bugs such as crashes and missing property
updates.
2019-12-19 10:11:51 +00:00
|
|
|
bool changed = false;
|
|
|
|
if (prop->format) {
|
|
|
|
const struct m_option *type = prop->type;
|
|
|
|
union m_option_value val = {0};
|
|
|
|
struct getproperty_request req = {
|
|
|
|
.mpctx = ctx->mpctx,
|
|
|
|
.name = prop->name,
|
|
|
|
.format = prop->format,
|
|
|
|
.data = &val,
|
|
|
|
};
|
2019-10-24 23:57:51 +00:00
|
|
|
|
client API: rewrite property observation (again)
I intend to rewrite this code approximately every 2 months.
Last time, I did this in commit d66eb93e5d4 (and 065c307e8e7 and
b2006eeb74f). It was intended to remove the roundabout synchronous
thread "ping pong" when observing properties. At first, the original
async. code was replaced with some nice mostly synchronous code. But
then an async. code path had to be added for vo_libmpv, and finally the
sync. code was dropped because it broke in other obscure cases (like the
Objective-C Cocoa backend).
Try again. This time, update properties entirely on the main thread.
Updates get batched out on every playloop iteration. (At first I wanted
it to make it every time the player goes to sleep, but that might starve
API clients if the playloop get saturated.) One nice thing is that
clients only get woken up once all changed events have been sent, which
might reduce overhead.
While this sounds simple, it's not. The main problem is that reading
properties must not block the client API, i.e. no client API locks can
be held while reading the property. Maybe eventually we can avoid this
requirement, but currently it's just a fact. This means we have to
iterate over all clients and then over all properties (of each client),
all while releasing all locks when updating a property. Solve this by
rechecking on each iteration whether the list changed, and if so,
aborting the iteration and redo it "next time".
High risk change, expect bugs such as crashes and missing property
updates.
2019-12-19 10:11:51 +00:00
|
|
|
// Temporarily unlock and read the property. The very important
|
|
|
|
// thing is that property getters can do whatever they want, _and_
|
|
|
|
// that they may wait on the client API user thread (if vo_libmpv
|
|
|
|
// or similar things are involved).
|
|
|
|
prop->refcount += 1; // keep prop alive (esp. prop->name)
|
|
|
|
ctx->async_counter += 1; // keep ctx alive
|
|
|
|
pthread_mutex_unlock(&ctx->lock);
|
|
|
|
getproperty_fn(&req);
|
|
|
|
pthread_mutex_lock(&ctx->lock);
|
|
|
|
ctx->async_counter -= 1;
|
|
|
|
prop_unref(prop);
|
|
|
|
|
client API: provide ways to finish property changes on file changes
When the current file changes (or rather, when starting/finishing
playback of a playlist entry), clients tend to have the problem that
it's hard to tell whether a property change notification (via
mpv_observe_property() and mechanisms layered on top of it) is from the
previous or new playlist entry. The previous commit probably helps, but
all the asynchronity is still a bit unhelpful.
Try to make this better by adding new hooks, that are run before/after
playback init/deinit. This is similar to the existing hooks, except
they're outside of "initialized" playback, which excludes that you might
accidentally get an overlap between the current and the previous/next
playlist entry.
That still doesn't seem quite enough, since normally, property change
notifications come after the hook event. So basically a client would
have to explicitly "drain" the event queue within the hook, and make the
hook continue only after that is done. Knowing when property
notifications are done is another asynchronous nightmare (how exactly it
works keeps changing within client.c, and an API user probably can't
tell anymore when all pending properties are truly done). So introduce
another guarantee: properties that were changed before the hook happens
will be returned before the hook event is returned. That means the
client will have received all pending property notifications from the
previous playlist entry (or whatever) before the hook is entered.
As another minor complication, we shouldn't just keep the hook pending
until _all_ property notifications are done, since the client's hook
could produce new ones. (Or just consider things like the demuxer thread
hammering the client with cache update events, while the "on_preloaded"
hook is run.) So there is some extra untested, fragile logic in client.c
to handle this (the waiting_for_hook flag).
This probably works, but was barely tested. Not sure if this helps
anyone, but I think it's fine for my own purposes. (I really hated this
aspect of the API whenever I used it myself.)
2020-03-07 01:52:10 +00:00
|
|
|
// Set if observed properties was changed or something similar
|
client API: rewrite property observation (again)
I intend to rewrite this code approximately every 2 months.
Last time, I did this in commit d66eb93e5d4 (and 065c307e8e7 and
b2006eeb74f). It was intended to remove the roundabout synchronous
thread "ping pong" when observing properties. At first, the original
async. code was replaced with some nice mostly synchronous code. But
then an async. code path had to be added for vo_libmpv, and finally the
sync. code was dropped because it broke in other obscure cases (like the
Objective-C Cocoa backend).
Try again. This time, update properties entirely on the main thread.
Updates get batched out on every playloop iteration. (At first I wanted
it to make it every time the player goes to sleep, but that might starve
API clients if the playloop get saturated.) One nice thing is that
clients only get woken up once all changed events have been sent, which
might reduce overhead.
While this sounds simple, it's not. The main problem is that reading
properties must not block the client API, i.e. no client API locks can
be held while reading the property. Maybe eventually we can avoid this
requirement, but currently it's just a fact. This means we have to
iterate over all clients and then over all properties (of each client),
all while releasing all locks when updating a property. Solve this by
rechecking on each iteration whether the list changed, and if so,
aborting the iteration and redo it "next time".
High risk change, expect bugs such as crashes and missing property
updates.
2019-12-19 10:11:51 +00:00
|
|
|
// => start over, retry next time.
|
|
|
|
if (cur_ts != ctx->properties_change_ts || ctx->destroying) {
|
|
|
|
m_option_free(type, &val);
|
|
|
|
mp_wakeup_core(ctx->mpctx);
|
|
|
|
ctx->has_pending_properties = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
assert(prop->refcount > 0);
|
|
|
|
|
|
|
|
bool val_valid = req.status >= 0;
|
|
|
|
changed = prop->value_valid != val_valid;
|
|
|
|
if (prop->value_valid && val_valid)
|
|
|
|
changed = !equal_mpv_value(&prop->value, &val, prop->format);
|
|
|
|
if (prop->value_ts == 0)
|
|
|
|
changed = true; // initial event
|
|
|
|
|
|
|
|
prop->value_valid = val_valid;
|
|
|
|
if (changed && val_valid) {
|
|
|
|
// move val to prop->value
|
|
|
|
m_option_free(type, &prop->value);
|
|
|
|
memcpy(&prop->value, &val, type->type->size);
|
|
|
|
memset(&val, 0, type->type->size);
|
|
|
|
}
|
            m_option_free(prop->type, &val);
        } else {
            changed = true;
        }
client API: provide ways to finish property changes on file changes
When the current file changes (or rather, when starting/finishing
playback of a playlist entry), clients tend to have the problem that
it's hard to tell whether a property change notification (via
mpv_observe_property() and mechanisms layered on top of it) is from the
previous or new playlist entry. The previous commit probably helps, but
all the asynchronicity is still a bit unhelpful.
Try to make this better by adding new hooks that are run before/after
playback init/deinit. This is similar to the existing hooks, except
they're outside of "initialized" playback, which rules out accidentally
getting an overlap between the current and the previous/next playlist
entry.
That still doesn't seem quite enough, since normally, property change
notifications come after the hook event. So basically a client would
have to explicitly "drain" the event queue within the hook, and make the
hook continue only after that is done. Knowing when property
notifications are done is another asynchronous nightmare (how exactly it
works keeps changing within client.c, and an API user probably can't
tell anymore when all pending properties are truly done). So introduce
another guarantee: properties that were changed before the hook happens
will be returned before the hook event is returned. That means the
client will have received all pending property notifications from the
previous playlist entry (or whatever) before the hook is entered.
As another minor complication, we shouldn't just keep the hook pending
until _all_ property notifications are done, since the client's hook
could produce new ones. (Or just consider things like the demuxer thread
hammering the client with cache update events, while the "on_preloaded"
hook is run.) So there is some extra untested, fragile logic in client.c
to handle this (the waiting_for_hook flag).
This probably works, but was barely tested. Not sure if this helps
anyone, but I think it's fine for my own purposes. (I really hated this
aspect of the API whenever I used it myself.)
2020-03-07 01:52:10 +00:00
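For reference, a client would consume these hooks roughly as follows. This is
only a sketch: it assumes the hook names added around this time are
"on_before_start_file" and "on_after_end_file", and it omits error handling.

// Illustrative libmpv client, not part of client.c:
#include <mpv/client.h>

static void handle_events(mpv_handle *mpv)
{
    mpv_observe_property(mpv, 0, "duration", MPV_FORMAT_DOUBLE);
    // Assumed hook name; the hook runs outside of "initialized" playback.
    mpv_hook_add(mpv, 0, "on_before_start_file", 0);

    while (1) {
        mpv_event *ev = mpv_wait_event(mpv, -1);
        if (ev->event_id == MPV_EVENT_PROPERTY_CHANGE) {
            // Per the guarantee described above, notifications seen before the
            // hook event below still belong to the previous playlist entry.
        } else if (ev->event_id == MPV_EVENT_HOOK) {
            mpv_event_hook *hook = ev->data;
            // All pending property notifications from the previous entry have
            // been delivered at this point; reset per-file client state here,
            // then let playback continue.
            mpv_hook_continue(mpv, hook->id);
        } else if (ev->event_id == MPV_EVENT_SHUTDOWN) {
            break;
        }
    }
}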

        if (prop->waiting_for_hook)
            ctx->new_property_events = true; // make sure to wakeup
client API: avoid returning stale value on property notifications
This could happen if a property was flagged as changed, then updated,
then flagged again, but gen_property_change_event() was called before
the value was updated a second time. Then the function simply returned
the old value, and would later trigger a new change event again.
This was considered acceptable before, since property notifications are
asynchronous anyway (so they may always be "outdated"; it just mattered
whether the most recent value was eventually delivered).
But consider ordering with events. It seems desirable that specific
important events (e.g. MPV_EVENT_START_FILE) should not be followed by
property updates that happened before them, because that would make
application logic really a mess, and property notifications near-useless
in certain cases.
Avoid this by never returning a value if it was marked as changed but
not updated yet.
Unfortunately, this could lead to clients never receiving a value (or
receiving it with a high random delay) if they're too slow to read it
(or the property simply updates too often). Note that this is done for
_all_ property notifications, not just returned events. Hopefully not a
problem in practice. If it turns out to be one, this mechanism could be
restricted to actually returned events, for which this really matters.
2020-03-06 22:59:21 +00:00
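Concretely, this is enforced with the per-property timestamps used in the code
below. As a simplified model (the field names are the ones used in this file;
can_return_value() itself is only an illustration and does not exist in
client.c):

// Illustrative model, not part of client.c:
#include <stdbool.h>
#include <stdint.h>

// change_ts:    bumped by the core whenever the property may have changed
// value_ts:     set to change_ts once the new value has actually been read
// value_ret_ts: set to value_ts once that value was handed to the client
struct prop_ts {
    uint64_t change_ts, value_ts, value_ret_ts;
};

// A change event carries a value only if the value is up to date (not stale)
// and has not been returned before; otherwise the event is skipped for now.
static bool can_return_value(const struct prop_ts *p)
{
    return p->value_ts == p->change_ts &&
           p->value_ret_ts != p->value_ts;
}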

        // Avoid retriggering the change event if the property didn't change,
        // and the previous value was actually returned to the client.
        if (!changed && prop->value_ret_ts == prop->value_ts) {
            prop->value_ret_ts = prop->change_ts; // no change => no event
            prop->waiting_for_hook = false;
        } else {
            ctx->new_property_events = true;
        }

        prop->value_ts = prop->change_ts;
    }

    if (ctx->destroying || ctx->new_property_events)
        wakeup_client(ctx);
}

void mp_client_send_property_changes(struct MPContext *mpctx)
{
    struct mp_client_api *clients = mpctx->clients;

    pthread_mutex_lock(&clients->lock);
    uint64_t cur_ts = clients->clients_list_change_ts;

    for (int n = 0; n < clients->num_clients; n++) {
        struct mpv_handle *ctx = clients->clients[n];

client API: simplify (?) property change notification generation
Property change notification works by having the mpv core wake up all
clients observing a property when the property potentially changes. The
clients then read the property's value, and determine if there was an
actual change. (The latter part depends on what the property returned
for the previous change notification, so it depends on the client, and
cannot be generated by the core itself.)
Until now, reading the property value was done in a pseudo-async way by
queuing a callback back to the core, running it there, and then waking
up the client thread again. I cannot comprehend why this was done in
such a complicated, fragile way. Maybe it's a leftover from times when
client.c had to do this (in short, because properties could access
vo_opengl, which has thread-local state).
One past idea was to make the implementation of true async properties
easier (for which you would need such a state machine anyway). But they
don't exist yet, and I doubt the current mess would be really helpful
when actually implementing them.
Simplify this, and run the update in the client's thread directly. In
addition to the fundamental change, many roundabout things can be
removed as a consequence.
Unfortunately, I noticed that lock order issues force you to release
ctx->lock before reading the property, which makes things more complex
due to possible concurrent mpv_unobserve_property() calls. Solve this by
removing properties lazily, which means you may have to do multiple
mpv_wait_event() calls before the property entry is actually destroyed.
This should not matter in practice, and does not affect the semantics.
It could also cause "leaks" by observing/unobserving properties in a
loop, without ever calling mpv_wait_event(). Just don't do this, duh.
(I considered making this dependent on whether the previous
mpv_wait_event() call returned the property being removed, but a
separate code path seemed too complicated. I also considered copying the
name and property data when returning a MPV_EVENT_PROPERTY_CHANGE, but
actually this doesn't solve the problem of update_prop() being
interrupted by mpv_unobserve_property(); there are ways around it, but I
just said no.)
This was made using the cowboy coding software engineering methodology.
If you find any bugs, keep them yourself.
2019-10-24 15:01:53 +00:00
        pthread_mutex_lock(&ctx->lock);
        if (!ctx->has_pending_properties || ctx->destroying) {
            pthread_mutex_unlock(&ctx->lock);
            continue;
        }
        // Keep ctx->lock locked (unlock order does not matter).
        pthread_mutex_unlock(&clients->lock);
        send_client_property_changes(ctx);
        pthread_mutex_unlock(&ctx->lock);
        pthread_mutex_lock(&clients->lock);
        if (cur_ts != clients->clients_list_change_ts) {
            // List changed; need to start over. Do it in the next iteration.
            mp_wakeup_core(mpctx);
            break;
        }
    }

    pthread_mutex_unlock(&clients->lock);
}

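The per-client iteration above follows a common pattern: pin one element with
its own lock, drop the list lock while working on it, then re-take the list
lock and use a generation counter to detect concurrent list changes. Stripped
of the client-API specifics, the shape is roughly as below; the names are
illustrative and not mpv APIs, and element lifetime beyond the held lock is
not addressed here.

// Illustrative sketch of the locking pattern, not part of client.c:
#include <pthread.h>
#include <stdint.h>

struct item {
    pthread_mutex_t lock;
    // payload ...
};

struct list {
    pthread_mutex_t lock;
    uint64_t change_ts;         // bumped whenever items are added/removed
    int num_items;
    struct item **items;
};

void process_all(struct list *l, void (*work)(struct item *))
{
    pthread_mutex_lock(&l->lock);
    uint64_t cur_ts = l->change_ts;
    for (int n = 0; n < l->num_items; n++) {
        struct item *it = l->items[n];
        pthread_mutex_lock(&it->lock);      // pin the item first ...
        pthread_mutex_unlock(&l->lock);     // ... then drop the list lock
        work(it);                           // may block; list lock not held
        pthread_mutex_unlock(&it->lock);
        pthread_mutex_lock(&l->lock);
        if (cur_ts != l->change_ts)
            break;                          // list changed under us; retry later
    }
    pthread_mutex_unlock(&l->lock);
}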
// Set ctx->cur_event to a generated property change event, if there is any
// outstanding property.
static bool gen_property_change_event(struct mpv_handle *ctx)
{
    if (!ctx->mpctx->initialized)
        return false;

    while (1) {
        if (ctx->cur_property_index >= ctx->num_properties) {
            ctx->new_property_events &= ctx->num_properties > 0;
            if (!ctx->new_property_events)
                break;
            ctx->new_property_events = false;
            ctx->cur_property_index = 0;
        }

        struct observe_property *prop = ctx->properties[ctx->cur_property_index++];

        if (prop->value_ts == prop->change_ts &&    // not a stale value?
            prop->value_ret_ts != prop->value_ts)   // other value than last time?
        {
            prop->value_ret_ts = prop->value_ts;
            prop->waiting_for_hook = false;
            prop_unref(ctx->cur_property);
            ctx->cur_property = prop;
            prop->refcount += 1;

            if (prop->value_valid)
                m_option_copy(prop->type, &prop->value_ret, &prop->value);

            ctx->cur_property_event = (struct mpv_event_property){
                .name = prop->name,
.format = prop->value_valid ? prop->format : 0,
|
client API: rewrite property observation (again)
I intend to rewrite this code approximately every 2 months.
Last time, I did this in commit d66eb93e5d4 (and 065c307e8e7 and
b2006eeb74f). It was intended to remove the roundabout synchronous
thread "ping pong" when observing properties. At first, the original
async. code was replaced with some nice mostly synchronous code. But
then an async. code path had to be added for vo_libmpv, and finally the
sync. code was dropped because it broke in other obscure cases (like the
Objective-C Cocoa backend).
Try again. This time, update properties entirely on the main thread.
Updates get batched out on every playloop iteration. (At first I wanted
to do it every time the player goes to sleep, but that might starve
API clients if the playloop gets saturated.) One nice thing is that
clients only get woken up once all changed events have been sent, which
might reduce overhead.
While this sounds simple, it's not. The main problem is that reading
properties must not block the client API, i.e. no client API locks can
be held while reading the property. Maybe eventually we can avoid this
requirement, but currently it's just a fact. This means we have to
iterate over all clients and then over all properties (of each client),
all while releasing all locks when updating a property. Solve this by
rechecking on each iteration whether the list changed, and if so,
aborting the iteration and redoing it "next time".
High-risk change; expect bugs such as crashes and missing property
updates.
2019-12-19 10:11:51 +00:00
|
|
|
.data = prop->value_valid ? &prop->value_ret : NULL,
|
2019-04-16 22:32:54 +00:00
|
|
|
};
|
|
|
|
*ctx->cur_event = (struct mpv_event){
|
|
|
|
.event_id = MPV_EVENT_PROPERTY_CHANGE,
|
|
|
|
.reply_userdata = prop->reply_id,
|
|
|
|
.data = &ctx->cur_property_event,
|
|
|
|
};
|
2019-12-19 10:11:51 +00:00
|
|
|
return true;
|
2019-10-24 15:01:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-19 10:11:51 +00:00
|
|
|
return false;
|
2014-04-05 21:54:21 +00:00
|
|
|
}
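// --- Hypothetical usage sketch (client side), not part of this file ---
// How the property-change events assembled above look from the public API:
// the client registers a property and then reads MPV_EVENT_PROPERTY_CHANGE
// from mpv_wait_event(). Only <mpv/client.h> is assumed; error handling is
// omitted for brevity.
#include <stdio.h>
#include <mpv/client.h>

static void watch_time_pos(mpv_handle *mpv)
{
    // 1 is the reply_userdata echoed back in each change event.
    mpv_observe_property(mpv, 1, "time-pos", MPV_FORMAT_DOUBLE);

    while (1) {
        mpv_event *ev = mpv_wait_event(mpv, -1);    // negative timeout: wait indefinitely
        if (ev->event_id == MPV_EVENT_SHUTDOWN)
            break;
        if (ev->event_id == MPV_EVENT_PROPERTY_CHANGE && ev->reply_userdata == 1) {
            mpv_event_property *prop = ev->data;
            if (prop->format == MPV_FORMAT_DOUBLE)
                printf("time-pos: %f\n", *(double *)prop->data);
            else if (prop->format == MPV_FORMAT_NONE)
                printf("time-pos is currently unavailable\n");
        }
    }

    mpv_unobserve_property(mpv, 1);   // removal is lazy, as described above
}
// --- end of sketch ---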
|
|
|
|
|
2018-03-23 15:24:49 +00:00
|
|
|
int mpv_hook_add(mpv_handle *ctx, uint64_t reply_userdata,
|
|
|
|
const char *name, int priority)
|
|
|
|
{
|
|
|
|
lock_core(ctx);
|
2020-03-26 22:40:25 +00:00
|
|
|
mp_hook_add(ctx->mpctx, ctx->name, ctx->id, name, reply_userdata, priority);
|
2018-03-23 15:24:49 +00:00
|
|
|
unlock_core(ctx);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int mpv_hook_continue(mpv_handle *ctx, uint64_t id)
|
|
|
|
{
|
|
|
|
lock_core(ctx);
|
2020-03-26 22:40:25 +00:00
|
|
|
int r = mp_hook_continue(ctx->mpctx, ctx->id, id);
|
2018-03-23 15:24:49 +00:00
|
|
|
unlock_core(ctx);
|
|
|
|
return r;
|
|
|
|
}
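// --- Hypothetical usage sketch (client side), not part of this file ---
// The typical client-side pattern for the two functions above: register a
// hook, then acknowledge each MPV_EVENT_HOOK with mpv_hook_continue() so the
// core can resume. The "on_load" hook name and priority 50 are just examples.
#include <mpv/client.h>

static void run_on_load_hook(mpv_handle *mpv)
{
    mpv_hook_add(mpv, 0, "on_load", 50);

    while (1) {
        mpv_event *ev = mpv_wait_event(mpv, -1);
        if (ev->event_id == MPV_EVENT_SHUTDOWN)
            break;
        if (ev->event_id == MPV_EVENT_HOOK) {
            mpv_event_hook *hook = ev->data;
            // ... inspect or change state while the core is suspended ...
            mpv_hook_continue(mpv, hook->id);    // unblock playback
        }
    }
}
// --- end of sketch ---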
|
|
|
|
|
2014-05-18 16:58:36 +00:00
|
|
|
int mpv_load_config_file(mpv_handle *ctx, const char *filename)
|
|
|
|
{
|
|
|
|
lock_core(ctx);
|
2019-11-10 22:53:57 +00:00
|
|
|
int r = m_config_parse_config_file(ctx->mpctx->mconfig, filename, NULL, 0);
|
2014-05-18 16:58:36 +00:00
|
|
|
unlock_core(ctx);
|
|
|
|
if (r == 0)
|
|
|
|
return MPV_ERROR_INVALID_PARAMETER;
|
|
|
|
if (r < 0)
|
|
|
|
return MPV_ERROR_OPTION_ERROR;
|
|
|
|
return 0;
|
|
|
|
}
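// --- Hypothetical usage sketch (client side), not part of this file ---
// How a client might interpret the error mapping above; the path is an
// arbitrary example and must already be expanded by the caller.
#include <stdio.h>
#include <mpv/client.h>

static void load_user_config(mpv_handle *mpv, const char *path)
{
    int r = mpv_load_config_file(mpv, path);
    if (r == MPV_ERROR_INVALID_PARAMETER)
        fprintf(stderr, "could not read config file: %s\n", path);
    else if (r < 0)
        fprintf(stderr, "config error: %s\n", mpv_error_string(r));
}
// --- end of sketch ---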
|
|
|
|
|
client API: restructure waiting, do log msg wakeup properly
Until now, availability of new log messages (through the mechanism
associated with mpv_request_log_messages()) did not wake up the client
API properly. Commit 3b7402b5 was basically a hack to improve that
somewhat, but it wasn't a solution.
The main problem is that the client API itself is producing messages, so
the message callback would attempt to lock the client API lock,
resulting in a deadlock. Even if the lock was recursive, we'd run into
lock-order issues.
Solve this by using a separate lock for waiting and wakeup. Also, since
it's a natural addition, avoid redundant wakeups. This means the wakeup
callback as well as the wakeup pipe will be triggered only once until
the next mpv_wait_event() call happens.
This might make the wakeup callback be invoked in a reentrant way for
the first time, for example if an mpv_* function prints to a log. Adjust
the docs accordingly. (Note that non-reentrant behavior was never
guaranteed - basically the wakeup callback is somewhat dangerous and
inconvenient.)
Also remove some traces of unneeded code. ctx->shutdown for one was
never set, and probably a leftover of an abandoned idea.
2014-06-07 21:15:07 +00:00
|
|
|
static void msg_wakeup(void *p)
|
2014-06-06 17:24:30 +00:00
|
|
|
{
|
2014-06-07 21:15:07 +00:00
|
|
|
mpv_handle *ctx = p;
|
|
|
|
wakeup_client(ctx);
|
2014-06-06 17:24:30 +00:00
|
|
|
}
|
|
|
|
|
2019-11-17 23:44:12 +00:00
|
|
|
// Undocumented: if min_level starts with "silent:", then log messages are not
|
|
|
|
// returned to the API user, but are stored until logging is enabled normally
|
|
|
|
// again by calling this without "silent:". (Using a different level will
|
|
|
|
// flush it, though.)
|
2014-02-10 20:01:35 +00:00
|
|
|
int mpv_request_log_messages(mpv_handle *ctx, const char *min_level)
|
|
|
|
{
|
2019-11-17 23:44:12 +00:00
|
|
|
bstr blevel = bstr0(min_level);
|
|
|
|
bool silent = bstr_eatstart0(&blevel, "silent:");
|
|
|
|
|
2014-02-10 20:01:35 +00:00
|
|
|
int level = -1;
|
|
|
|
for (int n = 0; n < MSGL_MAX + 1; n++) {
|
2019-11-17 23:44:12 +00:00
|
|
|
if (mp_log_levels[n] && bstr_equals0(blevel, mp_log_levels[n])) {
|
2014-02-10 20:01:35 +00:00
|
|
|
level = n;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2019-11-17 23:44:12 +00:00
|
|
|
if (bstr_equals0(blevel, "terminal-default"))
|
2015-06-20 19:40:47 +00:00
|
|
|
level = MP_LOG_BUFFER_MSGL_TERM;
|
|
|
|
|
2014-02-10 20:01:35 +00:00
|
|
|
if (level < 0 && strcmp(min_level, "no") != 0)
|
|
|
|
return MPV_ERROR_INVALID_PARAMETER;
|
|
|
|
|
|
|
|
pthread_mutex_lock(&ctx->lock);
|
2019-11-17 23:44:12 +00:00
|
|
|
if (level < 0 || level != ctx->messages_level) {
|
|
|
|
mp_msg_log_buffer_destroy(ctx->messages);
|
|
|
|
ctx->messages = NULL;
|
|
|
|
}
|
2014-06-07 21:15:07 +00:00
|
|
|
if (level >= 0) {
|
2019-11-17 23:44:12 +00:00
|
|
|
if (!ctx->messages) {
|
|
|
|
int size = level >= MSGL_V ? 10000 : 1000;
|
|
|
|
ctx->messages = mp_msg_log_buffer_new(ctx->mpctx->global, size,
|
|
|
|
level, msg_wakeup, ctx);
|
|
|
|
ctx->messages_level = level;
|
|
|
|
}
|
|
|
|
mp_msg_log_buffer_set_silent(ctx->messages, silent);
|
2014-06-07 21:15:07 +00:00
|
|
|
}
|
2019-11-17 23:44:12 +00:00
|
|
|
wakeup_client(ctx);
|
2014-02-10 20:01:35 +00:00
|
|
|
pthread_mutex_unlock(&ctx->lock);
|
|
|
|
return 0;
|
|
|
|
}
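// --- Hypothetical usage sketch (client side), not part of this file ---
// The consumer side of the above: request a level, then read the
// MPV_EVENT_LOG_MESSAGE events that gen_log_message_event() below produces.
// "v" is one of the mp_log_levels names; the fixed-iteration loop is only
// for illustration.
#include <stdio.h>
#include <mpv/client.h>

static void dump_some_logs(mpv_handle *mpv)
{
    // "no" disables delivery again; "terminal-default" mirrors the terminal level.
    mpv_request_log_messages(mpv, "v");

    for (int n = 0; n < 100; n++) {
        mpv_event *ev = mpv_wait_event(mpv, 1.0);
        if (ev->event_id == MPV_EVENT_LOG_MESSAGE) {
            mpv_event_log_message *msg = ev->data;
            // msg->text normally carries its own trailing newline.
            printf("[%s] %s: %s", msg->prefix, msg->level, msg->text);
        }
    }
}
// --- end of sketch ---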
|
|
|
|
|
2015-01-19 19:03:17 +00:00
|
|
|
// Set ctx->cur_event to a generated log message event, if any available.
|
|
|
|
static bool gen_log_message_event(struct mpv_handle *ctx)
|
|
|
|
{
|
|
|
|
if (ctx->messages) {
|
|
|
|
struct mp_log_buffer_entry *msg =
|
|
|
|
mp_msg_log_buffer_read(ctx->messages);
|
|
|
|
if (msg) {
|
|
|
|
struct mpv_event_log_message *cmsg =
|
|
|
|
talloc_ptrtype(ctx->cur_event, cmsg);
|
2015-06-18 16:40:12 +00:00
|
|
|
talloc_steal(cmsg, msg);
|
2015-01-19 19:03:17 +00:00
|
|
|
*cmsg = (struct mpv_event_log_message){
|
|
|
|
.prefix = msg->prefix,
|
|
|
|
.level = mp_log_levels[msg->level],
|
|
|
|
.log_level = mp_mpv_log_levels[msg->level],
|
|
|
|
.text = msg->text,
|
|
|
|
};
|
|
|
|
*ctx->cur_event = (struct mpv_event){
|
|
|
|
.event_id = MPV_EVENT_LOG_MESSAGE,
|
|
|
|
.data = cmsg,
|
|
|
|
};
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2014-04-12 18:13:07 +00:00
|
|
|
int mpv_get_wakeup_pipe(mpv_handle *ctx)
|
|
|
|
{
|
2014-06-07 21:15:07 +00:00
|
|
|
pthread_mutex_lock(&ctx->wakeup_lock);
|
2014-06-08 14:11:11 +00:00
|
|
|
if (ctx->wakeup_pipe[0] == -1) {
|
2014-07-25 12:30:59 +00:00
|
|
|
if (mp_make_wakeup_pipe(ctx->wakeup_pipe) >= 0)
|
2016-06-07 11:39:43 +00:00
|
|
|
(void)write(ctx->wakeup_pipe[1], &(char){0}, 1);
|
2014-06-08 14:11:11 +00:00
|
|
|
}
|
2014-07-25 12:30:59 +00:00
|
|
|
int fd = ctx->wakeup_pipe[0];
|
2014-06-07 21:15:07 +00:00
|
|
|
pthread_mutex_unlock(&ctx->wakeup_lock);
|
2014-07-25 12:30:59 +00:00
|
|
|
return fd;
|
2014-04-12 18:13:07 +00:00
|
|
|
}
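// --- Hypothetical usage sketch (client side), not part of this file ---
// Typical integration of the wakeup pipe with an external event loop:
// poll() the fd, drain it, then pump mpv_wait_event() with a zero timeout
// until the queue is empty. Assumes POSIX poll(); error handling is minimal.
#include <poll.h>
#include <unistd.h>
#include <mpv/client.h>

static void external_event_loop(mpv_handle *mpv)
{
    int fd = mpv_get_wakeup_pipe(mpv);
    if (fd < 0)
        return;

    while (1) {
        struct pollfd pfd = {.fd = fd, .events = POLLIN};
        poll(&pfd, 1, -1);                        // add your own fds here as well

        char drain[64];
        (void)read(fd, drain, sizeof(drain));     // drain the wakeup byte(s)

        while (1) {                               // process everything that is queued
            mpv_event *ev = mpv_wait_event(mpv, 0);
            if (ev->event_id == MPV_EVENT_NONE)
                break;
            if (ev->event_id == MPV_EVENT_SHUTDOWN)
                return;
            // ... handle other events ...
        }
    }
}
// --- end of sketch ---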
|
|
|
|
|
2014-02-10 20:01:35 +00:00
|
|
|
unsigned long mpv_client_api_version(void)
|
|
|
|
{
|
|
|
|
return MPV_CLIENT_API_VERSION;
|
|
|
|
}
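// --- Hypothetical usage sketch, not part of this file ---
// MPV_CLIENT_API_VERSION packs the major version into the upper 16 bits, so
// a client can compare the headers it was built against with the runtime
// value returned above.
#include <mpv/client.h>

static int api_version_compatible(void)
{
    return (mpv_client_api_version() >> 16) == (MPV_CLIENT_API_VERSION >> 16);
}
// --- end of sketch ---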
|
|
|
|
|
2020-03-21 18:31:58 +00:00
|
|
|
int mpv_event_to_node(mpv_node *dst, mpv_event *event)
|
|
|
|
{
|
|
|
|
*dst = (mpv_node){0};
|
|
|
|
|
|
|
|
node_init(dst, MPV_FORMAT_NODE_MAP, NULL);
|
|
|
|
node_map_add_string(dst, "event", mpv_event_name(event->event_id));
|
|
|
|
|
|
|
|
if (event->error < 0)
|
|
|
|
node_map_add_string(dst, "error", mpv_error_string(event->error));
|
|
|
|
|
2020-03-21 21:09:07 +00:00
|
|
|
if (event->reply_userdata)
|
|
|
|
node_map_add_int64(dst, "id", event->reply_userdata);
|
|
|
|
|
2020-03-21 18:31:58 +00:00
|
|
|
switch (event->event_id) {
|
|
|
|
|
|
|
|
case MPV_EVENT_START_FILE: {
|
|
|
|
mpv_event_start_file *esf = event->data;
|
|
|
|
|
|
|
|
node_map_add_int64(dst, "playlist_entry_id", esf->playlist_entry_id);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MPV_EVENT_END_FILE: {
|
|
|
|
mpv_event_end_file *eef = event->data;
|
|
|
|
|
|
|
|
const char *reason;
|
|
|
|
switch (eef->reason) {
|
|
|
|
case MPV_END_FILE_REASON_EOF: reason = "eof"; break;
|
|
|
|
case MPV_END_FILE_REASON_STOP: reason = "stop"; break;
|
|
|
|
case MPV_END_FILE_REASON_QUIT: reason = "quit"; break;
|
|
|
|
case MPV_END_FILE_REASON_ERROR: reason = "error"; break;
|
|
|
|
case MPV_END_FILE_REASON_REDIRECT: reason = "redirect"; break;
|
|
|
|
default:
|
|
|
|
reason = "unknown";
|
|
|
|
}
|
|
|
|
node_map_add_string(dst, "reason", reason);
|
|
|
|
|
|
|
|
node_map_add_int64(dst, "playlist_entry_id", eef->playlist_entry_id);
|
|
|
|
|
2020-03-26 23:57:11 +00:00
|
|
|
if (eef->playlist_insert_id) {
|
|
|
|
node_map_add_int64(dst, "playlist_insert_id", eef->playlist_insert_id);
|
|
|
|
node_map_add_int64(dst, "playlist_insert_num_entries",
|
|
|
|
eef->playlist_insert_num_entries);
|
|
|
|
}
|
|
|
|
|
2020-03-21 18:31:58 +00:00
|
|
|
if (eef->reason == MPV_END_FILE_REASON_ERROR)
|
|
|
|
node_map_add_string(dst, "file_error", mpv_error_string(eef->error));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MPV_EVENT_LOG_MESSAGE: {
|
|
|
|
mpv_event_log_message *msg = event->data;
|
|
|
|
|
|
|
|
node_map_add_string(dst, "prefix", msg->prefix);
|
|
|
|
node_map_add_string(dst, "level", msg->level);
|
|
|
|
node_map_add_string(dst, "text", msg->text);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MPV_EVENT_CLIENT_MESSAGE: {
|
|
|
|
mpv_event_client_message *msg = event->data;
|
|
|
|
|
|
|
|
struct mpv_node *args = node_map_add(dst, "args", MPV_FORMAT_NODE_ARRAY);
|
|
|
|
for (int n = 0; n < msg->num_args; n++) {
|
|
|
|
struct mpv_node *sn = node_array_add(args, MPV_FORMAT_NONE);
|
|
|
|
sn->format = MPV_FORMAT_STRING;
|
|
|
|
sn->u.string = (char *)msg->args[n];
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case MPV_EVENT_PROPERTY_CHANGE: {
|
|
|
|
mpv_event_property *prop = event->data;
|
|
|
|
|
|
|
|
node_map_add_string(dst, "name", prop->name);
|
|
|
|
|
|
|
|
switch (prop->format) {
|
|
|
|
case MPV_FORMAT_NODE:
|
|
|
|
*node_map_add(dst, "data", MPV_FORMAT_NONE) =
|
|
|
|
*(struct mpv_node *)prop->data;
|
|
|
|
break;
|
|
|
|
case MPV_FORMAT_DOUBLE:
|
|
|
|
node_map_add_double(dst, "data", *(double *)prop->data);
|
|
|
|
break;
|
|
|
|
case MPV_FORMAT_FLAG:
|
|
|
|
node_map_add_flag(dst, "data", *(int *)prop->data);
|
|
|
|
break;
|
|
|
|
case MPV_FORMAT_STRING:
|
|
|
|
node_map_add_string(dst, "data", *(char **)prop->data);
|
|
|
|
break;
|
|
|
|
default: ;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-03-21 21:09:07 +00:00
|
|
|
case MPV_EVENT_COMMAND_REPLY: {
|
|
|
|
mpv_event_command *cmd = event->data;
|
|
|
|
|
|
|
|
*node_map_add(dst, "result", MPV_FORMAT_NONE) = cmd->result;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-03-21 18:31:58 +00:00
|
|
|
case MPV_EVENT_HOOK: {
|
|
|
|
mpv_event_hook *hook = event->data;
|
|
|
|
|
|
|
|
node_map_add_int64(dst, "hook_id", hook->id);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
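// --- Hypothetical usage sketch (client side), not part of this file ---
// One way a caller might use the conversion above, e.g. for debugging or
// serialization. The resulting map always contains "event"; the contents are
// released with mpv_free_node_contents().
#include <stdio.h>
#include <mpv/client.h>

static void print_event_strings(mpv_event *event)
{
    mpv_node node;
    if (mpv_event_to_node(&node, event) >= 0) {
        for (int n = 0; n < node.u.list->num; n++) {
            if (node.u.list->values[n].format == MPV_FORMAT_STRING)
                printf("%s=%s\n", node.u.list->keys[n],
                       node.u.list->values[n].u.string);
        }
        mpv_free_node_contents(&node);
    }
}
// --- end of sketch ---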
|
|
|
|
|
2014-06-10 21:56:05 +00:00
|
|
|
static const char *const err_table[] = {
|
2014-02-10 20:01:35 +00:00
|
|
|
[-MPV_ERROR_SUCCESS] = "success",
|
|
|
|
[-MPV_ERROR_EVENT_QUEUE_FULL] = "event queue full",
|
|
|
|
[-MPV_ERROR_NOMEM] = "memory allocation failed",
|
|
|
|
[-MPV_ERROR_UNINITIALIZED] = "core not initialized",
|
|
|
|
[-MPV_ERROR_INVALID_PARAMETER] = "invalid parameter",
|
|
|
|
[-MPV_ERROR_OPTION_NOT_FOUND] = "option not found",
|
2014-02-24 19:05:56 +00:00
|
|
|
[-MPV_ERROR_OPTION_FORMAT] = "unsupported format for accessing option",
|
2014-02-10 20:01:35 +00:00
|
|
|
[-MPV_ERROR_OPTION_ERROR] = "error setting option",
|
|
|
|
[-MPV_ERROR_PROPERTY_NOT_FOUND] = "property not found",
|
2014-02-24 19:05:56 +00:00
|
|
|
[-MPV_ERROR_PROPERTY_FORMAT] = "unsupported format for accessing property",
|
2014-02-10 20:01:35 +00:00
|
|
|
[-MPV_ERROR_PROPERTY_UNAVAILABLE] = "property unavailable",
|
|
|
|
[-MPV_ERROR_PROPERTY_ERROR] = "error accessing property",
|
2014-06-01 01:41:46 +00:00
|
|
|
[-MPV_ERROR_COMMAND] = "error running command",
|
2014-10-28 15:19:07 +00:00
|
|
|
[-MPV_ERROR_LOADING_FAILED] = "loading failed",
|
|
|
|
[-MPV_ERROR_AO_INIT_FAILED] = "audio output initialization failed",
|
2016-09-09 10:28:03 +00:00
|
|
|
[-MPV_ERROR_VO_INIT_FAILED] = "video output initialization failed",
|
2015-10-26 14:55:40 +00:00
|
|
|
[-MPV_ERROR_NOTHING_TO_PLAY] = "no audio or video data played",
|
2014-10-28 15:19:07 +00:00
|
|
|
[-MPV_ERROR_UNKNOWN_FORMAT] = "unrecognized file format",
|
2014-12-09 16:47:02 +00:00
|
|
|
[-MPV_ERROR_UNSUPPORTED] = "not supported",
|
|
|
|
[-MPV_ERROR_NOT_IMPLEMENTED] = "operation not implemented",
|
2016-08-07 16:05:54 +00:00
|
|
|
[-MPV_ERROR_GENERIC] = "something happened",
|
2014-02-10 20:01:35 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
const char *mpv_error_string(int error)
|
|
|
|
{
|
|
|
|
error = -error;
|
|
|
|
if (error < 0)
|
|
|
|
error = 0;
|
|
|
|
const char *name = NULL;
|
|
|
|
if (error < MP_ARRAY_SIZE(err_table))
|
|
|
|
name = err_table[error];
|
|
|
|
return name ? name : "unknown error";
|
|
|
|
}
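// --- Hypothetical usage sketch, not part of this file ---
// MPV_ERROR_* values are <= 0, so negating them indexes err_table above;
// positive (success) values clamp to "success", and unknown negative codes
// fall through to "unknown error". A caller just passes the raw return value:
//
//     int r = mpv_initialize(mpv);
//     if (r < 0)
//         fprintf(stderr, "init failed: %s\n", mpv_error_string(r));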
|
|
|
|
|
2014-06-10 21:56:05 +00:00
|
|
|
static const char *const event_table[] = {
|
2014-02-10 20:01:35 +00:00
|
|
|
[MPV_EVENT_NONE] = "none",
|
|
|
|
[MPV_EVENT_SHUTDOWN] = "shutdown",
|
|
|
|
[MPV_EVENT_LOG_MESSAGE] = "log-message",
|
|
|
|
[MPV_EVENT_GET_PROPERTY_REPLY] = "get-property-reply",
|
|
|
|
[MPV_EVENT_SET_PROPERTY_REPLY] = "set-property-reply",
|
|
|
|
[MPV_EVENT_COMMAND_REPLY] = "command-reply",
|
|
|
|
[MPV_EVENT_START_FILE] = "start-file",
|
|
|
|
[MPV_EVENT_END_FILE] = "end-file",
|
2014-02-28 00:31:38 +00:00
|
|
|
[MPV_EVENT_FILE_LOADED] = "file-loaded",
|
2014-02-10 20:01:35 +00:00
|
|
|
[MPV_EVENT_TRACKS_CHANGED] = "tracks-changed",
|
|
|
|
[MPV_EVENT_TRACK_SWITCHED] = "track-switched",
|
|
|
|
[MPV_EVENT_IDLE] = "idle",
|
|
|
|
[MPV_EVENT_PAUSE] = "pause",
|
|
|
|
[MPV_EVENT_UNPAUSE] = "unpause",
|
|
|
|
[MPV_EVENT_TICK] = "tick",
|
|
|
|
[MPV_EVENT_SCRIPT_INPUT_DISPATCH] = "script-input-dispatch",
|
2014-02-17 01:33:47 +00:00
|
|
|
[MPV_EVENT_CLIENT_MESSAGE] = "client-message",
|
2014-02-17 01:52:26 +00:00
|
|
|
[MPV_EVENT_VIDEO_RECONFIG] = "video-reconfig",
|
|
|
|
[MPV_EVENT_AUDIO_RECONFIG] = "audio-reconfig",
|
2014-02-19 15:00:37 +00:00
|
|
|
[MPV_EVENT_METADATA_UPDATE] = "metadata-update",
|
2014-02-28 00:31:38 +00:00
|
|
|
[MPV_EVENT_SEEK] = "seek",
|
|
|
|
[MPV_EVENT_PLAYBACK_RESTART] = "playback-restart",
|
2014-04-05 21:54:21 +00:00
|
|
|
[MPV_EVENT_PROPERTY_CHANGE] = "property-change",
|
2014-04-27 20:28:07 +00:00
|
|
|
[MPV_EVENT_CHAPTER_CHANGE] = "chapter-change",
|
2015-01-19 18:54:20 +00:00
|
|
|
[MPV_EVENT_QUEUE_OVERFLOW] = "event-queue-overflow",
|
2018-03-23 15:24:49 +00:00
|
|
|
[MPV_EVENT_HOOK] = "hook",
|
2014-02-10 20:01:35 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
const char *mpv_event_name(mpv_event_id event)
|
|
|
|
{
|
2014-11-07 14:58:51 +00:00
|
|
|
if ((unsigned)event >= MP_ARRAY_SIZE(event_table))
|
2014-02-10 20:01:35 +00:00
|
|
|
return NULL;
|
|
|
|
return event_table[event];
|
|
|
|
}
|
|
|
|
|
|
|
|
void mpv_free(void *data)
|
|
|
|
{
|
|
|
|
talloc_free(data);
|
|
|
|
}
|
2014-02-24 20:59:20 +00:00
|
|
|
|
|
|
|
int64_t mpv_get_time_us(mpv_handle *ctx)
|
|
|
|
{
|
|
|
|
return mp_time_us();
|
|
|
|
}
|
2014-12-09 16:47:02 +00:00
|
|
|
|
2018-02-20 12:30:18 +00:00
|
|
|
#include "video/out/libmpv.h"
|
|
|
|
|
2018-04-20 17:26:04 +00:00
|
|
|
static void do_kill(void *ptr)
|
2014-12-31 19:31:19 +00:00
|
|
|
{
|
2019-05-26 08:58:25 +00:00
|
|
|
struct MPContext *mpctx = ptr;
|
2018-04-20 17:26:04 +00:00
|
|
|
|
2016-09-09 08:59:09 +00:00
|
|
|
struct track *track = mpctx->vo_chain ? mpctx->vo_chain->track : NULL;
|
2015-01-04 21:19:42 +00:00
|
|
|
uninit_video_out(mpctx);
|
2016-09-09 08:59:09 +00:00
|
|
|
if (track) {
|
|
|
|
mpctx->error_playing = MPV_ERROR_VO_INIT_FAILED;
|
|
|
|
error_on_track(mpctx, track);
|
|
|
|
}
|
2018-04-20 17:26:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Used by vo_libmpv to (a)synchronously uninitialize video.
|
2019-05-26 08:58:25 +00:00
|
|
|
void kill_video_async(struct mp_client_api *client_api)
|
2018-04-20 17:26:04 +00:00
|
|
|
{
|
|
|
|
struct MPContext *mpctx = client_api->mpctx;
|
2019-05-26 08:58:25 +00:00
|
|
|
mp_dispatch_enqueue(mpctx->dispatch, do_kill, mpctx);
|
2014-12-31 19:31:19 +00:00
|
|
|
}
|
|
|
|
|
2018-02-20 12:30:18 +00:00
|
|
|
// Used by vo_libmpv to set the current render context.
|
|
|
|
bool mp_set_main_render_context(struct mp_client_api *client_api,
|
|
|
|
struct mpv_render_context *ctx, bool active)
|
|
|
|
{
|
|
|
|
assert(ctx);
|
|
|
|
|
|
|
|
pthread_mutex_lock(&client_api->lock);
|
|
|
|
bool is_set = !!client_api->render_context;
|
|
|
|
bool is_same = client_api->render_context == ctx;
|
|
|
|
// Can set if it doesn't remove another existing ctx.
|
|
|
|
bool res = is_same || !is_set;
|
|
|
|
if (res)
|
|
|
|
client_api->render_context = active ? ctx : NULL;
|
|
|
|
pthread_mutex_unlock(&client_api->lock);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Used by vo_libmpv. Relies on guarantees by mp_render_context_acquire().
|
|
|
|
struct mpv_render_context *
|
|
|
|
mp_client_api_acquire_render_context(struct mp_client_api *ca)
|
|
|
|
{
|
|
|
|
struct mpv_render_context *res = NULL;
|
|
|
|
pthread_mutex_lock(&ca->lock);
|
|
|
|
if (ca->render_context && mp_render_context_acquire(ca->render_context))
|
|
|
|
res = ca->render_context;
|
|
|
|
pthread_mutex_unlock(&ca->lock);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Emulation of old opengl_cb API.
|
|
|
|
|
2014-12-09 16:47:02 +00:00
|
|
|
#include "libmpv/opengl_cb.h"
|
2018-02-20 12:30:18 +00:00
|
|
|
#include "libmpv/render_gl.h"
|
|
|
|
|
|
|
|
struct mpv_opengl_cb_context {
|
|
|
|
struct mp_client_api *client_api;
|
|
|
|
mpv_opengl_cb_update_fn callback;
|
|
|
|
void *callback_ctx;
|
|
|
|
};
|
2014-12-09 16:47:02 +00:00
|
|
|
|
|
|
|
static mpv_opengl_cb_context *opengl_cb_get_context(mpv_handle *ctx)
|
|
|
|
{
|
2018-02-20 12:30:18 +00:00
|
|
|
pthread_mutex_lock(&ctx->clients->lock);
|
|
|
|
mpv_opengl_cb_context *cb = ctx->clients->gl_cb_ctx;
|
2014-12-09 16:47:02 +00:00
|
|
|
if (!cb) {
|
2018-02-20 12:30:18 +00:00
|
|
|
cb = talloc_zero(NULL, struct mpv_opengl_cb_context);
|
|
|
|
cb->client_api = ctx->clients;
|
|
|
|
cb->client_api->gl_cb_ctx = cb;
|
2014-12-09 16:47:02 +00:00
|
|
|
}
|
2018-02-20 12:30:18 +00:00
|
|
|
pthread_mutex_unlock(&ctx->clients->lock);
|
2014-12-09 16:47:02 +00:00
|
|
|
return cb;
|
|
|
|
}
|
2018-02-20 12:30:18 +00:00
|
|
|
|
2014-12-09 16:47:02 +00:00
|
|
|
void mpv_opengl_cb_set_update_callback(mpv_opengl_cb_context *ctx,
|
|
|
|
mpv_opengl_cb_update_fn callback,
|
|
|
|
void *callback_ctx)
|
|
|
|
{
|
2018-02-20 12:30:18 +00:00
|
|
|
// This was probably supposed to be thread-safe, but we don't care. It's
|
|
|
|
// compatibility code, and if you have problems, use the new API.
|
|
|
|
if (ctx->client_api->render_context) {
|
|
|
|
mpv_render_context_set_update_callback(ctx->client_api->render_context,
|
|
|
|
callback, callback_ctx);
|
|
|
|
}
|
|
|
|
// Nasty thing: could set this even while not initialized, so we need to
|
|
|
|
// preserve it.
|
|
|
|
ctx->callback = callback;
|
|
|
|
ctx->callback_ctx = callback_ctx;
|
2014-12-09 16:47:02 +00:00
|
|
|
}
|
2018-02-20 12:30:18 +00:00
|
|
|
|
2014-12-09 16:47:02 +00:00
|
|
|
int mpv_opengl_cb_init_gl(mpv_opengl_cb_context *ctx, const char *exts,
|
|
|
|
mpv_opengl_cb_get_proc_address_fn get_proc_address,
|
|
|
|
void *get_proc_address_ctx)
|
|
|
|
{
|
2018-02-20 12:30:18 +00:00
|
|
|
if (ctx->client_api->render_context)
|
|
|
|
return MPV_ERROR_INVALID_PARAMETER;
|
|
|
|
|
|
|
|
// mpv_render_context_create() only calls mp_client_get_global() on it.
|
|
|
|
mpv_handle dummy = {.mpctx = ctx->client_api->mpctx};
|
|
|
|
|
|
|
|
mpv_render_param params[] = {
|
|
|
|
{MPV_RENDER_PARAM_API_TYPE, MPV_RENDER_API_TYPE_OPENGL},
|
|
|
|
{MPV_RENDER_PARAM_OPENGL_INIT_PARAMS, &(mpv_opengl_init_params){
|
|
|
|
.get_proc_address = get_proc_address,
|
|
|
|
.get_proc_address_ctx = get_proc_address_ctx,
|
|
|
|
.extra_exts = exts,
|
|
|
|
}},
|
client API: add a new way to pass X11 Display etc. to render API
Hardware decoding things often need access to additional handles from
the windowing system, such as the X11 or Wayland display when using
vaapi. The opengl-cb had nothing dedicated for this, and used the weird
GL_MP_MPGetNativeDisplay GL extension (which was mpv specific and not
officially registered with OpenGL).
This was awkward, and a pain due to having to emulate GL context
behavior (like needing a TLS variable to store context for the pseudo GL
extension function). In addition (and not inherently due to this), we
could pass only one resource from mpv builtin context backends to
hwdecs. It was also all GL specific.
Replace this with a newer mechanism. It works for all RA backends, not
just GL. The API user can explicitly pass the objects at init time via
mpv_render_context_create(). Multiple resources are naturally possible.
The API uses MPV_RENDER_PARAM_* defines, but internally we use strings.
This is done for 2 reasons: 1. trying to leave libmpv and internal
mechanisms decoupled, 2. not having to add public API for some of the
internal resource types (especially D3D/GL interop stuff).
To remain sane, drop support for obscure half-working opengl-cb things,
like the DRM interop (was missing necessary things), the RPI window
thing (nobody used it), and obscure D3D interop things (not needed with
ANGLE, others were undocumented). In order not to break ABI and the C
API, we don't remove the associated structs from opengl_cb.h.
The parts which are still needed (in particular DRM interop) need to be
ported to the render API.
2018-03-22 16:05:01 +00:00
|
|
|
// Hack for explicit legacy hwdec loading. We really want to make it
|
|
|
|
// impossible for proper render API users to trigger this.
|
|
|
|
{(mpv_render_param_type)-1, ctx->client_api->mpctx->global},
|
2018-02-20 12:30:18 +00:00
|
|
|
{0}
|
|
|
|
};
|
|
|
|
int err = mpv_render_context_create(&ctx->client_api->render_context,
|
|
|
|
&dummy, params);
|
|
|
|
if (err >= 0) {
|
|
|
|
mpv_render_context_set_update_callback(ctx->client_api->render_context,
|
|
|
|
ctx->callback, ctx->callback_ctx);
|
|
|
|
}
|
|
|
|
return err;
|
2014-12-09 16:47:02 +00:00
|
|
|
}
|
2018-02-20 12:30:18 +00:00
|
|
|
|
2015-04-09 17:31:01 +00:00
|
|
|
int mpv_opengl_cb_draw(mpv_opengl_cb_context *ctx, int fbo, int w, int h)
|
2014-12-09 16:47:02 +00:00
|
|
|
{
|
2018-02-20 12:30:18 +00:00
|
|
|
if (!ctx->client_api->render_context)
|
|
|
|
return MPV_ERROR_INVALID_PARAMETER;
|
|
|
|
|
|
|
|
mpv_render_param params[] = {
|
|
|
|
{MPV_RENDER_PARAM_OPENGL_FBO, &(mpv_opengl_fbo){
|
|
|
|
.fbo = fbo,
|
|
|
|
.w = w,
|
|
|
|
.h = abs(h),
|
|
|
|
}},
|
|
|
|
{MPV_RENDER_PARAM_FLIP_Y, &(int){h < 0}},
|
|
|
|
{0}
|
|
|
|
};
|
|
|
|
return mpv_render_context_render(ctx->client_api->render_context, params);
|
2014-12-09 16:47:02 +00:00
|
|
|
}
|
2018-02-20 12:30:18 +00:00
|
|
|
|
2015-04-09 17:30:26 +00:00
|
|
|
int mpv_opengl_cb_report_flip(mpv_opengl_cb_context *ctx, int64_t time)
|
|
|
|
{
|
2018-02-20 12:30:18 +00:00
|
|
|
if (!ctx->client_api->render_context)
|
|
|
|
return MPV_ERROR_INVALID_PARAMETER;
|
|
|
|
|
|
|
|
mpv_render_context_report_swap(ctx->client_api->render_context);
|
|
|
|
return 0;
|
2015-04-09 17:30:26 +00:00
|
|
|
}
|
2018-02-20 12:30:18 +00:00
|
|
|
|
2014-12-09 16:47:02 +00:00
|
|
|
int mpv_opengl_cb_uninit_gl(mpv_opengl_cb_context *ctx)
|
|
|
|
{
|
2018-02-20 12:30:18 +00:00
|
|
|
if (ctx->client_api->render_context)
|
|
|
|
mpv_render_context_free(ctx->client_api->render_context);
|
|
|
|
ctx->client_api->render_context = NULL;
|
|
|
|
return 0;
|
2014-12-09 16:47:02 +00:00
|
|
|
}
|
2018-02-20 12:30:18 +00:00
|
|
|
|
2015-04-09 17:31:01 +00:00
|
|
|
int mpv_opengl_cb_render(mpv_opengl_cb_context *ctx, int fbo, int vp[4])
|
|
|
|
{
|
|
|
|
return mpv_opengl_cb_draw(ctx, fbo, vp[2], vp[3]);
|
|
|
|
}
|
|
|
|
|
2018-02-28 14:09:14 +00:00
|
|
|
void *mpv_get_sub_api(mpv_handle *ctx, mpv_sub_api sub_api)
|
2014-12-09 16:47:02 +00:00
|
|
|
{
|
2016-09-19 17:58:14 +00:00
|
|
|
if (!ctx->mpctx->initialized)
|
|
|
|
return NULL;
|
2014-12-09 16:47:02 +00:00
|
|
|
void *res = NULL;
|
|
|
|
switch (sub_api) {
|
|
|
|
case MPV_SUB_API_OPENGL_CB:
|
|
|
|
res = opengl_cb_get_context(ctx);
|
|
|
|
break;
|
|
|
|
default:;
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
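// --- Hypothetical usage sketch (client side), not part of this file ---
// New code would skip the opengl_cb emulation above and talk to the render
// API directly; a minimal setup/draw sketch, assuming the embedder provides
// a GL loader (get_proc_address_fn) and keeps a GL context current.
#include <mpv/client.h>
#include <mpv/render_gl.h>

// Provided by the embedding application, e.g. wrapping the toolkit's
// GL loader (SDL_GL_GetProcAddress, glXGetProcAddressARB, ...).
void *get_proc_address_fn(void *ctx, const char *name);

static mpv_render_context *setup_render(mpv_handle *mpv)
{
    mpv_render_context *rc = NULL;
    mpv_render_param params[] = {
        {MPV_RENDER_PARAM_API_TYPE, MPV_RENDER_API_TYPE_OPENGL},
        {MPV_RENDER_PARAM_OPENGL_INIT_PARAMS, &(mpv_opengl_init_params){
            .get_proc_address = get_proc_address_fn,
        }},
        {0}
    };
    return mpv_render_context_create(&rc, mpv, params) >= 0 ? rc : NULL;
}

static void draw_frame(mpv_render_context *rc, int fbo, int w, int h)
{
    mpv_render_param params[] = {
        {MPV_RENDER_PARAM_OPENGL_FBO, &(mpv_opengl_fbo){.fbo = fbo, .w = w, .h = h}},
        {MPV_RENDER_PARAM_FLIP_Y, &(int){1}},   // flip when drawing to the default framebuffer
        {0}
    };
    mpv_render_context_render(rc, params);
}
// --- end of sketch ---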
|
2016-08-07 16:10:05 +00:00
|
|
|
|
2018-02-20 12:30:18 +00:00
|
|
|
// stream_cb
|
|
|
|
|
2016-08-07 16:10:05 +00:00
|
|
|
struct mp_custom_protocol {
|
|
|
|
char *protocol;
|
|
|
|
void *user_data;
|
|
|
|
mpv_stream_cb_open_ro_fn open_fn;
|
|
|
|
};
|
|
|
|
|
|
|
|
int mpv_stream_cb_add_ro(mpv_handle *ctx, const char *protocol, void *user_data,
|
|
|
|
mpv_stream_cb_open_ro_fn open_fn)
|
|
|
|
{
|
|
|
|
if (!open_fn)
|
|
|
|
return MPV_ERROR_INVALID_PARAMETER;
|
|
|
|
|
|
|
|
struct mp_client_api *clients = ctx->clients;
|
|
|
|
int r = 0;
|
|
|
|
pthread_mutex_lock(&clients->lock);
|
|
|
|
for (int n = 0; n < clients->num_custom_protocols; n++) {
|
|
|
|
struct mp_custom_protocol *proto = &clients->custom_protocols[n];
|
|
|
|
if (strcmp(proto->protocol, protocol) == 0) {
|
|
|
|
r = MPV_ERROR_INVALID_PARAMETER;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (stream_has_proto(protocol))
|
|
|
|
r = MPV_ERROR_INVALID_PARAMETER;
|
|
|
|
if (r >= 0) {
|
|
|
|
struct mp_custom_protocol proto = {
|
|
|
|
.protocol = talloc_strdup(clients, protocol),
|
|
|
|
.user_data = user_data,
|
|
|
|
.open_fn = open_fn,
|
|
|
|
};
|
|
|
|
MP_TARRAY_APPEND(clients, clients->custom_protocols,
|
|
|
|
clients->num_custom_protocols, proto);
|
|
|
|
}
|
|
|
|
pthread_mutex_unlock(&clients->lock);
|
|
|
|
return r;
|
|
|
|
}
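// --- Hypothetical usage sketch (client side), not part of this file ---
// The registration side of the function above: serve an in-memory buffer
// under a made-up "myproto" protocol. The callback signatures follow
// <mpv/stream_cb.h>; buffer handling and error reporting are simplified.
#include <stdlib.h>
#include <string.h>
#include <mpv/client.h>
#include <mpv/stream_cb.h>

struct mem_stream { const char *data; size_t size, pos; };

static int64_t mem_read(void *cookie, char *buf, uint64_t nbytes)
{
    struct mem_stream *s = cookie;
    size_t left = s->size - s->pos;
    size_t n = nbytes < left ? nbytes : left;
    memcpy(buf, s->data + s->pos, n);
    s->pos += n;
    return n;                                   // 0 signals EOF
}

static int64_t mem_seek(void *cookie, int64_t offset)
{
    struct mem_stream *s = cookie;
    if (offset < 0 || (uint64_t)offset > s->size)
        return MPV_ERROR_GENERIC;
    s->pos = offset;
    return offset;
}

static void mem_close(void *cookie)
{
    free(cookie);
}

static int mem_open(void *user_data, char *uri, mpv_stream_cb_info *info)
{
    (void)uri;                                  // full "myproto://..." URI, unused here
    struct mem_stream *s = calloc(1, sizeof(*s));
    s->data = user_data;                        // the pointer passed to mpv_stream_cb_add_ro()
    s->size = strlen(s->data);
    info->cookie = s;
    info->read_fn = mem_read;
    info->seek_fn = mem_seek;
    info->close_fn = mem_close;
    return 0;
}

// Registration:  mpv_stream_cb_add_ro(mpv, "myproto", (void *)buffer, mem_open);
// Afterwards, loading "myproto://anything" goes through the callbacks above.
// --- end of sketch ---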
|
|
|
|
|
|
|
|
bool mp_streamcb_lookup(struct mpv_global *g, const char *protocol,
|
|
|
|
void **out_user_data, mpv_stream_cb_open_ro_fn *out_fn)
|
|
|
|
{
|
|
|
|
struct mp_client_api *clients = g->client_api;
|
|
|
|
bool found = false;
|
|
|
|
pthread_mutex_lock(&clients->lock);
|
|
|
|
for (int n = 0; n < clients->num_custom_protocols; n++) {
|
|
|
|
struct mp_custom_protocol *proto = &clients->custom_protocols[n];
|
|
|
|
if (strcmp(proto->protocol, protocol) == 0) {
|
|
|
|
*out_user_data = proto->user_data;
|
|
|
|
*out_fn = proto->open_fn;
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pthread_mutex_unlock(&clients->lock);
|
|
|
|
return found;
|
|
|
|
}
|