1
0
mirror of https://github.com/mpv-player/mpv synced 2024-12-24 15:52:25 +00:00

filter: minor cosmetic naming issue

Just putting some more lipstick on the pig, maybe it looks a bit nicer
now.
This commit is contained in:
wm4 2020-03-08 19:37:20 +01:00
parent 048334c396
commit 3b4641a5a9
6 changed files with 46 additions and 40 deletions

View File

@@ -1076,7 +1076,7 @@ static void *dec_thread(void *ptr)
mpthread_set_name(t_name);
while (!p->request_terminate_dec_thread) {
mp_filter_run(p->dec_root_filter);
mp_filter_graph_run(p->dec_root_filter);
update_cached_values(p);
mp_dispatch_queue_process(p->dec_dispatch, INFINITY);
}
@@ -1191,7 +1191,7 @@ struct mp_decoder_wrapper *mp_decoder_wrapper_create(struct mp_filter *parent,
p->queue = mp_async_queue_create();
p->dec_dispatch = mp_dispatch_create(p);
p->dec_root_filter = mp_filter_create_root(public_f->global);
mp_filter_root_set_wakeup_cb(p->dec_root_filter, wakeup_dec_thread, p);
mp_filter_graph_set_wakeup_cb(p->dec_root_filter, wakeup_dec_thread, p);
mp_dispatch_set_onlock_fn(p->dec_dispatch, onlock_dec_thread, p);
struct mp_stream_info *sinfo = mp_filter_find_stream_info(parent);

View File

@@ -139,10 +139,10 @@ static void add_pending(struct mp_filter *f)
// Possibly enter recursive filtering. This is done as convenience for
// "external" filter users only. (Normal filtering does this iteratively via
// mp_filter_run() to avoid filter reentrancy issues and deep call stacks.) If
// the API user uses an external manually connected pin, do recursive filtering
// as a not strictly necessary feature which makes outside I/O with filters
// easier.
// mp_filter_graph_run() to avoid filter reentrancy issues and deep call
// stacks.) If the API user uses an external manually connected pin, do
// recursive filtering as a not strictly necessary feature which makes outside
// I/O with filters easier.
static void filter_recursive(struct mp_filter *f)
{
assert(f);
@@ -154,7 +154,7 @@ static void filter_recursive(struct mp_filter *f)
// Also don't lose the pending state, which the user may or may not
// care about.
r->external_pending |= mp_filter_run(r->root_filter);
r->external_pending |= mp_filter_graph_run(r->root_filter);
}
void mp_filter_internal_mark_progress(struct mp_filter *f)
@@ -179,9 +179,10 @@ static void flush_async_notifications(struct filter_runner *r)
pthread_mutex_unlock(&r->async_lock);
}
bool mp_filter_run(struct mp_filter *filter)
bool mp_filter_graph_run(struct mp_filter *filter)
{
struct filter_runner *r = filter->in->runner;
assert(filter == r->root_filter); // user is supposed to call this on root only
int64_t end_time = 0;
if (isfinite(r->max_run_time))
@@ -674,12 +675,14 @@ void mp_filter_mark_async_progress(struct mp_filter *f)
// Set the maximum time a single mp_filter_graph_run() call may block; per the
// header docs elsewhere in this commit, the default is INFINITY and values
// <= 0 make it return after one iteration.
void mp_filter_graph_set_max_run_time(struct mp_filter *f, double seconds)
{
// NOTE(review): lines quoted from a commit diff; indentation was stripped
// by the page scrape.
struct filter_runner *r = f->in->runner;
assert(f == r->root_filter); // user is supposed to call this on root only
r->max_run_time = seconds;
}
// Request that mp_filter_graph_run() return early (suspend, not destroy);
// the header docs in this commit state this is explicitly thread-safe, which
// matches the implementation: it only raises an atomic flag.
void mp_filter_graph_interrupt(struct mp_filter *f)
{
struct filter_runner *r = f->in->runner;
assert(f == r->root_filter); // user is supposed to call this on root only
atomic_store(&r->interrupt_flag, true);
}
@@ -809,10 +812,11 @@ struct mp_filter *mp_filter_create_root(struct mpv_global *global)
return mp_filter_create_with_params(&params);
}
void mp_filter_root_set_wakeup_cb(struct mp_filter *root,
void (*wakeup_cb)(void *ctx), void *ctx)
void mp_filter_graph_set_wakeup_cb(struct mp_filter *root,
void (*wakeup_cb)(void *ctx), void *ctx)
{
struct filter_runner *r = root->in->runner;
assert(root == r->root_filter); // user is supposed to call this on root only
pthread_mutex_lock(&r->async_lock);
r->wakeup_cb = wakeup_cb;
r->wakeup_ctx = ctx;

View File

@@ -204,10 +204,10 @@ const char *mp_pin_get_name(struct mp_pin *p);
* --- Driving filters:
*
* The filter root (created by mp_filter_create_root()) will internally create
* a graph runner, that can be entered with mp_filter_run(). This will check if
* any filter/pin has unhandled requests, and call filter process() functions
* accordingly. Outside of the filter, this can be triggered implicitly via the
* mp_pin_* functions.
* a graph runner, that can be entered with mp_filter_graph_run(). This will
* check if any filter/pin has unhandled requests, and call filter process()
* functions accordingly. Outside of the filter, this can be triggered
* implicitly via the mp_pin_* functions.
*
* Multiple filters are driven by letting mp_pin flag filters which need
* process() to be called. The process starts by requesting output from the
@@ -409,52 +409,56 @@ struct AVBufferRef *mp_filter_load_hwdec_device(struct mp_filter *f, int avtype)
// Perform filtering. This runs until the filter graph is blocked (due to
// missing external input or unread output). It returns whether any outside
// pins have changed state.
// Note: this always operates on the filter graph associated with f, f itself
// is not treated differently from any other filters in the graph.
bool mp_filter_run(struct mp_filter *f);
// Can be called on the root filter only.
bool mp_filter_graph_run(struct mp_filter *root);
// Set the maximum time mp_filter_run() should block. If the maximum time
// Set the maximum time mp_filter_graph_run() should block. If the maximum time
// expires, the effect is the same as calling mp_filter_graph_interrupt() while
// the function is running. See that function for further details.
// The default is seconds==INFINITY. Values <=0 make it return after 1 iteration.
void mp_filter_graph_set_max_run_time(struct mp_filter *f, double seconds);
// Can be called on the root filter only.
void mp_filter_graph_set_max_run_time(struct mp_filter *root, double seconds);
// Interrupt mp_filter_run() asynchronously. This does not stop filtering in a
// destructive way, but merely suspends it. In practice, this will make
// mp_filter_run() return after the current filter's process() function has
// finished. Filtering can be resumed with subsequent mp_filter_run() calls.
// When mp_filter_run() is interrupted, it will trigger the filter graph wakeup
// callback, which in turn ensures that the user will call mp_filter_run() again.
// If it is called if not in mp_filter_run(), the next mp_filter_run() call is
// interrupted and no filtering is done for that call.
// Interrupt mp_filter_graph_run() asynchronously. This does not stop filtering
// in a destructive way, but merely suspends it. In practice, this will make
// mp_filter_graph_run() return after the current filter's process() function has
// finished. Filtering can be resumed with subsequent mp_filter_graph_run() calls.
// When mp_filter_graph_run() is interrupted, it will trigger the filter graph
// wakeup callback, which in turn ensures that the user will call
// mp_filter_graph_run() again.
// If it is called while not in mp_filter_graph_run(), the next
// mp_filter_graph_run() call is interrupted and no filtering is done for that call.
// Calling this too often will starve filtering.
// This does not call the graph wakeup callback directly, which will avoid
// potential reentrancy issues. (But mp_filter_run() will call it in reaction to
// it, as described above.)
// potential reentrancy issues. (But mp_filter_graph_run() will call it in
// reaction to it, as described above.)
// Explicitly thread-safe.
void mp_filter_graph_interrupt(struct mp_filter *f);
// Can be called on the root filter only.
void mp_filter_graph_interrupt(struct mp_filter *root);
// Create a root dummy filter with no inputs or outputs. This fulfills the
// following functions:
// - creating a new filter graph (attached to the root filter)
// - passing it as parent filter to top-level filters
// - driving the filter loop between the shared filters
// - setting the wakeup callback for async filtering
// - implicitly passing down global data like mpv_global and keeping filter
// constructor functions simple
// Note that you can still connect pins of filters with different parents or
// root filters, but then you may have to manually invoke mp_filter_run() on
// the root filters of the connected filters to drive data flow.
// root filters, but then you may have to manually invoke mp_filter_graph_run()
// on the root filters of the connected filters to drive data flow.
struct mp_filter *mp_filter_create_root(struct mpv_global *global);
// Asynchronous filters may need to wakeup the user thread if the status of any
// mp_pin has changed. If this is called, the callback provider should get the
// user's thread to call mp_filter_run() again.
// user's thread to call mp_filter_graph_run() again.
// The wakeup callback must not recursively call into any filter APIs, or do
// blocking waits on the filter API (deadlocks will happen).
// A wakeup callback should always set a "wakeup" flag, that is reset only when
// mp_filter_run() is going to be called again with no wait time.
void mp_filter_root_set_wakeup_cb(struct mp_filter *root,
void (*wakeup_cb)(void *ctx), void *ctx);
// mp_filter_graph_run() is going to be called again with no wait time.
// Can be called on the root filter only.
void mp_filter_graph_set_wakeup_cb(struct mp_filter *root,
void (*wakeup_cb)(void *ctx), void *ctx);
// Debugging internal stuff.
void mp_filter_dump_states(struct mp_filter *f);

View File

@@ -119,8 +119,6 @@ struct mp_filter_info {
// automatically free'd.
// All filters in the same parent tree must be driven in the same thread (or be
// explicitly synchronized otherwise).
// Driving the parent (or root) filter with mp_filter_run() will make sure this
// filter is driven too, without having to resort to recursion.
struct mp_filter *mp_filter_create(struct mp_filter *parent,
const struct mp_filter_info *info);

View File

@@ -1403,7 +1403,7 @@ static void play_current_file(struct MPContext *mpctx)
mpctx->last_seek_pts = 0.0;
mpctx->seek = (struct seek_params){ 0 };
mpctx->filter_root = mp_filter_create_root(mpctx->global);
mp_filter_root_set_wakeup_cb(mpctx->filter_root, mp_wakeup_core_cb, mpctx);
mp_filter_graph_set_wakeup_cb(mpctx->filter_root, mp_wakeup_core_cb, mpctx);
mp_filter_graph_set_max_run_time(mpctx->filter_root, 0.1);
reset_playback_state(mpctx);

View File

@@ -1221,7 +1221,7 @@ void run_playloop(struct MPContext *mpctx)
handle_osd_redraw(mpctx);
if (mp_filter_run(mpctx->filter_root))
if (mp_filter_graph_run(mpctx->filter_root))
mp_wakeup_core(mpctx);
mp_wait_events(mpctx);