#ifndef MPLAYER_OPTIONS_H
#define MPLAYER_OPTIONS_H

#include <stdbool.h>
#include <stdint.h>

#include "m_option.h"
#include "common/common.h"

typedef struct mp_vo_opts {
    struct m_obj_settings *video_driver_list;

    bool taskbar_progress;
    bool snap_window;
    int drag_and_drop;
    bool ontop;
    int ontop_level;

options: introduce bool option type, use it for --fullscreen
The option code is very old and was added to MPlayer in the early 2000s,
when C99 was still new. MPlayer did not use the "bool" type anywhere,
and the logical option equivalent to bool, the "flag" option type, used
int, with the convention that only the values 0 and 1 are allowed.
mpv may have hammered many, many additional tentacles to the option
code, but some of the basics never changed, and m_option_type_flag still
uses int. This seems a bit weird, since mpv uses bool for booleans. So
finally introduce an m_option_type_bool. To avoid duplicating too much
code, change the flag code to bool, and "reimplement" m_option_type_flag
on top of m_option_type_bool.
As a "demonstration", change the --fullscreen option to this new type.
Ideally, all options would be changed to bool, and m_option_type_flag
would be removed. But that is a lot of monotonous, thankless work, so I'm
not doing it; instead this will be a painful, years-long transition.
At the same time, I'm introducing a new concept for option declarations.
Instead of OPT_BOOL(), which defines the full m_option struct contents,
there's OPTF_BOOL(), which only takes the option field name itself. The
name is provided via a normal struct field initializer. Other fields
(such as flags) can be provided via designated initializers.
The advantage of this is that we don't need tons of nested vararg
macros. We also don't need to deal with 0-sized varargs being a pain
(and in fact they are not a thing in standard C99 and probably C11).
There is no need to provide a mandatory flags argument either, which is
the reason why so many OPT_ macros are used with a "0" argument. (The
flag argument seems to confuse other developers; they either don't
immediately recognize what it is, or assume it's supposed to be the
option's default value.)
Not having to mess with the flag argument in such option macros is also
a reason for the removal of M_OPT_RANGE etc., for the better or worse.
The only place that special-cased the _flag option type was in
command.c; change it to use something effectively very similar that
automatically includes the new _bool option type. Everything else should
be transparent to the change. The fullscreen option change should be
transparent too, as C99 bool is basically an integer type that is
clamped to 0/1 (except in Swift, Swift sucks).
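As a rough illustration of the two declaration styles described above (the exact macro spellings, field paths, and flags are assumptions for this sketch, not quoted from the real option table):

    /* old vararg-macro style: name, field and flags packed into one macro */
    OPT_FLAG("fullscreen", fullscreen, UPDATE_FULLSCREEN),
    /* new style: the name is a plain struct-field initializer, other fields
     * (like flags) use designated initializers */
    {"fullscreen", OPTF_BOOL(fullscreen), .flags = UPDATE_FULLSCREEN},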
    bool fullscreen;
    bool border;
    bool title_bar;
    bool all_workspaces;
    bool window_minimized;
    bool window_maximized;
    int focus_on;

    int screen_id;
    char *screen_name;
    int fsscreen_id;
    char *fsscreen_name;
    char *winname;
    char *appid;

wayland: add support for content-type protocol
The content-type protocol allows mpv to send the compositor a hint about the
type of content being displayed on its surface so it could potentially
make some sort of optimization. Fundamentally, this is pretty simple but
since this requires a very new wayland-protocols version (1.27), we have
to mess with the build to add a new define and add a bunch of if's in
here. The protocol itself exposes 4 different types of content: none,
photo, video, and game.
To do that, let's add a new option (wayland-content-type) that lets
users control what hint to send to the compositor. Since the previous
commit adds a VOCTRL that notifies us about the content being displayed,
we can also add an auto value to this option. As you'd expect, the
compositor hint would be set to photo if mpv's core detects an image,
video for other things, and it is set to none for the special case of
forcing a window when there is no video track. For completeness' sake,
game is also allowed as a value for this option, but in practice there
shouldn't be a reason to use that.
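To make the mapping concrete, here is a minimal sketch of what the VO side has to do with this value; the CONTENT_* constants are hypothetical mpv-side names, and the wp_content_type_v1 symbols are recalled from the protocol and may not be exact:

    // Hypothetical sketch: translate the option/VOCTRL value into the
    // compositor hint defined by the content-type protocol.
    static void set_content_hint(struct wp_content_type_v1 *ct, int type)
    {
        uint32_t hint = WP_CONTENT_TYPE_V1_TYPE_NONE;
        switch (type) {
        case CONTENT_PHOTO: hint = WP_CONTENT_TYPE_V1_TYPE_PHOTO; break;
        case CONTENT_VIDEO: hint = WP_CONTENT_TYPE_V1_TYPE_VIDEO; break;
        case CONTENT_GAME:  hint = WP_CONTENT_TYPE_V1_TYPE_GAME;  break;
        }
        wp_content_type_v1_set_content_type(ct, hint);
    }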
    int content_type;

    int x11_netwm;
    int x11_bypass_compositor;
    int x11_present;
    bool x11_wid_title;
    bool cursor_passthrough;
    bool native_keyrepeat;

    float panscan;
    float zoom;
    float pan_x, pan_y;
    float align_x, align_y;
    float scale_x, scale_y;
    float margin_x[2];
    float margin_y[2];
    int unscaled;

    struct m_geometry geometry;
    struct m_geometry autofit;
    struct m_geometry autofit_larger;
    struct m_geometry autofit_smaller;
    double window_scale;

    bool auto_window_resize;
    bool keepaspect;
    bool keepaspect_window;
    bool hidpi_window_scale;
    bool native_fs;
    bool show_in_taskbar;

    int64_t WinID;

    float force_monitor_aspect;
    float monitor_pixel_aspect;
    bool force_render;
    bool force_window_position;

    int backdrop_type;
    int window_affinity;
    char *mmcss_profile;
    int window_corners;

    double display_fps_override;
    double timing_offset;
    int video_sync;

    struct m_geometry android_surface_size;

    int swapchain_depth; // max number of images to render ahead

    struct m_geometry video_crop;
} mp_vo_opts;

// Subtitle options needed by the subtitle decoders/renderers.
struct mp_subtitle_opts {
    float sub_fps;
    float sub_speed;
    bool sub_forced_events_only;
    bool stretch_dvd_subs;
    bool stretch_image_subs;
    bool image_subs_video_res;
    bool sub_fix_timing;
    bool sub_stretch_durations;
    bool sub_scale_by_window;
    bool sub_scale_with_window;
    bool ass_scale_with_window;
    struct osd_style_opts *sub_style;
    float sub_scale;
    float sub_gauss;
    bool sub_gray;
    bool ass_enabled;
    float ass_line_spacing;
    bool ass_use_margins;
    bool sub_use_margins;
    bool ass_vsfilter_aspect_compat;
    int ass_vsfilter_color_compat;
    bool ass_vsfilter_blur_compat;
    bool sub_vsfilter_bidi_compat;
    bool use_embedded_fonts;
    char **ass_style_override_list;
    char *ass_styles_file;
    int ass_hinting;
    int ass_shaper;
    bool ass_justify;
    bool sub_clear_on_seek;
    int teletext_page;
    bool sub_past_video_end;
    char **sub_avopts;
};

// Options for both primary and secondary subs.
struct mp_subtitle_shared_opts {
    float sub_delay[2];
    float sub_pos[2];
    bool sub_visibility[2];
    int ass_style_override[2];
};
sub: make filter_sdh a "proper" filter, allow runtime changes
Until now, filter_sdh was simply a function that was called by sd_ass
directly (if enabled).
I want to add another filter, so it's time to turn this into a somewhat
more general subtitle filtering infrastructure.
I pondered whether to reuse the audio/video filtering stuff - but better
not. Also, since subtitles are horrible and tend to refuse proper
abstraction, it's still messed into sd_ass, instead of working on the
dec_sub.c level. Actually mpv used to have subtitle "filters" and even
made subtitle converters part of it, but it was fairly horrible, so
don't do that again.
In addition, make runtime changes possible. Since this was supposed to
be a quick hack, I just decided to put all subtitle filter options into
a separate option group (=> simpler change notification), to manually
push the change through the playloop (like it was sort of before for OSD
options), and to recreate the sub filter chain completely in every
change. Should be good enough.
One strangeness is that due to prefetching and such, most subtitle
packets (or those some time ahead) are actually done filtering when we
change, so the user still needs to manually seek to actually refresh
everything. And since subtitle data is usually cached in ASS_Track (for
other terrible but user-friendly reasons), we also must clear the
subtitle data, but of course only on seek, since otherwise all subtitles
would just disappear. What a fucking mess, but such is life. We could
trigger a "refresh seek" to make this more automatic, but I don't feel
like it currently.
This is slightly inefficient (lots of allocations and copying), but I
decided that it doesn't matter. Could matter slightly for crazy ASS
subtitles that render with thousands of events.
Not very well tested. Still seems to work, but I didn't have many test
cases.

struct mp_sub_filter_opts {
    bool sub_filter_SDH;
    bool sub_filter_SDH_harder;
    char *sub_filter_SDH_enclosures;
    bool rf_enable;
    bool rf_plain;
    char **rf_items;
    char **jsre_items;
    bool rf_warn;
};
struct mp_osd_render_opts {
    float osd_bar_align_x;
    float osd_bar_align_y;
    float osd_bar_w;
    float osd_bar_h;
    float osd_bar_border_size;
    float osd_scale;
    bool osd_scale_by_window;
    struct osd_style_opts *osd_style;
    bool force_rgba_osd;
};
typedef struct MPOpts {
    bool property_print_help;
    bool use_terminal;
    char *dump_stats;
    int verbose;
    bool msg_really_quiet;
    char **msg_levels;
    bool msg_color;
    bool msg_module;
    bool msg_time;
    char *log_file;

    int operation_mode;

    char **reset_options;
    char **script_files;
    char **script_opts;
    bool js_memory_report;
    bool lua_load_osc;
    bool lua_load_ytdl;
    char *lua_ytdl_format;
    char **lua_ytdl_raw_options;
    bool lua_load_stats;
    bool lua_load_console;
    int lua_load_auto_profiles;

    bool auto_load_scripts;

    bool audio_exclusive;
    bool ao_null_fallback;
    bool audio_stream_silence;
    float audio_wait_open;
    int force_vo;
    float softvol_volume;
    int rgain_mode;
    float rgain_preamp; // Set replaygain pre-amplification
    bool rgain_clip; // Enable/disable clipping prevention
    float rgain_fallback;
    int softvol_mute;
    float softvol_max;
    float softvol_gain;
    float softvol_gain_min;
    float softvol_gain_max;
    int gapless_audio;

    mp_vo_opts *vo;
    struct ao_opts *ao_opts;

    char *wintitle;
    char *media_title;

video: redo video equalizer option handling
I really wouldn't care much about this, but some parts of the core code
are under HAVE_GPL, so there's some need to get rid of it. Simply turn
the video equalizer from its current fine-grained handling with vf/vo
fallbacks into global options. This makes updating them much simpler.
This removes any possibility of applying video equalizers in filters,
which affects vf_scale, and the previously removed vf_eq. Not a big
loss, since the preferred VOs have this builtin.
Remove video equalizer handling from vo_direct3d, vo_sdl, vo_vaapi, and
vo_xv. I'm not going to waste my time on these legacy VOs.
vo.eq_opts_cache exists _only_ to send a VOCTRL_SET_EQUALIZER, which
exists _only_ to trigger a redraw. This seems silly, but for now I feel
like this is less of a pain. The rest of the equalizer using code is
self-updating.
See commit 96b906a51d5 for how some video equalizer code was GPL only.
Some command line option names and ranges can probably be traced back to
a GPL only committer, but we don't consider these copyrightable.
    struct mp_csp_equalizer_opts *video_equalizer;

    int stop_screensaver;
    int cursor_autohide_delay;
    bool cursor_autohide_fs;

    struct mp_subtitle_opts *subs_rend;
    struct mp_subtitle_shared_opts *subs_shared;
    struct mp_sub_filter_opts *subs_filt;
    struct mp_osd_render_opts *osd_rend;

    int osd_level;
    int osd_duration;
    bool osd_fractions;
    int osd_on_seek;
    bool video_osd;

    bool untimed;
    char *stream_dump;
    bool stop_playback_on_init_failure;
    int loop_times;
    int loop_file;
    bool shuffle;
    bool ordered_chapters;
    char *ordered_chapters_files;
    int chapter_merge_threshold;
    double chapter_seek_threshold;
    char *chapter_file;
    bool merge_files;
    bool quiet;
    bool load_config;
    char *force_configdir;
    bool use_filedir_conf;
    int hls_bitrate;
    int edition_id;
    bool initial_audio_sync;
    double sync_max_video_change;
    double sync_max_audio_change;
    int sync_max_factor;
    int hr_seek;
    float hr_seek_demuxer_offset;
    bool hr_seek_framedrop;
    float audio_delay;
    float default_max_pts_correction;
    int autosync;
    int frame_dropping;
    bool video_latency_hacks;
    int term_osd;
    bool term_osd_bar;
    char *term_osd_bar_chars;
    char *term_title;
    char *playing_msg;
    char *osd_playing_msg;
    int osd_playing_msg_duration;
    char *status_msg;
    char *osd_status_msg;
    char *osd_msg[3];
    int player_idle_mode;
    char **input_commands;
    bool consolecontrols;
    int playlist_pos;
    struct m_rel_time play_start;
    struct m_rel_time play_end;
    struct m_rel_time play_length;

Implement backwards playback
See manpage additions. This is a huge hack. You can bet there are shit
tons of bugs. It's literally forcing square pegs into round holes.
Hopefully, the manpage wall of text makes it clear enough that the whole
shit can easily crash and burn. (Although it shouldn't literally crash.
That would be a bug. It possibly _could_ start a fire by entering some
sort of endless loop, not a literal one, just something where it tries
to do work without making progress.)
(Some obvious bugs I simply ignored for this initial version, but
there's a number of potential bugs I can't even imagine. Normal playback
should remain completely unaffected, though.)
How this works is also described in the manpage. Basically, we demux in
reverse, then we decode in reverse, then we render in reverse.
The decoding part is the simplest: just reorder the decoder output. This
weirdly integrates with the timeline/ordered chapter code, which also
has special requirements on feeding the packets to the decoder in a
non-straightforward way (it doesn't conflict, although a mess of bugs
breaks correct slicing of segments, so EDL/ordered chapter playback is
broken in backward direction).
Backward demuxing is pretty involved. In theory, it could be much
easier: simply iterating the usual demuxer output backward. But this
just doesn't fit into our code, so there's a cthulhu nightmare of shit.
To be specific, each stream (audio, video) is reversed separately. At
least this means we can do backward playback within cached content (for
example, you could play backwards in a live stream; on that note, it
disables prefetching, which would lead to losing new live video, but
this could be avoided).
The fuckmess also meant that I didn't bother trying to support
subtitles. Subtitles are a problem because they're "sparse" streams.
They need to be "passively" demuxed: you don't try to read a subtitle
packet, you demux audio and video, and then look whether there was a
subtitle packet. This means to get subtitles for a time range, you need
to know that you demuxed video and audio over this range, which becomes
pretty messy when you demux audio and video backwards separately.
Backward display is the most weird (and potentially buggy) part. To
avoid that we need to touch a LOT of timing code, we negate all
timestamps. The basic idea is that due to the negation, all
comparisons and subtractions of timestamps keep working, and you don't
need to touch every single of them to "reverse" them.
E.g.:
    bool before = pts_a < pts_b;
would need to be:
    bool before = forward
        ? pts_a < pts_b
        : pts_a > pts_b;
or:
    bool before = pts_a * dir < pts_b * dir;
or, as it's implemented now, you just do this after decoding:
    pts_a *= dir;
    pts_b *= dir;
and then in the normal timing/renderer code:
    bool before = pts_a < pts_b;
Consequently, we don't need many changes in the latter code. But some
assumptions inherently true for forward playback may have been broken
anyway. What is mainly needed is fixing places where values are passed
between positive and negative "domains". For example, seeking and
timestamp user display always uses positive timestamps. The main mess is
that it's not obvious which domain a given variable should or does use.
Well, in my tests with a single file, it suddenly started to work when I
did this. I'm honestly surprised that it did, and that I didn't have to
change a single line in the timing code past decoder (just something
minor to make external/cached text subtitles display). I committed it
immediately while avoiding thinking about it. But there really likely
are subtle problems of all sorts.
As far as I'm aware, gstreamer also supports backward playback. When I
looked at this years ago, I couldn't find a way to actually try this,
and I didn't revisit it now. Back then I also read talk slides from the
person who implemented it, and I'm not sure if and which ideas I might
have taken from it. It's possible that the timestamp reversal is
inspired by it, but I didn't check. (I think it claimed that it could
avoid large changes by changing a sign?)
VapourSynth has some sort of reverse function, which provides a backward
view on a video. The function itself is trivial to implement, as
VapourSynth aims to provide random access to video by frame numbers (so
you just request decreasing frame numbers). From what I remember, it
wasn't exactly fluid, but it worked. It's implemented by creating an
index, and seeking to the target on demand, and a bunch of caching. mpv
could use it, but it would either require using VapourSynth as demuxer
and decoder for everything, or replacing the current file every time
something is supposed to be played backwards.
FFmpeg's libavfilter has reversal filters for audio and video. These
require buffering the entire media data of the file, and don't really
fit into mpv's architecture. It could be used by playing a libavfilter
graph that also demuxes, but that's like VapourSynth but worse.
    int play_dir;
    bool rebase_start_time;
    int play_frames;
    double ab_loop[2];
    int ab_loop_count;
    double step_sec;
    bool position_resume;
    bool position_check_mtime;
    bool position_save_on_quit;
    bool write_filename_in_watch_later_config;
    bool ignore_path_in_watch_later_config;
    char *watch_later_dir;
    char **watch_later_options;
    bool pause;

core: add --keep-open, which doesn't close the file on EOF
The --keep-open option causes mpv not to close the current file.
Instead, it will pause, and allow the user to seek around. When
seeking beyond the end of the file, mpv does a precise seek back to
the previous last known position that produced video output.
In some corner cases, mpv might not be able to produce video output at
all, despite having created a VO. (Possibly when only 1 frame could be
decoded, but the video filter chain queues frames. Then a VO would be
created, without sending an actual video frame to the VO.) In these
cases, the VO window will not redraw, not even OSD.
Based on a patch by coax [1].
[1] http://devel.mplayer2.org/ticket/210#comment:4
    int keep_open;
    bool keep_open_pause;
    double image_display_duration;
    char *lavfi_complex;
    int stream_id[2][STREAM_TYPE_COUNT];
    char **stream_lang[STREAM_TYPE_COUNT];
    bool stream_auto_sel;
    int subs_with_matching_audio;
    bool subs_match_os_language;
    int subs_fallback;
    int subs_fallback_forced;
    int audio_display;
    char **display_tags;

    char **audio_files;
    char *demuxer_name;
    bool demuxer_thread;

player: make playback termination asynchronous
Until now, stopping playback aborted the demuxer and I/O layer violently
by signaling mp_cancel (bound to libavformat's AVIOInterruptCB
mechanism). Change it to try closing them gracefully.
The main purpose is to silence those libavformat errors that happen when
you request termination. Most of libavformat barely cares about the
termination mechanism (AVIOInterruptCB), and essentially it's like the
network connection is abruptly severed, or file I/O suddenly returns I/O
errors. There were issues with dumb TLS warnings, parsers complaining
about incomplete data, and some special protocols that require server
communication to gracefully disconnect.
We still want to abort it forcefully if it refuses to terminate on its
own, so a timeout is required. Users can set the timeout to 0, which
should give them the old behavior.
This also removes the old mechanism that treats certain commands (like
"quit") specially, and tries to terminate the demuxers even if the core
is currently frozen. This is for situations where the core synchronized
to the demuxer or stream layer while network is unresponsive. This in
turn can only happen due to the "program" or "cache-size" properties in
the current code (see one of the previous commits). Also, the old
mechanism doesn't fit particularly well with the new one. We wouldn't
want to abort playback immediately on a "quit" command - the new code is
all about giving it a chance to end it gracefully. We'd need some sort
of watchdog thread or something equally complicated to handle this. So
just remove it.
The change in osd.c prevents it from clearing the status line while
waiting for termination. The normal status line code doesn't output
anything useful at this point, and the code path taken clears it, both
of which amount to an annoying behavior change, so just let it show the old
one.
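A minimal sketch of the graceful-then-forced shutdown described here; mp_cancel is the mechanism named in the text and demux_termination_timeout is the actual option below, but the other helper names are made up for illustration:

    double deadline = now_sec() + opts->demux_termination_timeout;
    request_demuxer_shutdown(demuxer);        // ask it to wind down nicely
    while (!demuxer_is_shut_down(demuxer) && now_sec() < deadline)
        process_core_events();                // keep the core responsive
    if (!demuxer_is_shut_down(demuxer))
        mp_cancel_trigger(cancel);            // old behavior: abort I/O hard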
    double demux_termination_timeout;
    bool demuxer_cache_wait;
    bool prefetch_open;
    char *audio_demuxer_name;
    char *sub_demuxer_name;

    bool cache_pause;
    bool cache_pause_initial;
    float cache_pause_wait;

player: redo how stream caching and pausing on low cache works
Add the --cache-secs option, which literally overrides the value of
--demuxer-readahead-secs if the stream cache is active. The default
value is very high (10 seconds), which means it can act as network
cache.
Remove the old behavior of trying to pause once the byte cache runs
low. Instead, do something similar with the demuxer cache. The nice
thing is that we can guess how many seconds of video it has cached,
and we can make better decisions. But for now, apply a relatively
naive heuristic: if the cache is below 0.5 secs, pause, and wait
until at least 2 secs are available.
Note that due to timestamp reordering, the estimated cached duration
of video might be inaccurate, depending on the file format. If the
file format has DTS, it's easy, otherwise the duration will seemingly
jump back and forth.
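A sketch of that heuristic, using the 0.5 s / 2 s thresholds from the text (field and helper names are illustrative, not the real player code):

    double cached = estimated_cached_duration_sec(demuxer);
    if (!paused_for_cache && cached < 0.5) {
        paused_for_cache = true;
        set_internal_pause(true);     // wait for the cache to refill
    } else if (paused_for_cache && cached >= 2.0) {
        paused_for_cache = false;
        set_internal_pause(false);    // enough readahead again, resume
    }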
    struct image_writer_opts *screenshot_image_opts;

screenshot: make screenshot filenames configurable
This adds the --screenshot-template option, which specifies a template
for the filename used for a screenshot. The '%' character is parsed as
a format specifier. These format specifiers insert metadata into the
filename. For example, '%f' is replaced with the filename of the
currently played file.
The following format specifiers are available:
%n      Insert sequence number (padded with 4 zeros), e.g. "0002".
%0Nn    Like %n, but pad to N zeros (N = 0 to 9).
        %n behaves like %04n.
%#n     Like %n, but reset the sequence counter on every screenshot.
        (Useful if other parts in the template make the resulting
        filename already mostly unique.)
%#0Nn   Use %0Nn and %#n at the same time.
%f      Insert filename of the currently played video.
%F      Like %f, but with stripped file extension ("." and rest).
%p      Insert current playback time, in HH:MM:SS format.
%P      Like %p, but adds milliseconds: HH:MM:SS.mmmm
%tX     Insert the current local date/time, using the date format X.
        X is a single letter and is passed to strftime() as "%X".
        E.g. "%td" inserts the number of the current day.
%{prop} Insert the value of the slave property 'prop'.
        E.g. %{filename} is the same as %f. If the property doesn't
        exist or is not available, nothing is inserted, unless a
        fallback is specified as in %{prop:fallback text}.
%%      Insert the character '%'.
The strings inserted by format specifiers will be checked for
characters not allowed in filenames (including '/' and '\'), and
replaced with the placeholder '_'. (This doesn't happen for text that
was passed with the --screenshot-template option, and allows specifying
a screenshot target directory by prefixing the template with a relative
or absolute path.)
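For example (illustrative, not taken from the text above): with --screenshot-template="shots/%F_%p_%02n", the third screenshot of "movie.mkv" taken five minutes in would land in the relative directory "shots" under a name along the lines of movie_00:05:00_03, plus the extension of the configured image format.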
    char *screenshot_template;
    char *screenshot_dir;
    bool screenshot_sw;

    struct m_channels audio_output_channels;
    int audio_output_format;
    int force_srate;
    double playback_speed;
    bool pitch_correction;

    struct m_obj_settings *vf_settings;
    struct m_obj_settings *af_settings;

video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that can not be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as
sub-filter, and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
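As a conceptual sketch of the "EOF is a frame" rule described above (types and helpers are made up, not the real filter API):

    struct frame { enum { FRAME_DATA, FRAME_EOF } type; void *payload; };

    static void filter_process(struct my_filter *f, struct frame in)
    {
        if (in.type == FRAME_EOF) {
            drain_queued_output(f);   // flush everything still buffered
            write_output(f, in);      // then forward EOF exactly once
            return;
        }
        write_output(f, transform(f, in));
    }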
    struct filter_opts *filter_opts;
    struct dec_wrapper_opts *dec_wrapper;

    char **sub_name;
    char **sub_paths;
    char **audiofile_paths;
    char **coverart_files;
    char **external_files;
    bool autoload_files;
    int sub_auto;

player: make all autoload extensions configurable
--audio-file-auto, --cover-art-auto, and --sub-auto all work by using an
internally hardcoded list that determine what file extensions get
recognized. This is fine and people periodically update it, but we can
actually expose this as a stringlist option instead. This way users can
add or remove any file extension for any type. For the most part, this
is pretty easy and involves making sub_exts, etc. the defaults
for the new options (--audio-file-auto-exts, --cover-art-auto-exts, and
--sub-auto-exts). There's actually one slight complication however. The
input code uses mp_might_be_subtitle_file, which guesses whether a
drag-and-dropped file is a subtitle. The input ctx has no access to mpctx, so
we have to be clever here.
For this, the trick is to recognize that we can leverage the
m_option_change_callback. We add a new flag, UPDATE_SUB_EXTS, which
fires when the player starts up. Then in the callback, we can set the
value of sub_exts in external_files to opts->sub_auto_exts. Whenever the
option updates, the callback is fired again and sub_exts updates. That
way mp_might_be_subtitle_file can just operate off of this global
variable instead of trying to mess with the core mpv state directly.
Fixes #12000.
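A rough sketch of that callback trick; UPDATE_SUB_EXTS and mp_might_be_subtitle_file are the names from the text, but the callback signature and the remaining helpers are assumptions:

    static char **sub_exts_global;   // what the input code is allowed to read

    static void option_changed_cb(struct MPOpts *opts, uint64_t changed_flags)
    {
        if (changed_flags & UPDATE_SUB_EXTS)
            sub_exts_global = opts->sub_auto_exts;  // refresh the global copy
    }

    bool mp_might_be_subtitle_file(const char *filename)
    {
        return filename_has_extension_in(filename, sub_exts_global);
    }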
    char **sub_auto_exts;
    int audiofile_auto;
    char **audiofile_auto_exts;
    int coverart_auto;
    char **coverart_auto_exts;
    bool coverart_whitelist;
    bool osd_bar_visible;

    int w32_priority;

    struct bluray_opts *stream_bluray_opts;
    struct cdda_opts *stream_cdda_opts;
    struct dvb_opts *stream_dvb_opts;
    struct lavf_opts *stream_lavf_opts;
    char *bluray_device;

    struct demux_rawaudio_opts *demux_rawaudio;
    struct demux_rawvideo_opts *demux_rawvideo;
    struct demux_playlist_opts *demux_playlist;
    struct demux_lavf_opts *demux_lavf;
    struct demux_mkv_opts *demux_mkv;
    struct demux_cue_opts *demux_cue;

    struct demux_opts *demux_opts;

demux: add an on-disk cache
Somewhat similar to the old --cache-file, except for the demuxer cache.
Instead of keeping packet data in memory, it's written to disk and read
back when needed.
The idea is to reduce main memory usage, while allowing fast seeking in
large cached network streams (especially live streams). Keeping the
packet metadata on disk would be rather hard (would use mmap or so, or
rewrite the entire demux.c packet queue handling), and since it's
relatively small, just keep it in memory.
Also for simplicity, the disk cache is append-only. If you're watching
really long livestreams, and need pruning, you're probably out of luck.
This still could be improved by trying to free unused blocks with
fallocate(), but since we're writing multiple streams in an interleaved
manner, this is slightly hard.
Some rather gross ugliness in packet.h: we want to store the file
position of the cached data somewhere, but on 32 bit architectures, we
don't have any usable 64 bit members for this, just the buf/len fields,
which add up to 64 bit - so the shitty union aliases this memory.
Error paths untested. Side data (the complicated part of trying to
serialize ffmpeg packets) untested.
Stream recording had to be adjusted. Some minor details change due to
this, but probably nothing important.
The change in attempt_range_joining() is because packets in cache
have no valid len field. It was a useful check (heuristically
finding broken cases), but not a necessary one.
Various other approaches were tried. It would be interesting to list
them and to mention the pros and cons, but I don't feel like it.
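Illustrative only (not the real packet.h layout): the idea is that per-packet metadata stays in memory, while the payload is referenced by its offset in the append-only cache file:

    struct cached_packet_ref {
        double pts, dts;
        int stream_index;
        uint64_t cache_file_pos;   // where the payload was appended on disk
        uint32_t payload_size;     // how many bytes to read back
    };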
    struct demux_cache_opts *demux_cache_opts;
stream: turn into a ring buffer, make size configurable
In some corner cases (see #6802), it can be beneficial to use a larger
stream buffer size. Use this as argument to rewrite everything for no
reason.
Turn stream.c itself into a ring buffer, with configurable size. The
latter would have been easily achievable with minimal changes, and the
ring buffer is the hard part. There is no reason to have a ring buffer
at all, except possibly if ffmpeg doesn't fix their awful mp4 demuxer, and
some subtle issues with demux_mkv.c wanting to seek back by small
offsets (the latter was handled with small stream_peek() calls, which
are unneeded now).
In addition, this turns small forward seeks into reads (where data is
simply skipped). Before this commit, only stream_skip() did this (which
also means that stream_skip() simply calls stream_seek() now).
Replace all stream_peek() calls with something else (usually
stream_read_peek()). The function was a problem, because it returned a
pointer to the internal buffer, which is now a ring buffer with
wrapping. The new function just copies the data into a buffer, and in
some cases requires callers to dynamically allocate memory. (The most
common case, demux_lavf.c, required a separate buffer allocation anyway
due to FFmpeg "idiosyncrasies".) This is the bulk of the demuxer_*
changes.
I'm not happy with this. There still isn't a good reason why there
should be a ring buffer, that is complex, and most of the time just
wastes half of the available memory. Maybe another rewrite soon.
It also contains bugs; you're an alpha tester now.
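A minimal wrap-around copy in the spirit of stream_read_peek() (illustrative, not stream.c's real code; assumes the buffer size is a power of two):

    static void ring_copy(uint8_t *dst, const uint8_t *buf, size_t buf_size,
                          uint64_t pos, size_t len)
    {
        size_t start = pos & (buf_size - 1);          // wrap logical position
        size_t first = len < buf_size - start ? len : buf_size - start;
        memcpy(dst, buf + start, first);
        memcpy(dst + first, buf, len - first);        // wrapped tail, if any
    }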
    struct stream_opts *stream_opts;

    struct vd_lavc_params *vd_lavc_params;
    struct ad_lavc_params *ad_lavc_params;

    struct input_opts *input_opts;

    // may be NULL if encoding is not compiled-in
    struct encode_opts *encode_opts;

    char *ipc_path;
    char *ipc_client;

    struct mp_resample_opts *resample_opts;

    struct ra_ctx_opts *ra_ctx_opts;
    struct gl_video_opts *gl_video_opts;
    struct gl_next_opts *gl_next_opts;
    struct angle_opts *angle_opts;

vo_opengl: refactor into vo_gpu
This is done in several steps:
1. refactor MPGLContext -> struct ra_ctx
2. move GL-specific stuff in vo_opengl into opengl/context.c
3. generalize context creation to support other APIs, and add --gpu-api
4. rename all of the --opengl- options that are no longer opengl-specific
5. move all of the stuff from opengl/* that isn't GL-specific into gpu/
(note: opengl/gl_utils.h became opengl/utils.h)
6. rename vo_opengl to vo_gpu
7. to handle window screenshots, the short-term approach was to just add
it to ra_swchain_fns. Long term (and for vulkan) this has to be moved to
ra itself (and vo_gpu altered to compensate), but this was a stop-gap
measure to prevent this commit from getting too big
8. move ra->fns->flush to ra_gl_ctx instead
9. some other minor changes that I've probably already forgotten
Note: This is one half of a major refactor, the other half of which is
provided by rossy's following commit. This commit enables support for
all linux platforms, while his version enables support for all non-linux
platforms.
Note 2: vo_opengl_cb.c also re-uses ra_gl_ctx so it benefits from the
--opengl- options like --opengl-early-flush, --opengl-finish etc. Should
be a strict superset of the old functionality.
Disclaimer: Since I have no way of compiling mpv on all platforms, some
of these ports were done blindly. Specifically, the blind ports included
context_mali_fbdev.c and context_rpi.c. Since they're both based on
egl_helpers, the port should have gone smoothly without any major
changes required. But if somebody complains about a compile error on
those platforms (assuming anybody actually uses them), you know where to
complain.
    struct opengl_opts *opengl_opts;
vo_gpu: vulkan: initial implementation
This time based on ra/vo_gpu. 2017 is the year of the vulkan desktop!
Current problems / limitations / improvement opportunities:
1. The swapchain/flipping code violates the vulkan spec, by assuming
that the presentation queue will be bounded (in cases where rendering
is significantly faster than vsync). But apparently, there's simply
no better way to do this right now, to the point where even the
stupid cube.c examples from LunarG etc. do it wrong.
(cf. https://github.com/KhronosGroup/Vulkan-Docs/issues/370)
2. The memory allocator could be improved. (This is a universal
constant)
3. Could explore using push descriptors instead of descriptor sets,
especially since we expect to switch descriptors semi-often for some
passes (like interpolation). Probably won't make a difference, but
the synchronization overhead might be a factor. Who knows.
4. Parallelism across frames / async transfer is not well-defined, we
either need to use a better semaphore / command buffer strategy or a
resource pooling layer to safely handle cross-frame parallelism.
(That said, I gave resource pooling a try and was not happy with the
result at all - so I'm still exploring the semaphore strategy)
5. We aggressively use pipeline barriers where events would offer a much
more fine-grained synchronization mechanism. As a result of this, we
might be suffering from GPU bubbles due to too-short dependencies on
objects. (That said, I'm also exploring the use of semaphores as an
ordering tactic which would allow cross-frame time slicing in theory)
Some minor changes to the vo_gpu and infrastructure, but nothing
consequential.
NOTE: For safety, all use of asynchronous commands / multiple command
pools is currently disabled completely. There are some left-over relics
of this in the code (e.g. the distinction between dev_poll and
pool_poll), but that is kept in place mostly because this will be
re-extended in the future (vulkan rev 2).
The queue count is also currently capped to 1, because of the lack of
cross-frame semaphores means we need the implicit synchronization from
the same-queue semantics to guarantee a correct result.
    struct vulkan_opts *vulkan_opts;
vo_gpu: vulkan: implement a VkDisplayKHR backed context
This is the Vulkan equivalent of the drm context for OpenGL, with
the big difference that it's implemented purely in terms of Vulkan
calls and doesn't actually require drm or kms.
The basic idea is to identify a display, mode, and plane on a device,
and then create a display backed surface for the swapchain. In theory,
past that point, everything is the same, and this is in fact the case
on Intel hardware. I can get a video playing on a vt.
On nvidia, naturally, things don't work that way. Instead, nvidia only
implemented the extension for scenarios where a VR application is
stealing a display from a running window system, and not for
standalone scenarios. With additional code, I've got this scenario to
work but that's a separate incremental change.
Other people have tested on AMD, and report roughly the same behaviour
as on Intel.
Note that in this change, the VT will not be correctly restored after
quitting. The only way to restore the VT is to introduce some drm
specific code which I will illustrate in a separate change.
    struct vulkan_display_opts *vulkan_display_opts;
    struct spirv_opts *spirv_opts;
vo_gpu: d3d11: initial implementation
This is a new RA/vo_gpu backend that uses Direct3D 11. The GLSL
generated by vo_gpu is cross-compiled to HLSL with SPIRV-Cross.
What works:
- All of mpv's internal shaders should work, including compute shaders.
- Some external shaders have been tested and work, including RAVU and
adaptive-sharpen.
- Non-dumb mode works, even on very old hardware. Most features work at
feature level 9_3 and all features work at feature level 10_0. Some
features also work at feature level 9_1 and 9_2, but without high-bit-
depth FBOs, it's not very useful. (Hardware this old is probably not
fast enough for advanced features anyway.)
Note: This is more compatible than ANGLE, which requires 9_3 to work
at all (GLES 2.0), and 10_1 for non-dumb-mode (GLES 3.0).
- Hardware decoding with D3D11VA, including decoding of 10-bit formats
without truncation to 8-bit.
What doesn't work / can be improved:
- PBO upload and direct rendering does not work yet. Direct rendering
requires persistent-mapped PBOs because the decoder needs to be able
to read data from images that have already been decoded and uploaded.
Unfortunately, it seems like persistent-mapped PBOs are fundamentally
incompatible with D3D11, which requires all resources to use driver-
managed memory and requires memory to be unmapped (and hence pointers
to be invalidated) when a resource is used in a draw or copy
operation.
However it might be possible to use D3D11's limited multithreading
capabilities to emulate some features of PBOs, like asynchronous
texture uploading.
- The blit() and clear() operations don't have equivalents in the D3D11
API that handle all cases, so in most cases, they have to be emulated
with a shader. This is currently done inside ra_d3d11, but ideally it
would be done in generic code, so it can take advantage of mpv's
shader generation utilities.
- SPIRV-Cross is used through a NIH C-compatible wrapper library, since
it does not expose a C interface itself.
The library is available here: https://github.com/rossy/crossc
- The D3D11 context could be made to support more modern DXGI features
in future. For example, it should be possible to add support for
high-bit-depth and HDR output with DXGI 1.5/1.6.
2017-09-07 10:18:06 +00:00
|
|
|
struct d3d11_opts *d3d11_opts;
|
2017-11-01 11:38:41 +00:00
|
|
|
struct d3d11va_opts *d3d11va_opts;
|
2018-02-16 12:07:15 +00:00
|
|
|
struct macos_opts *macos_opts;
|
2023-01-05 03:34:26 +00:00
|
|
|
struct drm_opts *drm_opts;
|
2019-10-14 17:16:42 +00:00
|
|
|
struct wayland_opts *wayland_opts;
|
2023-09-19 22:26:21 +00:00
|
|
|
struct wingl_opts *wingl_opts;
|
2023-09-19 23:12:42 +00:00
|
|
|
struct cuda_opts *cuda_opts;
|
2016-09-08 19:46:48 +00:00
|
|
|
struct dvd_opts *dvd_opts;
|
2018-03-29 21:23:27 +00:00
|
|
|
struct vaapi_opts *vaapi_opts;
|
2019-10-31 14:18:57 +00:00
|
|
|
struct sws_opts *sws_opts;
|
|
|
|
struct zimg_opts *zimg_opts;
|
2017-05-29 16:48:10 +00:00
|
|
|
|
|
|
|
int cuda_device;
|
2008-03-31 03:19:29 +00:00
|
|
|
} MPOpts;
|
|
|
|
|
2023-09-19 23:12:42 +00:00
|
|
|
struct cuda_opts {
|
|
|
|
int cuda_device;
|
|
|
|
};
|
|
|
|
|
2016-09-08 19:46:48 +00:00
|
|
|
struct dvd_opts {
|
|
|
|
int angle;
|
|
|
|
int speed;
|
|
|
|
char *device;
|
|
|
|
};
|
|
|
|
|
video: rewrite filtering glue code
Get rid of the old vf.c code. Replace it with a generic filtering
framework, which can potentially handle more than just --vf. At least
reimplementing --af with this code is planned.
This changes some --vf semantics (including runtime behavior and the
"vf" command). The most important ones are listed in interface-changes.
vf_convert.c is renamed to f_swscale.c. It is now an internal filter
that cannot be inserted by the user manually.
f_lavfi.c is a refactor of player/lavfi.c. The latter will be removed
once --lavfi-complex is reimplemented on top of f_lavfi.c (which is
conceptually easy, but a big mess due to the data flow changes).
The existing filters are all changed heavily. The data flow of the new
filter framework is different. Especially EOF handling changes - EOF is
now a "frame" rather than a state, and must be passed through exactly
once.
Another major thing is that all filters must support dynamic format
changes. The filter reconfig() function goes away. (This sounds complex,
but since all filters need to handle EOF draining anyway, they can use
the same code, and it removes the mess with reconfig() having to predict
the output format, which completely breaks with libavfilter anyway.)
In addition, there is no automatic format negotiation or conversion.
libavfilter's primitive and insufficient API simply doesn't allow us to
do this in a reasonable way. Instead, filters can use f_autoconvert as a
sub-filter and tell it which formats they support. This filter will in
turn add actual conversion filters, such as f_swscale, to perform
necessary format changes.
vf_vapoursynth.c uses the same basic principle of operation as before,
but with worryingly different details in data flow. Still appears to
work.
The hardware deint filters (vf_vavpp.c, vf_d3d11vpp.c, vf_vdpaupp.c) are
heavily changed. Fortunately, they all used refqueue.c, which is for
sharing the data flow logic (especially for managing future/past
surfaces and such). It turns out it can be used to factor out most of
the data flow. Some of these filters accepted software input. Instead of
having ad-hoc upload code in each filter, surface upload is now
delegated to f_autoconvert, which can use f_hwupload to perform this.
Exporting VO capabilities is still a big mess (mp_stream_info stuff).
The D3D11 code drops the redundant image formats, and all code uses the
hw_subfmt (sw_format in FFmpeg) instead. Although that too seems to be a
big mess for now.
f_async_queue is unused.
2018-01-16 10:53:44 +00:00
|
|
|
struct filter_opts {
|
2024-01-22 02:37:47 +00:00
|
|
|
int deinterlace;
|
2024-03-05 01:20:01 +00:00
|
|
|
int field_parity;
|
2018-01-16 10:53:44 +00:00
|
|
|
};
|
|
|
|
|
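The commit message above notes that EOF is now a "frame" rather than a
state, and must be passed through exactly once. A self-contained toy
example of the contract a filter has to honor (hypothetical types, not
mpv's real filter API), using a filter that buffers one frame:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical frame type for illustration only. */
enum frame_type { FRAME_VIDEO, FRAME_EOF };
struct frame { enum frame_type type; int payload; };

/* A toy filter that buffers one frame. On EOF it must first drain the
 * buffered frame, then forward the EOF frame exactly once. */
struct toy_filter { bool have_buffered; struct frame buffered; };

static void emit(struct frame f)
{
    if (f.type == FRAME_EOF)
        printf("EOF\n");
    else
        printf("frame %d\n", f.payload);
}

static void toy_filter_feed(struct toy_filter *tf, struct frame in)
{
    if (in.type == FRAME_EOF) {
        if (tf->have_buffered) {
            emit(tf->buffered);          /* drain pending output first */
            tf->have_buffered = false;
        }
        emit(in);                        /* pass EOF through exactly once */
        return;
    }
    if (tf->have_buffered)
        emit(tf->buffered);              /* make room for the new frame */
    tf->buffered = in;
    tf->have_buffered = true;
}

int main(void)
{
    struct toy_filter tf = {0};
    toy_filter_feed(&tf, (struct frame){FRAME_VIDEO, 1});
    toy_filter_feed(&tf, (struct frame){FRAME_VIDEO, 2});
    toy_filter_feed(&tf, (struct frame){FRAME_EOF, 0});
    return 0;
}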
2016-09-02 13:50:54 +00:00
|
|
|
extern const struct m_sub_options vo_sub_opts;
|
2023-09-19 23:12:42 +00:00
|
|
|
extern const struct m_sub_options cuda_conf;
|
2016-09-08 19:46:48 +00:00
|
|
|
extern const struct m_sub_options dvd_conf;
|
2017-12-29 16:19:25 +00:00
|
|
|
extern const struct m_sub_options mp_subtitle_sub_opts;
|
2023-12-13 21:36:58 +00:00
|
|
|
extern const struct m_sub_options mp_subtitle_shared_sub_opts;
|
sub: make filter_sdh a "proper" filter, allow runtime changes
Until now, filter_sdh was simply a function that was called by sd_ass
directly (if enabled).
I want to add another filter, so it's time to turn this into a somewhat
more general subtitle filtering infrastructure.
I pondered whether to reuse the audio/video filtering stuff - but better
not. Also, since subtitles are horrible and tend to refuse proper
abstraction, it's still wedged into sd_ass instead of working at the
dec_sub.c level. Actually, mpv used to have subtitle "filters" and even
made subtitle converters part of it, but it was fairly horrible, so
don't do that again.
In addition, make runtime changes possible. Since this was supposed to
be a quick hack, I just decided to put all subtitle filter options into
a separate option group (=> simpler change notification), to manually
push the change through the playloop (as was sort of done before for OSD
options), and to recreate the sub filter chain completely on every
change. Should be good enough.
One strangeness is that, due to prefetching and such, most subtitle
packets (or at least those some time ahead) have already been filtered by
the time the options change, so the user still needs to seek manually to
actually refresh everything. And since subtitle data is usually cached in
ASS_Track (for
other terrible but user-friendly reasons), we also must clear the
subtitle data, but of course only on seek, since otherwise all subtitles
would just disappear. What a fucking mess, but such is life. We could
trigger a "refresh seek" to make this more automatic, but I don't feel
like it currently.
This is slightly inefficient (lots of allocations and copying), but I
decided that it doesn't matter. Could matter slightly for crazy ASS
subtitles that render with thousands of events.
Not very well tested. Still seems to work, but I didn't have many test
cases.
2020-02-16 00:02:17 +00:00
|
|
|
extern const struct m_sub_options mp_sub_filter_opts;
|
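For context, the transformation filter_sdh applies can be pictured as
stripping hearing-impaired annotations from subtitle lines. A
deliberately simplified sketch of that idea (the real filter also deals
with ASS tags, parentheses, speaker labels, and more):

#include <stdio.h>

/* Simplified illustration of the kind of transformation an SDH filter
 * performs: drop bracketed sound descriptions like "[door slams]". */
static void strip_bracketed(char *line)
{
    char *out = line;
    int depth = 0;
    for (char *in = line; *in; in++) {
        if (*in == '[') {
            depth++;
        } else if (*in == ']' && depth > 0) {
            depth--;
        } else if (depth == 0) {
            *out++ = *in;            /* keep text outside brackets */
        }
    }
    *out = '\0';
}

int main(void)
{
    char line[] = "[door slams] Who's there?";
    strip_bracketed(line);
    printf("%s\n", line);            /* prints " Who's there?" */
    return 0;
}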
2017-12-29 16:19:25 +00:00
|
|
|
extern const struct m_sub_options mp_osd_render_sub_opts;
|
2018-01-16 10:53:44 +00:00
|
|
|
extern const struct m_sub_options filter_conf;
|
2018-01-18 13:44:20 +00:00
|
|
|
extern const struct m_sub_options resample_conf;
|
stream: turn into a ring buffer, make size configurable
In some corner cases (see #6802), it can be beneficial to use a larger
stream buffer size. Use this as argument to rewrite everything for no
reason.
Turn stream.c itself into a ring buffer, with configurable size. The
latter would have been easily achievable with minimal changes, and the
ring buffer is the hard part. There is no reason to have a ring buffer
at all, except possibly if FFmpeg doesn't fix its awful mp4 demuxer, and
some subtle issues with demux_mkv.c wanting to seek back by small
offsets (the latter was handled with small stream_peek() calls, which
are unneeded now).
In addition, this turns small forward seeks into reads (where data is
simply skipped). Before this commit, only stream_skip() did this (which
also means that stream_skip() simply calls stream_seek() now).
Replace all stream_peek() calls with something else (usually
stream_read_peek()). The function was a problem, because it returned a
pointer to the internal buffer, which is now a ring buffer with
wrapping. The new function just copies the data into a buffer, and in
some cases requires callers to dynamically allocate memory. (The most
common case, demux_lavf.c, required a separate buffer allocation anyway
due to FFmpeg "idiosyncrasies".) This is the bulk of the demuxer_*
changes.
I'm not happy with this. There still isn't a good reason why there
should be a ring buffer; it is complex, and most of the time it just
wastes half of the available memory. Maybe another rewrite soon.
It also contains bugs; you're an alpha tester now.
2019-11-06 20:36:02 +00:00
|
|
|
extern const struct m_sub_options stream_conf;
|
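The reason stream_peek() had to be replaced by a copying
stream_read_peek() follows directly from the wrapping: once the ring
buffer wraps around, the requested bytes are not necessarily contiguous
in memory, so handing out a pointer into the internal buffer is no
longer possible. A minimal, hypothetical sketch of such a copy-based
peek and read (not mpv's stream.c):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct ring {
    unsigned char *buf;
    size_t size;      /* total capacity */
    size_t start;     /* read position */
    size_t len;       /* number of valid bytes */
};

/* Copy up to n bytes at the read position without consuming them. */
static size_t ring_peek(struct ring *r, void *dst, size_t n)
{
    if (n > r->len)
        n = r->len;
    size_t pos = r->start % r->size;
    size_t first = r->size - pos;        /* bytes until the physical end */
    if (first > n)
        first = n;
    memcpy(dst, r->buf + pos, first);
    memcpy((char *)dst + first, r->buf, n - first); /* wrapped tail, if any */
    return n;
}

/* A consuming read is a peek followed by advancing the read position. */
static size_t ring_read(struct ring *r, void *dst, size_t n)
{
    n = ring_peek(r, dst, n);
    r->start = (r->start + n) % r->size;
    r->len -= n;
    return n;
}

int main(void)
{
    unsigned char storage[8] = {'A','B','C','D','E','F','G','H'};
    /* Reader at position 6 with 5 valid bytes: valid data is G H A B C,
     * so reading 4 bytes has to wrap around the end of the buffer. */
    struct ring r = { .buf = storage, .size = 8, .start = 6, .len = 5 };
    char out[6] = {0};
    ring_read(&r, out, 4);
    printf("%s\n", out);                 /* prints "GHAB" */
    return 0;
}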
2020-02-29 23:28:09 +00:00
|
|
|
extern const struct m_sub_options dec_wrapper_conf;
|
2019-11-28 23:16:52 +00:00
|
|
|
extern const struct m_sub_options mp_opt_root;
|
2013-06-07 20:57:00 +00:00
|
|
|
|
2008-03-31 03:19:29 +00:00
|
|
|
#endif
|