#pragma once

#include <stdbool.h>

#include <zimg.h>

#include "mp_image.h"

#define ZIMG_ALIGN 64

struct mpv_global;

bool mp_zimg_supports_in_format(int imgfmt);
bool mp_zimg_supports_out_format(int imgfmt);
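
// Example (illustrative sketch, not part of this API): probing format support
// before setting up a conversion. IMGFMT_420P and IMGFMT_RGB0 are mpv image
// format IDs from img_format.h; any IMGFMT_* value can be checked the same way.
//
//   if (mp_zimg_supports_in_format(IMGFMT_420P) &&
//       mp_zimg_supports_out_format(IMGFMT_RGB0))
//   {
//       // a 420p -> rgb0 conversion can be attempted with mp_zimg_convert()
//   }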

struct zimg_opts {
    int scaler;                     // zimg resample filter (ZIMG_RESIZE_*)
    double scaler_params[2];        // filter-specific parameters
    int scaler_chroma;              // resample filter for chroma planes
    double scaler_chroma_params[2]; // filter-specific chroma parameters
    int dither;                     // dither mode (ZIMG_DITHER_*)
    int fast;                       // trade conversion accuracy for speed
};

extern const struct zimg_opts zimg_opts_defaults;
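
// Example (illustrative sketch): start from the built-in defaults and override
// individual fields. "z" is assumed to be a previously allocated
// mp_zimg_context; the ZIMG_RESIZE_*/ZIMG_DITHER_* enum values come from
// <zimg.h> (treat the exact value mapping as an assumption; see zimg.c).
//
//   struct zimg_opts opts = zimg_opts_defaults;
//   opts.scaler = ZIMG_RESIZE_LANCZOS;
//   opts.dither = ZIMG_DITHER_ERROR_DIFFUSION;
//   z->opts = opts;   // takes effect on the next (re)configuration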

struct mp_zimg_context {
    // Can be set for verbose error printing.
    struct mp_log *log;

    // User configuration. Note: changing these requires calling mp_zimg_config()
    // to update the filter graph. The first mp_zimg_convert() call (or if the
    // image format changes) will do this automatically.
    struct zimg_opts opts;

    // Input/output parameters. Note: if these mismatch with the
    // mp_zimg_convert() parameters, mp_zimg_config() will be called
    // automatically.
    struct mp_image_params src, dst;

    // Cached zimg state (if any). Private, do not touch.
    struct m_config_cache *opts_cache;
    zimg_filter_graph *zimg_graph;
    void *zimg_tmp;
    void *zimg_tmp_alloc;
    struct mp_zimg_repack *zimg_src;
    struct mp_zimg_repack *zimg_dst;
};

// Allocate a zimg context. Always succeeds. Returns a talloc pointer (use
// talloc_free() to release it).
struct mp_zimg_context *mp_zimg_alloc(void);

// Enable auto-update of parameters from command line. Don't try to set custom
// options (other than possibly .src/.dst), because they might be overwritten
// if the user changes any options.
void mp_zimg_enable_cmdline_opts(struct mp_zimg_context *ctx,
                                 struct mpv_global *g);
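
// Example (illustrative sketch): wiring a context to the user's command line
// options. "global" is assumed to be supplied by the caller (e.g. a VO's
// mpv_global pointer).
//
//   struct mp_zimg_context *z = mp_zimg_alloc();
//   mp_zimg_enable_cmdline_opts(z, global);
//   // from here on, set only z->src/z->dst; z->opts is managed automatically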

// Try to build the conversion chain using the parameters currently set in ctx.
// If this succeeds, mp_zimg_convert() will always succeed (probably), as long
// as the input has the same parameters.
// Returns false on error.
bool mp_zimg_config(struct mp_zimg_context *ctx);

// Similar to mp_zimg_config(), but assumes none of the user parameters changed,
// except possibly .src and .dst. This essentially checks whether src/dst
// changed, and if so, calls mp_zimg_config().
bool mp_zimg_config_image_params(struct mp_zimg_context *ctx);

// Convert/scale src to dst. On failure, the data in dst is not touched.
bool mp_zimg_convert(struct mp_zimg_context *ctx, struct mp_image *dst,
                     struct mp_image *src);
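
// Example (illustrative sketch): a one-shot conversion. Assumes "src" is a
// valid input image, "dst" is an already allocated output image, and that
// mp_image's "params" member describes each buffer (as declared in
// mp_image.h). Real callers should keep the context around across frames and
// let mp_zimg_config_image_params() reconfigure it only when formats change.
//
//   struct mp_zimg_context *z = mp_zimg_alloc();
//   z->src = src->params;
//   z->dst = dst->params;
//   bool ok = mp_zimg_config(z) && mp_zimg_convert(z, dst, src);
//   talloc_free(z);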