#pragma once

#include <stdbool.h>

#include <zimg.h>

#include "mp_image.h"

#define ZIMG_ALIGN 64

struct mpv_global;

bool mp_zimg_supports_in_format(int imgfmt);
bool mp_zimg_supports_out_format(int imgfmt);
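
// A caller deciding whether zimg can handle a conversion might gate on both
// checks (illustrative sketch only; the swscale fallback named here is an
// assumption, not something this header provides):
//
//   if (mp_zimg_supports_in_format(src_params.imgfmt) &&
//       mp_zimg_supports_out_format(dst_params.imgfmt))
//   {
//       // safe to use mp_zimg_convert() for this format pair
//   } else {
//       // fall back to another converter, e.g. libswscale
//   }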

struct zimg_opts {
    int scaler;
    double scaler_params[2];
    int scaler_chroma;
    double scaler_chroma_params[2];
    int dither;
    bool fast;
    int threads;
};

extern const struct zimg_opts zimg_opts_defaults;
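
// Illustrative sketch of customizing options: copy the defaults, then adjust
// individual fields before configuring a context. That the int fields take
// ZIMG_RESIZE_*/ZIMG_DITHER_* constants from <zimg.h> is an assumption here.
//
//   struct zimg_opts opts = zimg_opts_defaults;
//   opts.scaler = ZIMG_RESIZE_LANCZOS;          // assumed mapping
//   opts.dither = ZIMG_DITHER_ERROR_DIFFUSION;  // assumed mapping
//   ctx->opts = opts;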

struct mp_zimg_context {
    // Can be set for verbose error printing.
    struct mp_log *log;

    // User configuration. Note: changing these requires calling mp_zimg_config()
    // to update the filter graph. The first mp_zimg_convert() call (or if the
    // image format changes) will do this automatically.
    struct zimg_opts opts;

    // Input/output parameters. Note: if these mismatch with the
    // mp_zimg_convert() parameters, mp_zimg_config() will be called
    // automatically.
    struct mp_image_params src, dst;

    // Cached zimg state (if any). Private, do not touch.
    struct m_config_cache *opts_cache;
    struct mp_zimg_state **states;
    int num_states;
    struct mp_thread_pool *tp;
    int current_thread_count;
};

// Allocate a zimg context. Always succeeds. Returns a talloc pointer (use
// talloc_free() to release it).
struct mp_zimg_context *mp_zimg_alloc(void);
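
// Lifetime sketch (the mp_log pointer is whatever log the caller already has;
// nothing here is prescribed beyond alloc/free):
//
//   struct mp_zimg_context *z = mp_zimg_alloc();
//   z->log = log;                  // optional: enables verbose error printing
//   z->opts = zimg_opts_defaults;  // or customized, see above
//   // ... use z ...
//   talloc_free(z);                // releases the context and any cached state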

// Enable auto-update of parameters from command line. Don't try to set custom
// options (other than possibly .src/.dst), because they might be overwritten
// if the user changes any options.
void mp_zimg_enable_cmdline_opts(struct mp_zimg_context *ctx,
                                 struct mpv_global *g);
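
// Sketch of the command-line-driven path (assumes the caller has a
// struct mpv_global, e.g. from the player core):
//
//   struct mp_zimg_context *z = mp_zimg_alloc();
//   mp_zimg_enable_cmdline_opts(z, global);
//   // from here on, set only z->src/z->dst; the other options follow the
//   // user's settings and may be overwritten at any time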

// Try to build the conversion chain using the parameters currently set in ctx.
// If this succeeds, mp_zimg_convert() will always succeed (probably), as long
// as the input has the same parameters.
// Returns false on error.
bool mp_zimg_config(struct mp_zimg_context *ctx);

// Similar to mp_zimg_config(), but assume none of the user parameters changed,
// except possibly .src and .dst. This essentially checks whether src/dst
// changed, and if so, calls mp_zimg_config().
bool mp_zimg_config_image_params(struct mp_zimg_context *ctx);
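
// This makes the per-frame pattern cheap when parameters rarely change
// (sketch):
//
//   z->src = src->params;
//   z->dst = dst->params;
//   if (!mp_zimg_config_image_params(z))  // no-op unless src/dst changed
//       return false;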

// Convert/scale src to dst. On failure, the data in dst is not touched.
bool mp_zimg_convert(struct mp_zimg_context *ctx, struct mp_image *dst,
                     struct mp_image *src);
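
// Putting it together, a minimal one-shot conversion could look like this
// (sketch; dst must already be allocated with the desired parameters, e.g.
// via mp_image_alloc(), which is an assumption about the caller's side):
//
//   struct mp_zimg_context *z = mp_zimg_alloc();
//   z->src = src->params;
//   z->dst = dst->params;
//   bool ok = mp_zimg_config(z) && mp_zimg_convert(z, dst, src);
//   talloc_free(z);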