#ifndef MP_HWDEC_H_
#define MP_HWDEC_H_

#include "options/m_option.h"

struct mp_image_pool;

// keep in sync with --hwdec option (see mp_hwdec_names)
enum hwdec_type {
    HWDEC_AUTO = -1,
    HWDEC_NONE = 0,
    HWDEC_VDPAU = 1,
    HWDEC_VIDEOTOOLBOX = 3,
    HWDEC_VAAPI = 4,
    HWDEC_VAAPI_COPY = 5,
    HWDEC_DXVA2_COPY = 6,
    HWDEC_RPI = 7,
};

// hwdec_type names (options.c)
extern const struct m_opt_choice_alternatives mp_hwdec_names[];
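
// Illustrative sketch (not part of the original header): resolving a
// user-facing name such as "vdpau" back to an enum hwdec_type through
// mp_hwdec_names. It assumes the table is terminated by an entry with a
// NULL name (the usual convention for m_opt_choice_alternatives tables)
// and that <string.h> is available for strcmp().
static inline enum hwdec_type example_hwdec_type_from_name(const char *name)
{
    for (const struct m_opt_choice_alternatives *a = mp_hwdec_names; a->name; a++) {
        if (strcmp(a->name, name) == 0)
            return a->value;
    }
    return HWDEC_NONE; // unknown name: treat as "no hardware decoding"
}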

struct mp_hwdec_ctx {
    enum hwdec_type type;

    void *priv; // for free use by hwdec implementation

    // API-specific, not needed by all backends.
    struct mp_vdpau_ctx *vdpau_ctx;
    struct mp_vaapi_ctx *vaapi_ctx;
    struct mp_d3d_ctx *d3d_ctx;

    // Optional.
    // Allocates a software image from the pool, downloads the hw image from
    // mpi, and returns it.
    // pool can be NULL (then just use straight allocation).
    // Return NULL on error or if mpi has the wrong format.
    struct mp_image *(*download_image)(struct mp_hwdec_ctx *ctx,
                                       struct mp_image *mpi,
                                       struct mp_image_pool *swpool);
};
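
// Illustrative sketch (not part of the original header): one possible shape
// for a download_image callback. The actual surface readback is API-specific;
// example_hw_readback() below is a hypothetical stand-in for e.g. a VDPAU or
// VAAPI readback call, and IMGFMT_NV12 is merely a plausible target format.
// Assumes video/img_format.h, video/mp_image.h and video/mp_image_pool.h are
// included for IMGFMT_NV12, mp_image_alloc(), mp_image_pool_get(),
// mp_image_copy_attributes() and talloc_free().

// Hypothetical API-specific readback helper; returns 0 on success.
int example_hw_readback(struct mp_hwdec_ctx *ctx, struct mp_image *hw,
                        struct mp_image *sw);

static inline struct mp_image *example_download_image(struct mp_hwdec_ctx *ctx,
                                                      struct mp_image *mpi,
                                                      struct mp_image_pool *swpool)
{
    // Allocate the destination software frame; a NULL pool means a plain
    // allocation, as documented above.
    struct mp_image *sw = swpool
        ? mp_image_pool_get(swpool, IMGFMT_NV12, mpi->w, mpi->h)
        : mp_image_alloc(IMGFMT_NV12, mpi->w, mpi->h);
    if (!sw)
        return NULL;
    if (example_hw_readback(ctx, mpi, sw) < 0) {
        talloc_free(sw);
        return NULL;
    }
    // Carry over metadata (colorspace, timestamps, etc.) from the hw frame.
    mp_image_copy_attributes(sw, mpi);
    return sw;
}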

// Used to communicate hardware decoder API handles from VO to video decoder.
// The VO can set the context pointer for supported APIs.
struct mp_hwdec_info {
    // (Since currently only 1 hwdec API is loaded at a time, this pointer
    // simply maps to the loaded one.)
    struct mp_hwdec_ctx *hwctx;

    // Can be used to lazily load a requested API.
    // api_name is e.g. "vdpau" (like the fields above, without "_ctx")
    // Can be NULL, is idempotent, caller checks hwctx fields for success/access.
    // Due to threading, the callback is the only code that is allowed to
    // change fields in this struct after initialization.
    void (*load_api)(struct mp_hwdec_info *info, const char *api_name);
    void *load_api_ctx;
};

// Trivial helper to call info->load_api().
// Implemented in vd_lavc.c.
void hwdec_request_api(struct mp_hwdec_info *info, const char *api_name);
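
// Illustrative sketch (not part of the original header): how a decoder might
// lazily request an API and then check whether the VO actually provided a
// context. The choice of "vdpau" and the overall probing flow are assumptions
// made for the sake of the example.
static inline struct mp_hwdec_ctx *example_probe_vdpau(struct mp_hwdec_info *info)
{
    if (!info)
        return NULL;
    // No-op if the API is already loaded or if no loader callback is set.
    hwdec_request_api(info, "vdpau");
    // Success is signaled only through the hwctx fields.
    if (info->hwctx && info->hwctx->vdpau_ctx)
        return info->hwctx;
    return NULL;
}
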
#endif