/* mpv/video/decode/vd_lavc.c */
/*
* This file is part of mpv.
*
* mpv is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* mpv is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with mpv. If not, see <http://www.gnu.org/licenses/>.
*
* Almost LGPLv3+.
*
* The parts potentially making this file LGPL v3 (instead of v2.1 or later) are:
* 376e3abf5c7d2 xvmc use get_format for IDCT/MC recognition
* c73f0e18bd1d6 Return PIX_FMT_NONE if the video system refuses all other formats.
* (iive agreed to LGPL v3+ only. Jeremy agreed to LGPL v2.1 or later.)
* Once these changes are no longer relevant for copyright (e.g. because
* they have been removed), and the core is LGPL, this file will change to
* LGPLv2.1+.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <stdbool.h>
#include <sys/types.h>
#include <libavutil/common.h>
#include <libavutil/opt.h>
#include <libavutil/hwcontext.h>
#include <libavutil/intreadwrite.h>
#include <libavutil/pixdesc.h>
#include "mpv_talloc.h"
#include "config.h"
#include "common/msg.h"
#include "options/options.h"
#include "misc/bstr.h"
#include "common/av_common.h"
#include "common/codecs.h"
#include "video/fmt-conversion.h"
#include "vd.h"
#include "video/img_format.h"
#include "video/filter/vf.h"
#include "video/decode/dec_video.h"
#include "demux/demux.h"
#include "demux/stheader.h"
#include "demux/packet.h"
#include "video/csputils.h"
#include "video/sws_utils.h"
#include "video/out/vo.h"
#if LIBAVCODEC_VERSION_MICRO >= 100
#include <libavutil/mastering_display_metadata.h>
#endif
#include "lavc.h"
#if AVPALETTE_SIZE != MP_PALETTE_SIZE
#error palette too large, adapt video/mp_image.h:MP_PALETTE_SIZE
#endif
#include "options/m_option.h"
static void init_avctx(struct dec_video *vd, const char *decoder,
struct vd_lavc_hwdec *hwdec);
static void uninit_avctx(struct dec_video *vd);
static int get_buffer2_direct(AVCodecContext *avctx, AVFrame *pic, int flags);
static int get_buffer2_hwdec(AVCodecContext *avctx, AVFrame *pic, int flags);
static enum AVPixelFormat get_format_hwdec(struct AVCodecContext *avctx,
const enum AVPixelFormat *pix_fmt);
#define OPT_BASE_STRUCT struct vd_lavc_params
struct vd_lavc_params {
int fast;
int show_all;
int skip_loop_filter;
int skip_idct;
int skip_frame;
int framedrop;
int threads;
int bitexact;
int check_hw_profile;
int software_fallback;
char **avopts;
int dr;
};
static const struct m_opt_choice_alternatives discard_names[] = {
{"none", AVDISCARD_NONE},
{"default", AVDISCARD_DEFAULT},
{"nonref", AVDISCARD_NONREF},
{"bidir", AVDISCARD_BIDIR},
{"nonkey", AVDISCARD_NONKEY},
{"all", AVDISCARD_ALL},
{0}
};
#define OPT_DISCARD(name, field, flags) \
OPT_GENERAL(int, name, field, flags, .type = CONF_TYPE_CHOICE, \
.priv = (void *)discard_names)
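// Decoder-specific options. These are exposed to the user as the
// --vd-lavc-* option group (the prefix comes from the option registration
// elsewhere in the player core).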
const struct m_sub_options vd_lavc_conf = {
.opts = (const m_option_t[]){
OPT_FLAG("fast", fast, 0),
OPT_FLAG("show-all", show_all, 0),
OPT_DISCARD("skiploopfilter", skip_loop_filter, 0),
OPT_DISCARD("skipidct", skip_idct, 0),
OPT_DISCARD("skipframe", skip_frame, 0),
OPT_DISCARD("framedrop", framedrop, 0),
OPT_INT("threads", threads, M_OPT_MIN, .min = 0),
OPT_FLAG("bitexact", bitexact, 0),
OPT_FLAG("check-hw-profile", check_hw_profile, 0),
OPT_CHOICE_OR_INT("software-fallback", software_fallback, 0, 1, INT_MAX,
({"no", INT_MAX}, {"yes", 1})),
OPT_KEYVALUELIST("o", avopts, 0),
OPT_FLAG("dr", dr, 0),
{0}
},
.size = sizeof(struct vd_lavc_params),
.defaults = &(const struct vd_lavc_params){
.show_all = 0,
.check_hw_profile = 1,
.software_fallback = 3,
.skip_loop_filter = AVDISCARD_DEFAULT,
.skip_idct = AVDISCARD_DEFAULT,
.skip_frame = AVDISCARD_DEFAULT,
.framedrop = AVDISCARD_NONREF,
},
};
extern const struct vd_lavc_hwdec mp_vd_lavc_videotoolbox;
extern const struct vd_lavc_hwdec mp_vd_lavc_videotoolbox_copy;
extern const struct vd_lavc_hwdec mp_vd_lavc_dxva2;
extern const struct vd_lavc_hwdec mp_vd_lavc_dxva2_copy;
extern const struct vd_lavc_hwdec mp_vd_lavc_d3d11va;
extern const struct vd_lavc_hwdec mp_vd_lavc_d3d11va_copy;
extern const struct vd_lavc_hwdec mp_vd_lavc_cuda_old;
#if HAVE_RPI
static const struct vd_lavc_hwdec mp_vd_lavc_rpi = {
.type = HWDEC_RPI,
.lavc_suffix = "_mmal",
.image_format = IMGFMT_MMAL,
};
static const struct vd_lavc_hwdec mp_vd_lavc_rpi_copy = {
.type = HWDEC_RPI_COPY,
.lavc_suffix = "_mmal",
.copying = true,
};
#endif
#if HAVE_ANDROID
static const struct vd_lavc_hwdec mp_vd_lavc_mediacodec = {
.type = HWDEC_MEDIACODEC,
.lavc_suffix = "_mediacodec",
.copying = true,
};
#endif
#if NEW_CUDA_HWACCEL
static const struct vd_lavc_hwdec mp_vd_lavc_cuda = {
.type = HWDEC_CUDA,
.image_format = IMGFMT_CUDA,
.lavc_suffix = "_cuvid",
.generic_hwaccel = true,
};
#endif
#if HAVE_CUDA_HWACCEL
static const struct vd_lavc_hwdec mp_vd_lavc_cuda_copy = {
.type = HWDEC_CUDA_COPY,
.lavc_suffix = "_cuvid",
.copying = true,
};
#endif
static const struct vd_lavc_hwdec mp_vd_lavc_crystalhd = {
.type = HWDEC_CRYSTALHD,
.lavc_suffix = "_crystalhd",
.copying = true,
};
#if HAVE_VAAPI_HWACCEL
static const struct vd_lavc_hwdec mp_vd_lavc_vaapi = {
.type = HWDEC_VAAPI,
.image_format = IMGFMT_VAAPI,
.generic_hwaccel = true,
.set_hwframes = true,
.static_pool = true,
.pixfmt_map = (const enum AVPixelFormat[][2]) {
{AV_PIX_FMT_YUV420P10, AV_PIX_FMT_P010},
{AV_PIX_FMT_YUV420P, AV_PIX_FMT_NV12},
{AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_NV12},
{AV_PIX_FMT_NONE}
},
};
#include "video/vaapi.h"
static const struct vd_lavc_hwdec mp_vd_lavc_vaapi_copy = {
.type = HWDEC_VAAPI_COPY,
.copying = true,
.image_format = IMGFMT_VAAPI,
.generic_hwaccel = true,
.set_hwframes = true,
.static_pool = true,
.create_dev = va_create_standalone,
.pixfmt_map = (const enum AVPixelFormat[][2]) {
{AV_PIX_FMT_YUV420P10, AV_PIX_FMT_P010},
{AV_PIX_FMT_YUV420P, AV_PIX_FMT_NV12},
{AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_NV12},
{AV_PIX_FMT_NONE}
},
};
#endif
#if HAVE_VDPAU_HWACCEL
static const struct vd_lavc_hwdec mp_vd_lavc_vdpau = {
.type = HWDEC_VDPAU,
.image_format = IMGFMT_VDPAU,
.generic_hwaccel = true,
.set_hwframes = true,
.pixfmt_map = (const enum AVPixelFormat[][2]) {
{AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P},
{AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUV420P},
{AV_PIX_FMT_NONE}
},
};
#include "video/vdpau.h"
static const struct vd_lavc_hwdec mp_vd_lavc_vdpau_copy = {
.type = HWDEC_VDPAU_COPY,
.copying = true,
.image_format = IMGFMT_VDPAU,
.generic_hwaccel = true,
.set_hwframes = true,
.create_dev = vdpau_create_standalone,
.pixfmt_map = (const enum AVPixelFormat[][2]) {
{AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P},
{AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUV420P},
{AV_PIX_FMT_NONE}
},
};
#endif
static const struct vd_lavc_hwdec *const hwdec_list[] = {
#if HAVE_RPI
&mp_vd_lavc_rpi,
&mp_vd_lavc_rpi_copy,
#endif
#if HAVE_VDPAU_HWACCEL
&mp_vd_lavc_vdpau,
&mp_vd_lavc_vdpau_copy,
#endif
#if HAVE_VIDEOTOOLBOX_HWACCEL
&mp_vd_lavc_videotoolbox,
&mp_vd_lavc_videotoolbox_copy,
#endif
#if HAVE_VAAPI_HWACCEL
&mp_vd_lavc_vaapi,
&mp_vd_lavc_vaapi_copy,
#endif
#if HAVE_D3D_HWACCEL
&mp_vd_lavc_d3d11va,
#if HAVE_D3D9_HWACCEL
&mp_vd_lavc_dxva2,
&mp_vd_lavc_dxva2_copy,
#endif
&mp_vd_lavc_d3d11va_copy,
#endif
#if HAVE_ANDROID
&mp_vd_lavc_mediacodec,
#endif
#if HAVE_CUDA_HWACCEL
#if NEW_CUDA_HWACCEL
&mp_vd_lavc_cuda,
#else
&mp_vd_lavc_cuda_old,
#endif
&mp_vd_lavc_cuda_copy,
#endif
&mp_vd_lavc_crystalhd,
NULL
};
static struct vd_lavc_hwdec *find_hwcodec(enum hwdec_type api)
{
for (int n = 0; hwdec_list[n]; n++) {
if (hwdec_list[n]->type == api)
return (struct vd_lavc_hwdec *)hwdec_list[n];
}
return NULL;
}
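// Check the --hwdec-codecs whitelist: hw decoding is allowed if the list
// contains "all" or the name of the current codec.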
static bool hwdec_codec_allowed(struct dec_video *vd, const char *codec)
{
bstr s = bstr0(vd->opts->hwdec_codecs);
while (s.len) {
bstr item;
bstr_split_tok(s, ",", &item, &s);
if (bstr_equals0(item, "all") || bstr_equals0(item, codec))
return true;
}
return false;
}
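// Rough upper bound on the number of reference surfaces the codec may need;
// used to size fixed-size hw surface pools.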
int hwdec_get_max_refs(struct lavc_ctx *ctx)
{
switch (ctx->avctx->codec_id) {
case AV_CODEC_ID_H264:
case AV_CODEC_ID_HEVC:
return 16;
case AV_CODEC_ID_VP9:
return 8;
}
return 2;
}
// This is intended to return the name of a decoder for a given wrapper API.
// Decoder wrappers are usually added to libavcodec with a specific suffix.
// For example the mmal h264 decoder is named h264_mmal.
// This function would e.g. return h264_mmal for
// hwdec_find_decoder("h264", "_mmal").
// Just concatenating the two names will not always work due to inconsistencies
// (e.g. "mpeg2video" vs. "mpeg2").
static const char *hwdec_find_decoder(const char *codec, const char *suffix)
{
enum AVCodecID codec_id = mp_codec_to_av_codec_id(codec);
if (codec_id == AV_CODEC_ID_NONE)
return NULL;
AVCodec *cur = NULL;
for (;;) {
cur = av_codec_next(cur);
if (!cur)
break;
if (cur->id == codec_id && av_codec_is_decoder(cur) &&
bstr_endswith0(bstr0(cur->name), suffix))
return cur->name;
}
return NULL;
}
// Parallel to hwdec_find_decoder(): return whether a hwdec can use the given
// decoder. This can't be answered accurately; it works for wrapper decoders
// only (like mmal), and for real hwaccels this will always return false.
static bool hwdec_is_wrapper(struct vd_lavc_hwdec *hwdec, const char *decoder)
{
if (!hwdec->lavc_suffix)
return false;
return bstr_endswith0(bstr0(decoder), hwdec->lavc_suffix);
}
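// Obtain a hw decoding device: create a standalone one via the hwdec's
// create_dev callback if available, otherwise request it from the VO's
// device list.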
static struct mp_hwdec_ctx *hwdec_create_dev(struct dec_video *vd,
struct vd_lavc_hwdec *hwdec,
bool autoprobe)
{
if (hwdec->create_dev)
return hwdec->create_dev(vd->global, vd->log, autoprobe);
if (vd->hwdec_devs) {
hwdec_devices_request(vd->hwdec_devs, hwdec->type);
return hwdec_devices_get(vd->hwdec_devs, hwdec->type);
}
return NULL;
}
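// Check whether the hwdec can be used with the given codec. Returns 0 (or a
// positive value) on success, or a HWDEC_ERR_* code on failure.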
static int hwdec_probe(struct dec_video *vd, struct vd_lavc_hwdec *hwdec,
const char *codec, bool autoprobe)
{
vd_ffmpeg_ctx *ctx = vd->priv;
int r = 0;
if (hwdec->probe)
r = hwdec->probe(ctx, hwdec, codec);
if (hwdec->generic_hwaccel) {
assert(!hwdec->probe && !hwdec->init && !hwdec->init_decoder &&
!hwdec->uninit && !hwdec->allocate_image);
struct mp_hwdec_ctx *dev = hwdec_create_dev(vd, hwdec, autoprobe);
if (!dev)
return hwdec->copying ? -1 : HWDEC_ERR_NO_CTX;
if (dev->emulated)
r = HWDEC_ERR_EMULATED;
if (hwdec->create_dev && dev->destroy)
dev->destroy(dev);
}
if (r >= 0) {
if (hwdec->lavc_suffix && !hwdec_find_decoder(codec, hwdec->lavc_suffix))
return HWDEC_ERR_NO_CODEC;
}
return r;
}
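// Look up and probe the hwdec backing the requested API. Returns NULL if it
// is not compiled in or probing fails; emulated APIs are skipped when
// autoprobing.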
static struct vd_lavc_hwdec *probe_hwdec(struct dec_video *vd, bool autoprobe,
enum hwdec_type api,
const char *codec)
{
MP_VERBOSE(vd, "Probing '%s'...\n", m_opt_choice_str(mp_hwdec_names, api));
struct vd_lavc_hwdec *hwdec = find_hwcodec(api);
if (!hwdec) {
int level = autoprobe ? MSGL_V : MSGL_WARN;
MP_MSG(vd, level, "Requested hardware decoder not compiled.\n");
return NULL;
}
int r = hwdec_probe(vd, hwdec, codec, autoprobe);
if (r == HWDEC_ERR_EMULATED) {
if (autoprobe)
return NULL;
// User requested this explicitly.
MP_WARN(vd, "Using emulated hardware decoding API.\n");
r = 0;
}
if (r >= 0) {
return hwdec;
} else if (r == HWDEC_ERR_NO_CODEC) {
MP_VERBOSE(vd, "Hardware decoder for '%s' with the given API not found "
"in libavcodec.\n", codec);
} else if (r == HWDEC_ERR_NO_CTX && !autoprobe) {
MP_WARN(vd, "VO does not support requested hardware decoder, or "
"loading it failed.\n");
}
return NULL;
}
static void uninit(struct dec_video *vd)
{
vd_ffmpeg_ctx *ctx = vd->priv;
uninit_avctx(vd);
pthread_mutex_destroy(&ctx->dr_lock);
talloc_free(vd->priv);
}
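// Tear down the current (hw) decoder and reopen the same codec with software
// decoding.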
static void force_fallback(struct dec_video *vd)
{
vd_ffmpeg_ctx *ctx = vd->priv;
uninit_avctx(vd);
int lev = ctx->hwdec_notified ? MSGL_WARN : MSGL_V;
mp_msg(vd->log, lev, "Falling back to software decoding.\n");
init_avctx(vd, ctx->decoder, NULL);
}
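// (Re)create the decoder: pick a hwdec according to --hwdec and the codec
// whitelist, switch to the matching wrapper decoder if needed, and fall back
// to software decoding if hw initialization fails.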
static void reinit(struct dec_video *vd)
{
vd_ffmpeg_ctx *ctx = vd->priv;
const char *decoder = ctx->decoder;
const char *codec = vd->codec->codec;
uninit_avctx(vd);
struct vd_lavc_hwdec *hwdec = NULL;
if (hwdec_codec_allowed(vd, codec)) {
int api = vd->opts->hwdec_api;
if (HWDEC_IS_AUTO(api)) {
// If a specific decoder is forced, we should try a hwdec method
// that works with it, instead of simply failing later at runtime.
// This is good for avoiding trying "normal" hwaccels on wrapper
// decoders (like vaapi on a mmal decoder). Since libavcodec doesn't
// tell us which decoder supports which hwaccel methods without
// actually running it, do it by detecting such wrapper decoders.
// On the other hand, e.g. "--hwdec=rpi" should always force the
// wrapper decoder, so be careful not to break this case.
bool might_be_wrapper = false;
for (int n = 0; hwdec_list[n]; n++) {
struct vd_lavc_hwdec *other = (void *)hwdec_list[n];
if (hwdec_is_wrapper(other, decoder))
might_be_wrapper = true;
}
for (int n = 0; hwdec_list[n]; n++) {
hwdec = probe_hwdec(vd, true, hwdec_list[n]->type, codec);
if (hwdec) {
if (might_be_wrapper && !hwdec_is_wrapper(hwdec, decoder)) {
MP_VERBOSE(vd, "This hwaccel is not compatible.\n");
continue;
}
if (api == HWDEC_AUTO_COPY && !hwdec->copying) {
MP_VERBOSE(vd, "Not using this for auto-copy mode.\n");
continue;
}
break;
}
}
} else if (api != HWDEC_NONE) {
hwdec = probe_hwdec(vd, false, api, codec);
}
} else {
MP_VERBOSE(vd, "Not trying to use hardware decoding: codec %s is not "
"on whitelist, or does not support hardware acceleration.\n",
codec);
}
if (hwdec) {
const char *orig_decoder = decoder;
if (hwdec->lavc_suffix)
decoder = hwdec_find_decoder(codec, hwdec->lavc_suffix);
MP_VERBOSE(vd, "Trying hardware decoding.\n");
if (strcmp(orig_decoder, decoder) != 0)
MP_VERBOSE(vd, "Using underlying hw-decoder '%s'\n", decoder);
} else {
MP_VERBOSE(vd, "Using software decoding.\n");
}
init_avctx(vd, decoder, hwdec);
if (!ctx->avctx && hwdec)
force_fallback(vd);
}
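// Decoder driver entry point: allocate the per-instance context and perform
// the initial decoder setup.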
static int init(struct dec_video *vd, const char *decoder)
{
vd_ffmpeg_ctx *ctx;
ctx = vd->priv = talloc_zero(NULL, vd_ffmpeg_ctx);
ctx->log = vd->log;
ctx->opts = vd->opts;
ctx->decoder = talloc_strdup(ctx, decoder);
ctx->hwdec_devs = vd->hwdec_devs;
ctx->hwdec_swpool = talloc_steal(ctx, mp_image_pool_new(17));
ctx->dr_pool = talloc_steal(ctx, mp_image_pool_new(INT_MAX));
pthread_mutex_init(&ctx->dr_lock, NULL);
reinit(vd);
if (!ctx->avctx) {
uninit(vd);
return 0;
}
return 1;
}
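// Allocate and open the AVCodecContext for the given decoder name, applying
// user options and, if a hwdec is given, the hw-specific setup (get_format/
// get_buffer2 callbacks, device and frame contexts).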
static void init_avctx(struct dec_video *vd, const char *decoder,
struct vd_lavc_hwdec *hwdec)
{
vd_ffmpeg_ctx *ctx = vd->priv;
struct vd_lavc_params *lavc_param = vd->opts->vd_lavc_params;
struct mp_codec_params *c = vd->codec;
assert(!ctx->avctx);
AVCodec *lavc_codec = avcodec_find_decoder_by_name(decoder);
if (!lavc_codec)
return;
ctx->codec_timebase = mp_get_codec_timebase(vd->codec);
// This decoder does not read pkt_timebase correctly yet.
if (strstr(decoder, "_mmal"))
ctx->codec_timebase = (AVRational){1, 1000000};
ctx->pix_fmt = AV_PIX_FMT_NONE;
ctx->hwdec = hwdec;
ctx->hwdec_fmt = 0;
ctx->hwdec_failed = false;
ctx->hwdec_request_reinit = false;
ctx->avctx = avcodec_alloc_context3(lavc_codec);
AVCodecContext *avctx = ctx->avctx;
if (!ctx->avctx)
goto error;
avctx->codec_type = AVMEDIA_TYPE_VIDEO;
avctx->codec_id = lavc_codec->id;
#if LIBAVCODEC_VERSION_MICRO >= 100
avctx->pkt_timebase = ctx->codec_timebase;
#endif
ctx->pic = av_frame_alloc();
if (!ctx->pic)
goto error;
if (ctx->hwdec) {
avctx->opaque = vd;
avctx->thread_count = 1;
#if HAVE_VDPAU_HWACCEL
avctx->hwaccel_flags |= AV_HWACCEL_FLAG_IGNORE_LEVEL;
#endif
#ifdef AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH
if (!lavc_param->check_hw_profile)
avctx->hwaccel_flags |= AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH;
#endif
if (ctx->hwdec->image_format)
avctx->get_format = get_format_hwdec;
if (ctx->hwdec->allocate_image)
avctx->get_buffer2 = get_buffer2_hwdec;
if (ctx->hwdec->init && ctx->hwdec->init(ctx) < 0)
goto error;
if (ctx->hwdec->generic_hwaccel) {
ctx->hwdec_dev = hwdec_create_dev(vd, ctx->hwdec, false);
if (!ctx->hwdec_dev)
goto error;
ctx->owns_hwdec_dev = !!ctx->hwdec->create_dev;
if (ctx->hwdec_dev->restore_device)
ctx->hwdec_dev->restore_device(ctx->hwdec_dev);
if (!ctx->hwdec->set_hwframes) {
#if HAVE_VDPAU_HWACCEL
avctx->hw_device_ctx = av_buffer_ref(ctx->hwdec_dev->av_device_ref);
#else
goto error;
#endif
}
}
ctx->max_delay_queue = ctx->hwdec->delay_queue;
ctx->hw_probing = true;
} else {
mp_set_avcodec_threads(vd->log, avctx, lavc_param->threads);
}
if (!ctx->hwdec && vd->vo && lavc_param->dr) {
avctx->opaque = vd;
avctx->get_buffer2 = get_buffer2_direct;
avctx->thread_safe_callbacks = 1;
}
avctx->flags |= lavc_param->bitexact ? AV_CODEC_FLAG_BITEXACT : 0;
avctx->flags2 |= lavc_param->fast ? AV_CODEC_FLAG2_FAST : 0;
if (lavc_param->show_all)
avctx->flags |= AV_CODEC_FLAG_OUTPUT_CORRUPT;
avctx->skip_loop_filter = lavc_param->skip_loop_filter;
avctx->skip_idct = lavc_param->skip_idct;
avctx->skip_frame = lavc_param->skip_frame;
mp_set_avopts(vd->log, avctx, lavc_param->avopts);
// Do this after the above avopt handling in case it changes values
ctx->skip_frame = avctx->skip_frame;
if (mp_set_avctx_codec_headers(avctx, c) < 0) {
MP_ERR(vd, "Could not set codec parameters.\n");
goto error;
}
/* open it */
if (avcodec_open2(avctx, lavc_codec, NULL) < 0)
goto error;
return;
error:
MP_ERR(vd, "Could not open codec.\n");
uninit_avctx(vd);
}
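// Flush libavcodec's internal state without recreating the decoder.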
static void reset_avctx(struct dec_video *vd)
{
vd_ffmpeg_ctx *ctx = vd->priv;
if (ctx->avctx && avcodec_is_open(ctx->avctx))
avcodec_flush_buffers(ctx->avctx);
ctx->flushing = false;
ctx->hwdec_request_reinit = false;
}
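// Discard all queued packets and delayed frames, then flush the decoder.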
static void flush_all(struct dec_video *vd)
{
vd_ffmpeg_ctx *ctx = vd->priv;
for (int n = 0; n < ctx->num_delay_queue; n++)
talloc_free(ctx->delay_queue[n]);
ctx->num_delay_queue = 0;
for (int n = 0; n < ctx->num_sent_packets; n++)
talloc_free(ctx->sent_packets[n]);
ctx->num_sent_packets = 0;
for (int n = 0; n < ctx->num_requeue_packets; n++)
talloc_free(ctx->requeue_packets[n]);
ctx->num_requeue_packets = 0;
reset_avctx(vd);
}
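// Free the codec context and all hwdec state; counterpart to init_avctx().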
static void uninit_avctx(struct dec_video *vd)
{
vd_ffmpeg_ctx *ctx = vd->priv;
flush_all(vd);
av_frame_free(&ctx->pic);
av_buffer_unref(&ctx->cached_hw_frames_ctx);
if (ctx->hwdec && ctx->hwdec->uninit)
ctx->hwdec->uninit(ctx);
ctx->hwdec = NULL;
assert(ctx->hwdec_priv == NULL);
avcodec_free_context(&ctx->avctx);
if (ctx->hwdec_dev && ctx->owns_hwdec_dev && ctx->hwdec_dev->destroy)
ctx->hwdec_dev->destroy(ctx->hwdec_dev);
ctx->hwdec_dev = NULL;
ctx->owns_hwdec_dev = false;
ctx->hwdec_failed = false;
ctx->hwdec_fail_count = 0;
ctx->max_delay_queue = 0;
ctx->hw_probing = false;
}
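// Fill mp_image_params metadata from the decoded frame: HDR signal peak from
// frame side data (cached across frames), rotation and stereo mode from the
// codec header.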
static void update_image_params(struct dec_video *vd, AVFrame *frame,
struct mp_image_params *params)
{
vd_ffmpeg_ctx *ctx = vd->priv;
AVFrameSideData *sd;
#if HAVE_AVUTIL_CONTENT_LIGHT_LEVEL
// Get the content light metadata if available
sd = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
if (sd) {
AVContentLightMetadata *clm = (AVContentLightMetadata *)sd->data;
params->color.sig_peak = clm->MaxCLL / MP_REF_WHITE;
}
#endif
#if LIBAVCODEC_VERSION_MICRO >= 100
// Otherwise, try getting the mastering metadata if available
sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
if (!params->color.sig_peak && sd) {
AVMasteringDisplayMetadata *mdm = (AVMasteringDisplayMetadata *)sd->data;
if (mdm->has_luminance)
params->color.sig_peak = av_q2d(mdm->max_luminance) / MP_REF_WHITE;
}
#endif
if (params->color.sig_peak) {
ctx->cached_sig_peak = params->color.sig_peak;
} else {
params->color.sig_peak = ctx->cached_sig_peak;
}
params->rotate = vd->codec->rotate;
params->stereo_in = vd->codec->stereo_mode;
}
// Allocate and set AVCodecContext.hw_frames_ctx. The frames context is cached
// and reused on redundant calls (useful because seeks issue get_format, which
// clears hw_frames_ctx).
// device_ctx: reference to an AVHWDeviceContext
// av_sw_format: AV_PIX_FMT_ for the underlying hardware frame format
// initial_pool_size: number of frames in the memory pool on creation
// Return >=0 on success, <0 on error.
int hwdec_setup_hw_frames_ctx(struct lavc_ctx *ctx, AVBufferRef *device_ctx,
int av_sw_format, int initial_pool_size)
{
int w = ctx->avctx->coded_width;
int h = ctx->avctx->coded_height;
int av_hw_format = imgfmt2pixfmt(ctx->hwdec_fmt);
if (!device_ctx) {
MP_ERR(ctx, "Missing device context.\n");
return -1;
}
if (ctx->cached_hw_frames_ctx) {
AVHWFramesContext *fctx = (void *)ctx->cached_hw_frames_ctx->data;
if (fctx->width != w || fctx->height != h ||
fctx->sw_format != av_sw_format ||
fctx->format != av_hw_format)
{
av_buffer_unref(&ctx->cached_hw_frames_ctx);
}
}
if (!ctx->cached_hw_frames_ctx) {
ctx->cached_hw_frames_ctx = av_hwframe_ctx_alloc(device_ctx);
if (!ctx->cached_hw_frames_ctx)
return -1;
AVHWFramesContext *fctx = (void *)ctx->cached_hw_frames_ctx->data;
fctx->format = av_hw_format;
fctx->sw_format = av_sw_format;
fctx->width = w;
fctx->height = h;
fctx->initial_pool_size = initial_pool_size;
if (ctx->hwdec->hwframes_refine)
ctx->hwdec->hwframes_refine(ctx, ctx->cached_hw_frames_ctx);
int res = av_hwframe_ctx_init(ctx->cached_hw_frames_ctx);
if (res < 0) {
MP_ERR(ctx, "Failed to allocate hw frames.\n");
av_buffer_unref(&ctx->cached_hw_frames_ctx);
return -1;
}
}
assert(!ctx->avctx->hw_frames_ctx);
ctx->avctx->hw_frames_ctx = av_buffer_ref(ctx->cached_hw_frames_ctx);
return ctx->avctx->hw_frames_ctx ? 0 : -1;
}
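// Set up hw_frames_ctx for generic hwaccels: derive the surface format from
// the decoder's sw_pix_fmt via the hwdec's pixfmt_map, check that the VO can
// render it, and size the pool for static-pool hwaccels.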
static int init_generic_hwaccel(struct dec_video *vd)
{
struct lavc_ctx *ctx = vd->priv;
struct vd_lavc_hwdec *hwdec = ctx->hwdec;
if (!ctx->hwdec_dev)
return -1;
if (!hwdec->set_hwframes)
return 0;
// libavcodec has no way yet to communicate the exact surface format needed
// for the frame pool, or the required minimum size of the frame pool.
// Hopefully, this weakness in the libavcodec API will be fixed in the
// future.
// For the pixel format, we try to second-guess from what the libavcodec
// software decoder would require (sw_pix_fmt). It could break and require
// adjustment if new hwaccel surface formats are added.
enum AVPixelFormat av_sw_format = AV_PIX_FMT_NONE;
assert(hwdec->pixfmt_map);
for (int n = 0; hwdec->pixfmt_map[n][0] != AV_PIX_FMT_NONE; n++) {
if (ctx->avctx->sw_pix_fmt == hwdec->pixfmt_map[n][0]) {
av_sw_format = hwdec->pixfmt_map[n][1];
break;
}
}
if (hwdec->image_format == IMGFMT_VIDEOTOOLBOX)
av_sw_format = imgfmt2pixfmt(vd->opts->videotoolbox_format);
if (av_sw_format == AV_PIX_FMT_NONE) {
MP_VERBOSE(ctx, "Unsupported hw decoding format: %s\n",
mp_imgfmt_to_name(pixfmt2imgfmt(ctx->avctx->sw_pix_fmt)));
return -1;
}
// The video output might not support all formats.
// Note that supported_formats==NULL means any are accepted.
int *render_formats = ctx->hwdec_dev->supported_formats;
if (render_formats) {
int mp_format = pixfmt2imgfmt(av_sw_format);
bool found = false;
for (int n = 0; render_formats[n]; n++) {
if (render_formats[n] == mp_format) {
found = true;
break;
}
}
if (!found) {
MP_WARN(ctx, "Surface format %s not supported for direct rendering.\n",
mp_imgfmt_to_name(mp_format));
return -1;
}
}
int pool_size = 0;
if (hwdec->static_pool)
pool_size = hwdec_get_max_refs(ctx) + HWDEC_EXTRA_SURFACES;
ctx->hwdec_fmt = hwdec->image_format;
if (hwdec->image_format == IMGFMT_VDPAU &&
ctx->avctx->codec_id == AV_CODEC_ID_HEVC)
{
MP_WARN(ctx, "HEVC video output may be broken due to nVidia bugs.\n");
}
return hwdec_setup_hw_frames_ctx(ctx, ctx->hwdec_dev->av_device_ref,
av_sw_format, pool_size);
}
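// libavcodec get_format callback: select the hwaccel pixel format if the
// hwdec can be (re)initialized for the new stream parameters; otherwise fall
// back to a software format and mark hw decoding as failed.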
static enum AVPixelFormat get_format_hwdec(struct AVCodecContext *avctx,
const enum AVPixelFormat *fmt)
{
struct dec_video *vd = avctx->opaque;
vd_ffmpeg_ctx *ctx = vd->priv;
MP_VERBOSE(vd, "Pixel formats supported by decoder:");
for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++)
MP_VERBOSE(vd, " %s", av_get_pix_fmt_name(fmt[i]));
MP_VERBOSE(vd, "\n");
const char *profile = avcodec_profile_name(avctx->codec_id, avctx->profile);
MP_VERBOSE(vd, "Codec profile: %s (0x%x)\n", profile ? profile : "unknown",
avctx->profile);
assert(ctx->hwdec);
ctx->hwdec_request_reinit |= ctx->hwdec_failed;
ctx->hwdec_failed = false;
enum AVPixelFormat select = AV_PIX_FMT_NONE;
for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
if (ctx->hwdec->image_format == pixfmt2imgfmt(fmt[i])) {
if (ctx->hwdec->generic_hwaccel) {
if (init_generic_hwaccel(vd) < 0)
break;
select = fmt[i];
break;
}
// There could be more reasons for a change, and it's possible
// that we miss some. (Might also depend on the hwaccel type.)
bool change =
ctx->hwdec_w != avctx->coded_width ||
ctx->hwdec_h != avctx->coded_height ||
ctx->hwdec_fmt != ctx->hwdec->image_format ||
ctx->hwdec_profile != avctx->profile ||
ctx->hwdec_request_reinit ||
ctx->hwdec->volatile_context;
ctx->hwdec_w = avctx->coded_width;
ctx->hwdec_h = avctx->coded_height;
ctx->hwdec_fmt = ctx->hwdec->image_format;
ctx->hwdec_profile = avctx->profile;
ctx->hwdec_request_reinit = false;
if (change && ctx->hwdec->init_decoder) {
if (ctx->hwdec->init_decoder(ctx, ctx->hwdec_w, ctx->hwdec_h) < 0)
{
ctx->hwdec_fmt = 0;
break;
}
}
select = fmt[i];
break;
}
}
if (select == AV_PIX_FMT_NONE) {
ctx->hwdec_failed = true;
for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
const AVPixFmtDescriptor *d = av_pix_fmt_desc_get(fmt[i]);
if (d && !(d->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
select = fmt[i];
break;
}
}
}
const char *name = av_get_pix_fmt_name(select);
MP_VERBOSE(vd, "Requesting pixfmt '%s' from decoder.\n", name ? name : "-");
return select;
}
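// get_buffer2 callback for direct rendering: back decoder frames with
// VO-allocated images so decoded frames can be displayed without an extra
// copy. Falls back to libavcodec's default allocator if DR is not possible.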
static int get_buffer2_direct(AVCodecContext *avctx, AVFrame *pic, int flags)
{
struct dec_video *vd = avctx->opaque;
vd_ffmpeg_ctx *p = vd->priv;
pthread_mutex_lock(&p->dr_lock);
int w = pic->width;
int h = pic->height;
int linesize_align[AV_NUM_DATA_POINTERS] = {0};
avcodec_align_dimensions2(avctx, &w, &h, linesize_align);
// We assume that different alignments are just different power-of-2s.
// Thus, a higher alignment always satisfies a lower alignment.
int stride_align = 0;
for (int n = 0; n < AV_NUM_DATA_POINTERS; n++)
stride_align = MPMAX(stride_align, linesize_align[n]);
int imgfmt = pixfmt2imgfmt(pic->format);
if (!imgfmt)
goto fallback;
if (p->dr_failed)
goto fallback;
// (For simplicity, we realloc on any parameter change, instead of trying
// to be clever.)
if (stride_align != p->dr_stride_align || w != p->dr_w || h != p->dr_h ||
imgfmt != p->dr_imgfmt)
{
mp_image_pool_clear(p->dr_pool);
p->dr_imgfmt = imgfmt;
p->dr_w = w;
p->dr_h = h;
p->dr_stride_align = stride_align;
MP_VERBOSE(p, "DR parameter change to %dx%d %s align=%d\n", w, h,
mp_imgfmt_to_name(imgfmt), stride_align);
}
struct mp_image *img = mp_image_pool_get_no_alloc(p->dr_pool, imgfmt, w, h);
if (!img) {
MP_VERBOSE(p, "Allocating new DR image...\n");
img = vo_get_image(vd->vo, imgfmt, w, h, stride_align);
if (!img) {
MP_VERBOSE(p, "...failed..\n");
goto fallback;
}
// Now make the mp_image part of the pool. This requires doing magic to
// the image, so just add it to the pool and get it back to avoid
// dealing with magic ourselves. (Normally this never fails.)
mp_image_pool_add(p->dr_pool, img);
img = mp_image_pool_get_no_alloc(p->dr_pool, imgfmt, w, h);
if (!img)
goto fallback;
}
// get_buffer2 callers seem very unappreciative of overwriting pic with a
// new reference. The AVCodecContext.get_buffer2 comments tell us exactly
// what we should do, so follow that.
for (int n = 0; n < 4; n++) {
pic->data[n] = img->planes[n];
pic->linesize[n] = img->stride[n];
pic->buf[n] = img->bufs[n];
img->bufs[n] = NULL;
}
talloc_free(img);
pthread_mutex_unlock(&p->dr_lock);
return 0;
fallback:
if (!p->dr_failed)
MP_VERBOSE(p, "DR failed - disabling.\n");
p->dr_failed = true;
pthread_mutex_unlock(&p->dr_lock);
return avcodec_default_get_buffer2(avctx, pic, flags);
}
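// get_buffer2 callback used for hwaccel decoding: allocate a hardware surface
// through the active hwdec backend, or fall back to the default allocator if
// hardware decoding has (already) failed.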
static int get_buffer2_hwdec(AVCodecContext *avctx, AVFrame *pic, int flags)
{
struct dec_video *vd = avctx->opaque;
vd_ffmpeg_ctx *ctx = vd->priv;
int imgfmt = pixfmt2imgfmt(pic->format);
if (!ctx->hwdec || ctx->hwdec_fmt != imgfmt)
ctx->hwdec_failed = true;
/* Hardware decoding failed, and we will trigger a proper fallback later
* when returning from the decode call. (We are forcing complete
* reinitialization later to reset the thread count properly.)
*/
if (ctx->hwdec_failed)
return avcodec_default_get_buffer2(avctx, pic, flags);
// We expect it to use the exact size used to create the hw decoder in
// get_format_hwdec(). For cropped video, this is expected to be the
// uncropped size (usually coded_width/coded_height).
int w = pic->width;
int h = pic->height;
if (imgfmt != ctx->hwdec_fmt && w != ctx->hwdec_w && h != ctx->hwdec_h)
return AVERROR(EINVAL);
struct mp_image *mpi = ctx->hwdec->allocate_image(ctx, w, h);
if (!mpi)
return AVERROR(ENOMEM);
for (int i = 0; i < 4; i++) {
pic->data[i] = mpi->planes[i];
pic->buf[i] = mpi->bufs[i];
mpi->bufs[i] = NULL;
}
talloc_free(mpi);
return 0;
}
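// Apply per-frame decode settings (framedrop/skip_frame) and perform a pending
// hwdec reinit if one was requested. Returns false if decoding cannot proceed.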
static bool prepare_decoding(struct dec_video *vd)
{
vd_ffmpeg_ctx *ctx = vd->priv;
AVCodecContext *avctx = ctx->avctx;
struct vd_lavc_params *opts = ctx->opts->vd_lavc_params;
if (!avctx || ctx->hwdec_failed)
return false;
int drop = ctx->framedrop_flags;
if (drop) {
// hr-seek framedrop vs. normal framedrop
avctx->skip_frame = drop == 2 ? AVDISCARD_NONREF : opts->framedrop;
} else {
// normal playback
avctx->skip_frame = ctx->skip_frame;
}
if (ctx->hwdec_request_reinit)
reset_avctx(vd);
return true;
}
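// Called after a decode error. Counts consecutive hwdec failures and flags a
// fallback to software decoding once the configured software-fallback
// threshold is reached.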
static void handle_err(struct dec_video *vd)
{
vd_ffmpeg_ctx *ctx = vd->priv;
struct vd_lavc_params *opts = ctx->opts->vd_lavc_params;
MP_WARN(vd, "Error while decoding frame!\n");
if (ctx->hwdec) {
ctx->hwdec_fail_count += 1;
// The FFmpeg VT hwaccel is buggy and can crash after 1 broken frame.
bool force = false;
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 82, 101)
force |= ctx->hwdec && ctx->hwdec->type == HWDEC_VIDEOTOOLBOX;
#endif
if (ctx->hwdec_fail_count >= opts->software_fallback || force)
ctx->hwdec_failed = true;
}
}
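// Feed one packet (or NULL to start draining) to libavcodec. While probing
// hardware decoding, sent packets are copied so they can be requeued after a
// fallback to software decoding. Returns false if the packet was not accepted
// (EAGAIN/EOF).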
static bool do_send_packet(struct dec_video *vd, struct demux_packet *pkt)
{
vd_ffmpeg_ctx *ctx = vd->priv;
AVCodecContext *avctx = ctx->avctx;
if (!prepare_decoding(vd))
return false;
AVPacket avpkt;
mp_set_av_packet(&avpkt, pkt, &ctx->codec_timebase);
int ret = avcodec_send_packet(avctx, pkt ? &avpkt : NULL);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return false;
if (ctx->hw_probing && ctx->num_sent_packets < 32) {
pkt = pkt ? demux_copy_packet(pkt) : NULL;
MP_TARRAY_APPEND(ctx, ctx->sent_packets, ctx->num_sent_packets, pkt);
}
if (ret < 0)
handle_err(vd);
return true;
}
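// Entry point for new packets. Packets queued for replay after a hwdec
// fallback are sent first; until that queue is drained, the caller's packet
// is rejected (return value false).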
static bool send_packet(struct dec_video *vd, struct demux_packet *pkt)
{
vd_ffmpeg_ctx *ctx = vd->priv;
if (ctx->num_requeue_packets) {
if (do_send_packet(vd, ctx->requeue_packets[0])) {
talloc_free(ctx->requeue_packets[0]);
MP_TARRAY_REMOVE_AT(ctx->requeue_packets, ctx->num_requeue_packets, 0);
}
return false;
}
return do_send_packet(vd, pkt);
}
// Returns whether decoder is still active (!EOF state).
static bool decode_frame(struct dec_video *vd)
{
vd_ffmpeg_ctx *ctx = vd->priv;
AVCodecContext *avctx = ctx->avctx;
if (!prepare_decoding(vd))
return true;
int ret = avcodec_receive_frame(avctx, ctx->pic);
if (ret == AVERROR_EOF) {
// If flushing was initialized earlier and has ended now, make it start
// over in case we get new packets at some point in the future.
reset_avctx(vd);
return false;
} else if (ret < 0 && ret != AVERROR(EAGAIN)) {
handle_err(vd);
}
if (!ctx->pic->buf[0])
return true;
ctx->hwdec_fail_count = 0;
AVFrameSideData *sd = NULL;
sd = av_frame_get_side_data(ctx->pic, AV_FRAME_DATA_A53_CC);
if (sd) {
struct demux_packet *cc = new_demux_packet_from(sd->data, sd->size);
cc->pts = vd->codec_pts;
cc->dts = vd->codec_dts;
demuxer_feed_caption(vd->header, cc);
}
struct mp_image *mpi = mp_image_from_av_frame(ctx->pic);
if (!mpi) {
av_frame_unref(ctx->pic);
return true;
}
assert(mpi->planes[0] || mpi->planes[3]);
mpi->pts = mp_pts_from_av(ctx->pic->pts, &ctx->codec_timebase);
mpi->dts = mp_pts_from_av(ctx->pic->pkt_dts, &ctx->codec_timebase);
#if LIBAVCODEC_VERSION_MICRO >= 100
mpi->pkt_duration =
mp_pts_from_av(av_frame_get_pkt_duration(ctx->pic), &ctx->codec_timebase);
#endif
#if HAVE_AVUTIL_ICC_PROFILE
sd = av_frame_get_side_data(ctx->pic, AV_FRAME_DATA_ICC_PROFILE);
if (sd)
mpi->icc_profile = av_buffer_ref(sd->buf);
#endif
update_image_params(vd, ctx->pic, &mpi->params);
av_frame_unref(ctx->pic);
MP_TARRAY_APPEND(ctx, ctx->delay_queue, ctx->num_delay_queue, mpi);
return true;
}
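// Return one decoded image to the caller, if available. Also triggers the
// hwdec -> software fallback, copies back frames from copying hwdecs, and
// drains the internal delay queue.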
static bool receive_frame(struct dec_video *vd, struct mp_image **out_image)
{
vd_ffmpeg_ctx *ctx = vd->priv;
assert(!*out_image);
bool progress = decode_frame(vd);
if (ctx->hwdec_failed) {
// Failed hardware decoding? Try again in software.
struct demux_packet **pkts = ctx->sent_packets;
int num_pkts = ctx->num_sent_packets;
ctx->sent_packets = NULL;
ctx->num_sent_packets = 0;
force_fallback(vd);
ctx->requeue_packets = pkts;
ctx->num_requeue_packets = num_pkts;
}
if (!ctx->num_delay_queue)
return progress;
if (ctx->num_delay_queue <= ctx->max_delay_queue && progress)
return true;
struct mp_image *res = ctx->delay_queue[0];
MP_TARRAY_REMOVE_AT(ctx->delay_queue, ctx->num_delay_queue, 0);
if (ctx->hwdec && ctx->hwdec->process_image)
res = ctx->hwdec->process_image(ctx, res);
res = res ? mp_img_swap_to_native(res) : NULL;
if (!res)
return progress;
if (ctx->hwdec && ctx->hwdec->copying && (res->fmt.flags & MP_IMGFLAG_HWACCEL))
{
struct mp_image *sw = mp_image_hw_download(res, ctx->hwdec_swpool);
mp_image_unrefp(&res);
res = sw;
if (!res) {
MP_ERR(vd, "Could not copy back hardware decoded frame.\n");
ctx->hwdec_fail_count = INT_MAX - 1; // force fallback
handle_err(vd);
return false;
}
}
if (!ctx->hwdec_notified && vd->opts->hwdec_api != HWDEC_NONE) {
if (ctx->hwdec) {
MP_INFO(vd, "Using hardware decoding (%s).\n",
m_opt_choice_str(mp_hwdec_names, ctx->hwdec->type));
} else {
MP_VERBOSE(vd, "Using software decoding.\n");
}
ctx->hwdec_notified = true;
}
if (ctx->hw_probing) {
for (int n = 0; n < ctx->num_sent_packets; n++)
talloc_free(ctx->sent_packets[n]);
ctx->num_sent_packets = 0;
ctx->hw_probing = false;
}
*out_image = res;
return true;
}
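// Dispatcher for VDCTRL_* commands (reset, framedrop mode, B-frame and hwdec
// queries, forced fallback, reinit).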
static int control(struct dec_video *vd, int cmd, void *arg)
{
vd_ffmpeg_ctx *ctx = vd->priv;
switch (cmd) {
case VDCTRL_RESET:
flush_all(vd);
return CONTROL_TRUE;
case VDCTRL_SET_FRAMEDROP:
ctx->framedrop_flags = *(int *)arg;
return CONTROL_TRUE;
case VDCTRL_GET_BFRAMES: {
AVCodecContext *avctx = ctx->avctx;
if (!avctx)
break;
if (ctx->hwdec && ctx->hwdec->type == HWDEC_RPI)
break; // MMAL has arbitrary buffering, thus unknown
*(int *)arg = avctx->has_b_frames;
return CONTROL_TRUE;
}
case VDCTRL_GET_HWDEC: {
*(int *)arg = ctx->hwdec ? ctx->hwdec->type : 0;
return CONTROL_TRUE;
}
case VDCTRL_FORCE_HWDEC_FALLBACK:
if (ctx->hwdec) {
force_fallback(vd);
return ctx->avctx ? CONTROL_OK : CONTROL_ERROR;
}
return CONTROL_FALSE;
case VDCTRL_REINIT:
reinit(vd);
return CONTROL_TRUE;
}
return CONTROL_UNKNOWN;
}
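// Add all video decoders provided by libavcodec to the decoder list.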
static void add_decoders(struct mp_decoder_list *list)
{
mp_add_lavc_decoders(list, AVMEDIA_TYPE_VIDEO);
}
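// Decoder entry points registered with the player core.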
const struct vd_functions mpcodecs_vd_ffmpeg = {
.name = "lavc",
.add_decoders = add_decoders,
.init = init,
.uninit = uninit,
.control = control,
.send_packet = send_packet,
.receive_frame = receive_frame,
};