// mpv/video/fmt-conversion.c

/*
* This file is part of mpv.
*
* mpv is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* mpv is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with mpv. If not, see <http://www.gnu.org/licenses/>.
*/

#include <libavutil/pixdesc.h>
#include <libavutil/avutil.h>
#include "video/img_format.h"
#include "fmt-conversion.h"
#include "config.h"
static const struct {
    int fmt;
    enum AVPixelFormat pix_fmt;
} conversion_map[] = {
    {IMGFMT_ARGB, AV_PIX_FMT_ARGB},
    {IMGFMT_BGRA, AV_PIX_FMT_BGRA},
    {IMGFMT_BGR24, AV_PIX_FMT_BGR24},
    {IMGFMT_RGB565, AV_PIX_FMT_RGB565},
    {IMGFMT_RGB555, AV_PIX_FMT_RGB555},
    {IMGFMT_RGB444, AV_PIX_FMT_RGB444},
    {IMGFMT_RGB8, AV_PIX_FMT_RGB8},
    {IMGFMT_RGB4, AV_PIX_FMT_RGB4},
    {IMGFMT_MONO, AV_PIX_FMT_MONOBLACK},
    {IMGFMT_MONO_W, AV_PIX_FMT_MONOWHITE},
    {IMGFMT_RGB4_BYTE, AV_PIX_FMT_RGB4_BYTE},
    {IMGFMT_BGR4_BYTE, AV_PIX_FMT_BGR4_BYTE},
    {IMGFMT_RGB48, AV_PIX_FMT_RGB48},
    {IMGFMT_ABGR, AV_PIX_FMT_ABGR},
    {IMGFMT_RGBA, AV_PIX_FMT_RGBA},
    {IMGFMT_RGB24, AV_PIX_FMT_RGB24},
    {IMGFMT_BGR565, AV_PIX_FMT_BGR565},
    {IMGFMT_BGR555, AV_PIX_FMT_BGR555},
    {IMGFMT_BGR444, AV_PIX_FMT_BGR444},
    {IMGFMT_BGR8, AV_PIX_FMT_BGR8},
    {IMGFMT_BGR4, AV_PIX_FMT_BGR4},
    {IMGFMT_PAL8, AV_PIX_FMT_PAL8},
    {IMGFMT_GBRP, AV_PIX_FMT_GBRP},
    {IMGFMT_YUYV, AV_PIX_FMT_YUYV422},
    {IMGFMT_UYVY, AV_PIX_FMT_UYVY422},
    {IMGFMT_NV12, AV_PIX_FMT_NV12},
    {IMGFMT_NV21, AV_PIX_FMT_NV21},
    {IMGFMT_Y8, AV_PIX_FMT_GRAY8},
    // FFmpeg prefers AV_PIX_FMT_GRAY8A, but Libav has only Y400A
    {IMGFMT_YA8, AV_PIX_FMT_Y400A},
    {IMGFMT_Y16, AV_PIX_FMT_GRAY16},
    {IMGFMT_410P, AV_PIX_FMT_YUV410P},
    {IMGFMT_420P, AV_PIX_FMT_YUV420P},
    {IMGFMT_411P, AV_PIX_FMT_YUV411P},
    {IMGFMT_422P, AV_PIX_FMT_YUV422P},
    {IMGFMT_444P, AV_PIX_FMT_YUV444P},
    {IMGFMT_440P, AV_PIX_FMT_YUV440P},
    {IMGFMT_420P16, AV_PIX_FMT_YUV420P16},
    {IMGFMT_420P9, AV_PIX_FMT_YUV420P9},
    {IMGFMT_420P10, AV_PIX_FMT_YUV420P10},
    {IMGFMT_422P10, AV_PIX_FMT_YUV422P10},
    {IMGFMT_444P9, AV_PIX_FMT_YUV444P9},
    {IMGFMT_444P10, AV_PIX_FMT_YUV444P10},
    {IMGFMT_422P16, AV_PIX_FMT_YUV422P16},
    {IMGFMT_422P9, AV_PIX_FMT_YUV422P9},
    {IMGFMT_444P16, AV_PIX_FMT_YUV444P16},
    // YUVJ are YUV formats that use the full Y range. Decoder color range
    // information is used instead. Deprecated in ffmpeg.
    {IMGFMT_420P, AV_PIX_FMT_YUVJ420P},
    {IMGFMT_422P, AV_PIX_FMT_YUVJ422P},
    {IMGFMT_444P, AV_PIX_FMT_YUVJ444P},
    {IMGFMT_440P, AV_PIX_FMT_YUVJ440P},
    {IMGFMT_420AP, AV_PIX_FMT_YUVA420P},
    {IMGFMT_422AP, AV_PIX_FMT_YUVA422P},
    {IMGFMT_444AP, AV_PIX_FMT_YUVA444P},
    {IMGFMT_XYZ12, AV_PIX_FMT_XYZ12},
#ifdef AV_PIX_FMT_YUV420P12
    {IMGFMT_420P12, AV_PIX_FMT_YUV420P12},
    {IMGFMT_420P14, AV_PIX_FMT_YUV420P14},
    {IMGFMT_422P12, AV_PIX_FMT_YUV422P12},
    {IMGFMT_422P14, AV_PIX_FMT_YUV422P14},
    {IMGFMT_444P12, AV_PIX_FMT_YUV444P12},
    {IMGFMT_444P14, AV_PIX_FMT_YUV444P14},
#endif
#ifdef AV_PIX_FMT_RGBA64
    {IMGFMT_RGBA64, AV_PIX_FMT_RGBA64},
    {IMGFMT_BGRA64, AV_PIX_FMT_BGRA64},
#endif
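    // LIBAVUTIL_VERSION_MICRO >= 100 identifies FFmpeg (Libav keeps it below
    // 100). Libav lacks the alpha-less 0/X formats, so fall back to the
    // corresponding alpha variants there.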
#if LIBAVUTIL_VERSION_MICRO >= 100
    {IMGFMT_BGR0, AV_PIX_FMT_BGR0},
    {IMGFMT_0RGB, AV_PIX_FMT_0RGB},
    {IMGFMT_RGB0, AV_PIX_FMT_RGB0},
    {IMGFMT_0BGR, AV_PIX_FMT_0BGR},
#else
    {IMGFMT_BGR0, AV_PIX_FMT_BGRA},
    {IMGFMT_0RGB, AV_PIX_FMT_ARGB},
    {IMGFMT_RGB0, AV_PIX_FMT_RGBA},
    {IMGFMT_0BGR, AV_PIX_FMT_ABGR},
#endif
#ifdef AV_PIX_FMT_YA16
    {IMGFMT_YA16, AV_PIX_FMT_YA16},
#endif
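    // Hardware (opaque surface) formats; frames reference decoder/GPU
    // surfaces rather than ordinary pixel data.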
    {IMGFMT_VDPAU, AV_PIX_FMT_VDPAU},
#if HAVE_VIDEOTOOLBOX_HWACCEL
    {IMGFMT_VIDEOTOOLBOX, AV_PIX_FMT_VIDEOTOOLBOX},
#endif
    {IMGFMT_VAAPI, AV_PIX_FMT_VAAPI_VLD},
    {IMGFMT_DXVA2, AV_PIX_FMT_DXVA2_VLD},
#if HAVE_AV_PIX_FMT_MMAL
    {IMGFMT_MMAL, AV_PIX_FMT_MMAL},
#endif
    {0, AV_PIX_FMT_NONE}
};
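
// Translate an mpv image format (IMGFMT_*) to the corresponding libavutil
// pixel format. Returns AV_PIX_FMT_NONE if no mapping exists.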
enum AVPixelFormat imgfmt2pixfmt(int fmt)
{
    if (fmt == IMGFMT_NONE)
        return AV_PIX_FMT_NONE;
    if (fmt >= IMGFMT_AVPIXFMT_START && fmt < IMGFMT_AVPIXFMT_END) {
        enum AVPixelFormat pixfmt = fmt - IMGFMT_AVPIXFMT_START;
        // Avoid duplicate format - each format must be unique.
        int mpfmt = pixfmt2imgfmt(pixfmt);
        if (mpfmt == fmt)
            return pixfmt;
        return AV_PIX_FMT_NONE;
    }
    for (int i = 0; conversion_map[i].fmt; i++) {
        if (conversion_map[i].fmt == fmt)
            return conversion_map[i].pix_fmt;
    }
    return AV_PIX_FMT_NONE;
}
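
// Translate a libavutil pixel format to the corresponding mpv image format.
// Pixel formats without an explicit mapping are passed through in the generic
// IMGFMT_AVPIXFMT_* range; returns 0 if that is not possible either.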
int pixfmt2imgfmt(enum AVPixelFormat pix_fmt)
{
    if (pix_fmt == AV_PIX_FMT_NONE)
        return IMGFMT_NONE;
    for (int i = 0; conversion_map[i].pix_fmt != AV_PIX_FMT_NONE; i++) {
        if (conversion_map[i].pix_fmt == pix_fmt)
            return conversion_map[i].fmt;
    }
    int generic = IMGFMT_AVPIXFMT_START + pix_fmt;
    if (generic < IMGFMT_AVPIXFMT_END)
        return generic;
    return 0;
}