1
0
mirror of https://github.com/mpv-player/mpv synced 2024-12-25 16:33:02 +00:00

video: add vda decode support (with hwaccel) and direct rendering

Decoding H264 using Video Decode Acceleration used the custom 'vda_h264_dec'
decoder in FFmpeg.

The Good: This new implementation has some advantages over the previous one:

 - It works with Libav: vda_h264_dec never got into Libav since they prefer
   client applications to use the hwaccel API.

 - It is way more efficient: in my tests this implementation yields a
   reduction of CPU usage of roughly ~50% compared to using `vda_h264_dec` and
   ~65-75% compared to h264 software decoding. This is mainly because
   `vo_corevideo` was adapted to perform direct rendering of the
   `CVPixelBufferRefs` created by the Video Decode Acceleration API Framework.

The Bad:
  - `vo_corevideo` is required to use VDA decoding acceleration.
  - only works with versions of ffmpeg/libav new enough (needs reference
    refcounting). That is FFmpeg 2.0+ and Libav's git master currently.

The Ugly: VDA was hardcoded to use UYVY (2vuy) for the uploaded video texture.
On one hand this makes the code simple since Apple's OpenGL implementation
actually supports this out of the box. It would be nice to support other
output image formats and choose the best format depending on the input, or at
least making it configurable. My tests indicate that CPU usage actually
increases with a 420p IMGFMT output which is not what I would have expected.

NOTE: There is a small memory leak with old versions of FFmpeg and with Libav
since the CVPixelBufferRef is not automatically released when the AVFrame is
deallocated. This can cause leaks inside libavcodec for decoded frames that
are discarded before mpv wraps them inside a refcounted mp_image (this only
happens on seeks).
For frames that enter mpv's refcounting facilities, this is not a problem
since we rewrap the CVPixelBufferRef in our mp_image that properly forwards
CVPixelBufferRetain/CVPixelBufferRelease calls to the underlying
CVPixelBufferRef.

So, for FFmpeg use something more recent than `b3d63995`; for Libav the patch
was posted to the dev ML in July and has been in review since — apparently
because the proposed fix is rather hacky.
This commit is contained in:
Stefano Pigozzi 2013-08-14 15:47:18 +02:00
parent 94b4a80d45
commit a9cb2dc1b8
10 changed files with 591 additions and 113 deletions

View File

@ -100,6 +100,8 @@ General Changes for mplayer2 to mpv
* Allow changing/adjusting video filters at runtime. (This is also used to make
the ``D`` key insert vf_yadif if deinterlacing is not supported otherwise.)
* Native VAAPI support
* OSX: VDA support using libavcodec hwaccel API instead of FFmpeg's decoder. Up
to 2-2.5x reduction in CPU usage.
* General bug fixes and removal of long-standing issues
* General code cleanups (including refactoring or rewrites of many parts)
* Many more changes

View File

@ -1062,7 +1062,7 @@
:auto: see below
:vdpau: requires ``--vo=vdpau``
:vaapi: requires ``--vo=vaapi``
:vda: OSX
:vda: requires ``--vo=corevideo`` (OSX only)
:crystalhd: Broadcom Crystal HD
``auto`` tries to automatically enable hardware decoding using the first

View File

@ -107,6 +107,7 @@ SOURCES-$(PULSE) += audio/out/ao_pulse.c
SOURCES-$(PORTAUDIO) += audio/out/ao_portaudio.c
SOURCES-$(RSOUND) += audio/out/ao_rsound.c
SOURCES-$(VDPAU) += video/vdpau.c video/out/vo_vdpau.c
SOURCES-$(VDA) += video/decode/vda.c
SOURCES-$(VDPAU_DEC) += video/decode/vdpau.c
SOURCES-$(VDPAU_DEC_OLD) += video/decode/vdpau_old.c
SOURCES-$(VAAPI) += video/out/vo_vaapi.c \

46
configure vendored
View File

@ -347,6 +347,7 @@ Video output:
--enable-sdl2 enable SDL 2.0+ audio and video output [disable]
--enable-xv enable Xv video output [autodetect]
--enable-vdpau enable VDPAU acceleration [autodetect]
--enable-vda enable VDA acceleration [autodetect]
--enable-vaapi enable VAAPI acceleration [autodetect]
--enable-vm enable XF86VidMode support [autodetect]
--enable-xinerama enable Xinerama support [autodetect]
@ -422,6 +423,8 @@ _wayland=auto
_xss=auto
_xv=auto
_vdpau=auto
_vda=auto
_vda_refcounting=auto
_vaapi=auto
_direct3d=auto
_sdl=no
@ -584,6 +587,8 @@ for ac_option do
--disable-xv) _xv=no ;;
--enable-vdpau) _vdpau=yes ;;
--disable-vdpau) _vdpau=no ;;
--enable-vda) _vda=yes ;;
--disable-vda) _vda=no ;;
--enable-vaapi) _vaapi=yes ;;
--disable-vaapi) _vaapi=no ;;
--enable-direct3d) _direct3d=yes ;;
@ -2815,6 +2820,42 @@ fi
echores "$libpostproc"
if darwin ; then
echocheck "VDA"
if test "$_vda" = auto ; then
_vda=no
if test "$_avutil_has_refcounting" = "yes" ; then
header_check VideoDecodeAcceleration/VDADecoder.h &&
header_check libavcodec/vda.h && _vda=yes
else
res_comment="libavutil too old"
fi
fi
if test "$_vda" = yes ; then
def_vda='#define CONFIG_VDA 1'
libs_mplayer="$libs_mplayer -framework VideoDecodeAcceleration -framework QuartzCore -framework IOSurface"
else
def_vda='#define CONFIG_VDA 0'
fi
echores "$_vda"
echocheck "VDA libavcodec refcounting"
_vda_refcounting=no
if test "$_vda" = yes ; then
statement_check libavcodec/vda.h 'struct vda_context a = (struct vda_context) { .use_ref_buffer = 1 }' &&
_vda_refcounting=yes
fi
if test "$_vda_refcounting" = "yes" ; then
def_vda_refcounting='#define HAVE_VDA_LIBAVCODEC_REFCOUNTING 1'
else
def_vda_refcounting='#define HAVE_VDA_LIBAVCODEC_REFCOUNTING 0'
fi
echores "$_vda_refcounting"
fi
echocheck "TV interface"
if test "$_tv" = yes ; then
def_tv='#define CONFIG_TV 1'
@ -3110,6 +3151,8 @@ VCD = $_vcd
VDPAU = $_vdpau
VDPAU_DEC = $_vdpau_dec
VDPAU_DEC_OLD = $_vdpau_dec_old
VDA = $_vda
VDA_REFCOUNTING = $_vda_refcounting
VAAPI = $_vaapi
WIN32 = $_win32
X11 = $_x11
@ -3120,6 +3163,7 @@ XV = $_xv
ENCODING = $_encoding
CONFIG_VDPAU = $_vdpau
CONFIG_VDA = $_vda
CONFIG_VAAPI = $_vaapi
CONFIG_ZLIB = $_zlib
@ -3286,6 +3330,8 @@ $def_jpeg
$def_mng
$def_v4l2
$def_vdpau
$def_vda
$def_vda_refcounting
$def_vaapi
$def_vm
$def_x11

View File

@ -84,6 +84,7 @@ const m_option_t lavc_decode_opts_conf[] = {
const struct vd_lavc_hwdec mp_vd_lavc_vdpau;
const struct vd_lavc_hwdec mp_vd_lavc_vdpau_old;
const struct vd_lavc_hwdec mp_vd_lavc_vda;
const struct vd_lavc_hwdec mp_vd_lavc_vaapi;
static const struct vd_lavc_hwdec mp_vd_lavc_crystalhd = {
@ -99,11 +100,6 @@ static const struct vd_lavc_hwdec mp_vd_lavc_crystalhd = {
},
};
static const struct vd_lavc_hwdec mp_vd_lavc_vda = {
.type = HWDEC_VDA,
.codec_pairs = (const char *[]) {"h264", "h264_vda", NULL},
};
static const struct vd_lavc_hwdec *hwdec_list[] = {
#if CONFIG_VDPAU
#if HAVE_AV_CODEC_NEW_VDPAU_API
@ -112,7 +108,9 @@ static const struct vd_lavc_hwdec *hwdec_list[] = {
&mp_vd_lavc_vdpau_old,
#endif
#endif // CONFIG_VDPAU
#if CONFIG_VDA
&mp_vd_lavc_vda,
#endif
&mp_vd_lavc_crystalhd,
#if CONFIG_VAAPI
&mp_vd_lavc_vaapi,
@ -748,7 +746,7 @@ static int decode(struct sh_video *sh, struct demux_packet *packet,
assert(mpi->planes[0]);
if (ctx->hwdec && ctx->hwdec->process_image)
ctx->hwdec->process_image(ctx, mpi);
mpi = ctx->hwdec->process_image(ctx, mpi);
mpi->colorspace = ctx->image_params.colorspace;
mpi->levels = ctx->image_params.colorlevels;

219
video/decode/vda.c Normal file
View File

@ -0,0 +1,219 @@
/*
* This file is part of mpv.
*
* Copyright (c) 2013 Stefano Pigozzi <stefano.pigozzi@gmail.com>
*
* mpv is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* mpv is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include <libavcodec/version.h>
#include <libavcodec/vda.h>
#include "mpvcore/av_common.h"
#include "mpvcore/mp_msg.h"
#include "video/mp_image.h"
#include "video/decode/lavc.h"
#include "config.h"
struct priv {
struct vda_context vda_ctx;
};
struct profile_entry {
enum AVCodecID av_codec;
int ff_profile;
uint32_t vda_codec;
};
static const struct profile_entry profiles[] = {
{ AV_CODEC_ID_H264, FF_PROFILE_UNKNOWN, 'avc1' },
};
static const struct profile_entry *find_codec(enum AVCodecID id, int ff_profile)
{
for (int n = 0; n < MP_ARRAY_SIZE(profiles); n++) {
if (profiles[n].av_codec == id &&
(profiles[n].ff_profile == ff_profile ||
profiles[n].ff_profile == FF_PROFILE_UNKNOWN))
{
return &profiles[n];
}
}
return NULL;
}
// Translation table from VDA framework status codes to human readable
// messages. Terminated by a {0, NULL} sentinel entry.
struct vda_error {
    int code;
    char *reason;
};

static const struct vda_error vda_errors[] = {
    { kVDADecoderHardwareNotSupportedErr,
        "Hardware doesn't support accelerated decoding" },
    { kVDADecoderFormatNotSupportedErr,
        "Hardware doesn't support requested output format" },
    { kVDADecoderConfigurationError,
        "Invalid configuration provided to VDADecoderCreate" },
    { kVDADecoderDecoderFailedErr,
        "Generic error returned by the decoder layer. The cause can range from"
        " VDADecoder finding errors in the bitstream to another application"
        " using VDA at the moment. Only one application can use VDA at a"
        // Fixed typo in user-visible message: "givent" -> "given".
        " given time." },
    { 0, NULL },
};
/**
 * Log a VDA status code, with a human readable explanation when the code
 * is present in vda_errors[].
 *
 * lev:        mp_msg log level (e.g. MSGL_ERR)
 * message:    context prefix for the log line
 * error_code: raw status returned by the VDA framework
 */
static void print_vda_error(int lev, char *message, int error_code)
{
    // Walk the table up to its explicit {0, NULL} sentinel. The original
    // condition (code < 0) only worked because all currently known VDA
    // error codes happen to be negative.
    for (int n = 0; vda_errors[n].reason; n++)
        if (vda_errors[n].code == error_code) {
            mp_msg(MSGT_DECVIDEO, lev, "%s: %s (%d)\n",
                   message, vda_errors[n].reason, error_code);
            return;
        }

    // Unknown code: print it raw.
    mp_msg(MSGT_DECVIDEO, lev, "%s: %d\n", message, error_code);
}
static int probe(struct vd_lavc_hwdec *hwdec, struct mp_hwdec_info *info,
const char *decoder)
{
if (!find_codec(mp_codec_to_av_codec_id(decoder), FF_PROFILE_UNKNOWN))
return HWDEC_ERR_NO_CODEC;
return 0;
}
static int init_vda_decoder(struct lavc_ctx *ctx)
{
struct priv *p = ctx->hwdec_priv;
if (p->vda_ctx.decoder)
ff_vda_destroy_decoder(&p->vda_ctx);
const struct profile_entry *pe =
find_codec(ctx->avctx->codec_id, ctx->avctx->profile);
p->vda_ctx = (struct vda_context) {
.width = ctx->avctx->width,
.height = ctx->avctx->height,
.format = pe->vda_codec,
.cv_pix_fmt_type = kCVPixelFormatType_422YpCbCr8,
#if HAVE_VDA_LIBAVCODEC_REFCOUNTING
.use_ref_buffer = 1,
#endif
// use_ref_buffer is 1 in ffmpeg (while libav doesn't support this
// feature). This means that in the libav case, libavcodec returns us
// a CVPixelBuffer with refcount=1 AND hands over ownership of that
// reference.
// This is slightly different from a typical refcounted situation
// where the API would return something that we need to to retain
// for it to stay around (ffmpeg behaves like expected when using
// use_ref_buffer = 1).
// If mpv doesn't properly free CVPixelBufferRefs that are no longer
// used, the wrapped IOSurface ids increase monotonically hinting at
// a leaking of both CVPixelBuffers and IOSurfaces.
};
int status = ff_vda_create_decoder(
&p->vda_ctx, ctx->avctx->extradata, ctx->avctx->extradata_size);
if (status) {
print_vda_error(MSGL_ERR, "[vda] failed to init decoder", status);
return -1;
}
return 0;
}
// hwdec init hook: allocate our state and hand the (not yet created) VDA
// context to libavcodec. The actual hardware decoder is created lazily in
// allocate_image() once the frame dimensions are known.
static int init(struct lavc_ctx *ctx)
{
    struct priv *p = talloc_zero(NULL, struct priv);
    ctx->hwdec_priv = p;

    ctx->avctx->hwaccel_context = &p->vda_ctx;
    return 0;
}
static void uninit(struct lavc_ctx *ctx) {
struct priv *p = ctx->hwdec_priv;
if (p->vda_ctx.decoder)
ff_vda_destroy_decoder(&p->vda_ctx);
}
// void* wrappers around CoreVideo's refcounting so they match the
// ref/unref callback signature of mp_image_new_external_ref().
static void cv_retain(void *pbuf)
{
    CVPixelBufferRetain((CVPixelBufferRef)pbuf);
}

static void cv_release(void *pbuf)
{
    CVPixelBufferRelease((CVPixelBufferRef)pbuf);
}
// Wrap the CVPixelBufferRef stored in mpi->planes[3] in a refcounted
// mp_image that forwards retain/release to CoreVideo.
static struct mp_image *mp_image_new_cv_ref(struct mp_image *mpi)
{
    CVPixelBufferRef pbuf = (CVPixelBufferRef)mpi->planes[3];
    // mp_image_new_external_ref assumes the external reference count is
    // already 1 so the calls to cv_retain and cv_release are unbalanced (
    // in favor of cv_release). To balance out the retain count we need to
    // retain the CVPixelBufferRef if ffmpeg is set to automatically release
    // it when the AVFrame is unreffed.
#if HAVE_VDA_LIBAVCODEC_REFCOUNTING
    cv_retain(pbuf);
#endif
    return mp_image_new_external_ref(mpi,
        pbuf, cv_retain, cv_release, NULL, NULL);
}
// hwdec process_image hook: replace the decoder's output image with a
// CoreVideo-backed reference (see mp_image_new_cv_ref) and drop the input.
// Ownership of mpi is taken; callers receive the new wrapped image.
static struct mp_image *process_image(struct lavc_ctx *ctx, struct mp_image *mpi)
{
    struct mp_image *cv_mpi = mp_image_new_cv_ref(mpi);
    mp_image_unrefp(&mpi);
    return cv_mpi;
}
// This actually returns dummy images, since vda_264 creates its own AVFrames
// to wrap CVPixelBuffers in planes[3].
// Also (re)creates the VDA decoder whenever the frame size changes; this
// is where the first decoder initialization happens (init() does not
// create it).
static struct mp_image *allocate_image(struct lavc_ctx *ctx, int fmt,
                                       int w, int h)
{
    struct priv *p = ctx->hwdec_priv;

    if (fmt != IMGFMT_VDA)
        return NULL;

    // NOTE(review): a failure of init_vda_decoder() is not propagated
    // here; decoding will then fail later — confirm this is acceptable.
    if (w != p->vda_ctx.width || h != p->vda_ctx.height)
        init_vda_decoder(ctx);

    struct mp_image img = {0};
    mp_image_setfmt(&img, fmt);
    mp_image_set_size(&img, w, h);

    // There is an `assert(!dst->f.buf[0])` in libavcodec/h264.c
    // Setting the first plane to some dummy value allows to satisfy it
    img.planes[0] = (void*)"dummy";

    return mp_image_new_custom_ref(&img, NULL, NULL);
}
// VDA hwdec backend descriptor, registered in vd_lavc's hwdec_list.
const struct vd_lavc_hwdec mp_vd_lavc_vda = {
    .type           = HWDEC_VDA,
    .image_formats  = (const int[]) { IMGFMT_VDA, 0 },
    .probe          = probe,
    .init           = init,
    .uninit         = uninit,
    .allocate_image = allocate_image,
    .process_image  = process_image,
};

View File

@ -184,6 +184,8 @@ static const struct {
{IMGFMT_VDPAU, PIX_FMT_VDPAU_H264},
#endif
{ IMGFMT_VDA, PIX_FMT_VDA_VLD},
{IMGFMT_VAAPI, PIX_FMT_VAAPI_VLD},
{IMGFMT_VAAPI_MPEG2_IDCT, PIX_FMT_VAAPI_IDCT},
{IMGFMT_VAAPI_MPEG2_MOCO, PIX_FMT_VAAPI_MOCO},

View File

@ -119,6 +119,7 @@ struct mp_imgfmt_entry mp_imgfmt_list[] = {
FMT("vdpau_vc1", IMGFMT_VDPAU_VC1)
FMT("vdpau_mpeg4", IMGFMT_VDPAU_MPEG4)
FMT("vdpau", IMGFMT_VDPAU)
FMT("vda", IMGFMT_VDA)
FMT("vaapi", IMGFMT_VAAPI)
FMT("vaapi_mpeg2_idct", IMGFMT_VAAPI_MPEG2_IDCT)
FMT("vaapi_mpeg2_moco", IMGFMT_VAAPI_MPEG2_MOCO)

View File

@ -252,6 +252,8 @@ enum mp_imgfmt {
IMGFMT_VDPAU_FIRST = IMGFMT_VDPAU,
IMGFMT_VDPAU_LAST = IMGFMT_VDPAU_MPEG4,
IMGFMT_VDA,
IMGFMT_VAAPI,
IMGFMT_VAAPI_MPEG2_IDCT,
IMGFMT_VAAPI_MPEG2_MOCO,
@ -335,10 +337,13 @@ static inline bool IMGFMT_IS_RGB(unsigned int fmt)
#define IMGFMT_IS_VDPAU(fmt) \
(((fmt) >= IMGFMT_VDPAU_FIRST) && ((fmt) <= IMGFMT_VDPAU_LAST))
#define IMGFMT_IS_VDA(fmt) (((fmt) == IMGFMT_VDA))
#define IMGFMT_IS_VAAPI(fmt) \
(((fmt) >= IMGFMT_VAAPI_FIRST) && ((fmt) <= IMGFMT_VAAPI_LAST))
#define IMGFMT_IS_HWACCEL(fmt) (IMGFMT_IS_VDPAU(fmt) || IMGFMT_IS_VAAPI(fmt))
#define IMGFMT_IS_HWACCEL(fmt) \
(IMGFMT_IS_VDPAU(fmt) || IMGFMT_IS_VAAPI(fmt) || IMGFMT_IS_VDA(fmt))
struct mp_imgfmt_entry {

View File

@ -1,6 +1,7 @@
/*
* CoreVideo video output driver
* Copyright (c) 2005 Nicolas Plourde <nicolasplourde@gmail.com>
* Copyright (c) 2012-2013 Stefano Pigozzi <stefano.pigozzi@gmail.com>
*
* This file is part of MPlayer.
*
@ -19,7 +20,13 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <QuartzCore/QuartzCore.h>
#if CONFIG_VDA
#include <IOSurface/IOSurface.h>
#endif
#include <assert.h>
#include "talloc.h"
@ -42,9 +49,32 @@ struct quad {
GLfloat upperLeft[2];
};
struct cv_priv {
CVPixelBufferRef pbuf;
CVOpenGLTextureCacheRef texture_cache;
CVOpenGLTextureRef texture;
OSType pixfmt;
};
struct dr_priv {
CVPixelBufferRef pbuf;
bool texture_allocated;
GLuint texture;
GLuint texture_target;
};
struct cv_functions {
void (*init)(struct vo *vo);
void (*uninit)(struct vo *vo);
void (*prepare_texture)(struct vo *vo, struct mp_image *mpi);
void (*bind_texture)(struct vo *vo);
void (*unbind_texture)(struct vo *vo);
mp_image_t *(*get_screenshot)(struct vo *vo);
int (*set_colormatrix)(struct vo *vo, struct mp_csp_details *csp);
};
struct priv {
MPGLContext *mpglctx;
OSType pixelFormat;
unsigned int image_width;
unsigned int image_height;
struct mp_csp_details colorspace;
@ -52,12 +82,21 @@ struct priv {
struct mp_rect dst_rect;
struct mp_osd_res osd_res;
CVPixelBufferRef pixelBuffer;
CVOpenGLTextureCacheRef textureCache;
CVOpenGLTextureRef texture;
struct quad *quad;
// state for normal CoreVideo rendering path: uploads mp_image data as
// OpenGL textures.
struct cv_priv cv;
// state for IOSurface based direct rendering path: accesses the IOSurface
// wrapped by the CVPixelBuffer returned by VDADecoder and directly
// renders it to the screen.
struct dr_priv dr;
struct quad *quad;
struct mpgl_osd *osd;
// functions to deal with the OpenGL texture containing the
// video frame (behaviour changes depending on the rendering path).
struct cv_functions fns;
};
static void resize(struct vo *vo)
@ -98,17 +137,8 @@ static int init_gl(struct vo *vo, uint32_t d_width, uint32_t d_height)
gl->Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (gl->SwapInterval)
gl->SwapInterval(1);
return 1;
}
static void release_cv_entities(struct vo *vo) {
struct priv *p = vo->priv;
CVPixelBufferRelease(p->pixelBuffer);
p->pixelBuffer = NULL;
CVOpenGLTextureRelease(p->texture);
p->texture = NULL;
CVOpenGLTextureCacheRelease(p->textureCache);
p->textureCache = NULL;
return 1;
}
static int config(struct vo *vo, uint32_t width, uint32_t height,
@ -116,7 +146,8 @@ static int config(struct vo *vo, uint32_t width, uint32_t height,
uint32_t format)
{
struct priv *p = vo->priv;
release_cv_entities(vo);
p->fns.uninit(vo);
p->image_width = width;
p->image_height = height;
@ -125,26 +156,11 @@ static int config(struct vo *vo, uint32_t width, uint32_t height,
return -1;
init_gl(vo, vo->dwidth, vo->dheight);
p->fns.init(vo);
return 0;
}
static void prepare_texture(struct vo *vo)
{
struct priv *p = vo->priv;
struct quad *q = p->quad;
CVReturn error;
CVOpenGLTextureRelease(p->texture);
error = CVOpenGLTextureCacheCreateTextureFromImage(NULL,
p->textureCache, p->pixelBuffer, 0, &p->texture);
if(error != kCVReturnSuccess)
MP_ERR(vo, "Failed to create OpenGL texture(%d)\n", error);
CVOpenGLTextureGetCleanTexCoords(p->texture, q->lowerLeft, q->lowerRight,
q->upperRight, q->upperLeft);
}
// map x/y (in range 0..1) to the video texture, and emit OpenGL vertexes
static void video_vertex(struct vo *vo, float x, float y)
{
@ -172,12 +188,8 @@ static void do_render(struct vo *vo)
{
struct priv *p = vo->priv;
GL *gl = p->mpglctx->gl;
prepare_texture(vo);
gl->Enable(CVOpenGLTextureGetTarget(p->texture));
gl->BindTexture(
CVOpenGLTextureGetTarget(p->texture),
CVOpenGLTextureGetName(p->texture));
p->fns.bind_texture(vo);
gl->Begin(GL_QUADS);
video_vertex(vo, 0, 0);
@ -186,7 +198,7 @@ static void do_render(struct vo *vo)
video_vertex(vo, 1, 0);
gl->End();
gl->Disable(CVOpenGLTextureGetTarget(p->texture));
p->fns.unbind_texture(vo);
}
static void flip_page(struct vo *vo)
@ -196,61 +208,19 @@ static void flip_page(struct vo *vo)
p->mpglctx->gl->Clear(GL_COLOR_BUFFER_BIT);
}
static void draw_image(struct vo *vo, mp_image_t *mpi)
static void draw_image(struct vo *vo, struct mp_image *mpi)
{
struct priv *p = vo->priv;
CVReturn error;
if (!p->textureCache || !p->pixelBuffer) {
error = CVOpenGLTextureCacheCreate(NULL, 0, vo_cocoa_cgl_context(vo),
vo_cocoa_cgl_pixel_format(vo), 0, &p->textureCache);
if(error != kCVReturnSuccess)
MP_ERR(vo, "Failed to create OpenGL texture Cache(%d)\n", error);
error = CVPixelBufferCreateWithBytes(NULL, mpi->w, mpi->h,
p->pixelFormat, mpi->planes[0], mpi->stride[0],
NULL, NULL, NULL, &p->pixelBuffer);
if(error != kCVReturnSuccess)
MP_ERR(vo, "Failed to create PixelBuffer(%d)\n", error);
}
p->fns.prepare_texture(vo, mpi);
do_render(vo);
}
static int query_format(struct vo *vo, uint32_t format)
{
struct priv *p = vo->priv;
const int flags = VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW;
switch (format) {
case IMGFMT_YUYV:
p->pixelFormat = kYUVSPixelFormat;
return flags;
case IMGFMT_UYVY:
p->pixelFormat = k2vuyPixelFormat;
return flags;
case IMGFMT_RGB24:
p->pixelFormat = k24RGBPixelFormat;
return flags;
case IMGFMT_ARGB:
p->pixelFormat = k32ARGBPixelFormat;
return flags;
case IMGFMT_BGRA:
p->pixelFormat = k32BGRAPixelFormat;
return flags;
}
return 0;
}
static void uninit(struct vo *vo)
{
struct priv *p = vo->priv;
if (p->osd)
mpgl_osd_destroy(p->osd);
release_cv_entities(vo);
p->fns.uninit(vo);
mpgl_uninit(p->mpglctx);
}
@ -287,23 +257,38 @@ static CFStringRef get_cv_csp_matrix(struct vo *vo)
case MP_CSP_SMPTE_240M:
return kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
default:
return kCVImageBufferYCbCrMatrix_ITU_R_601_4;
return NULL;
}
}
// NOTE(review): this span is a rendered diff hunk — the old one-argument
// set_yuv_colorspace signature/body is interleaved with its replacement.
static void set_yuv_colorspace(struct vo *vo)
static int set_yuv_colorspace(struct vo *vo, CVPixelBufferRef pbuf,
                              struct mp_csp_details *csp)
{
    struct priv *p = vo->priv;
    CVBufferSetAttachment(p->pixelBuffer,
        kCVImageBufferYCbCrMatrixKey, get_cv_csp_matrix(vo),
        kCVAttachmentMode_ShouldPropagate);
    vo->want_redraw = true;
    p->colorspace = *csp;
    CFStringRef cv_csp = get_cv_csp_matrix(vo);
    if (cv_csp) {
        // NOTE(review): the new code attaches the colormatrix to
        // p->cv.pbuf and ignores the `pbuf` parameter. The iosurface path
        // passes p->dr.pbuf, which is therefore never used — this looks
        // like it should be CVBufferSetAttachment(pbuf, ...). Confirm.
        CVBufferSetAttachment(p->cv.pbuf, kCVImageBufferYCbCrMatrixKey, cv_csp,
            kCVAttachmentMode_ShouldNotPropagate);
        vo->want_redraw = true;
        return VO_TRUE;
    } else {
        return VO_NOTIMPL;
    }
}
static int get_image_fmt(struct vo *vo)
static int get_yuv_colorspace(struct vo *vo, struct mp_csp_details *csp)
{
struct priv *p = vo->priv;
switch (p->pixelFormat) {
*csp = p->colorspace;
return VO_TRUE;
}
static int get_image_fmt(struct vo *vo, CVPixelBufferRef pbuf)
{
OSType pixfmt = CVPixelBufferGetPixelFormatType(pbuf);
switch (pixfmt) {
case kYUVSPixelFormat: return IMGFMT_YUYV;
case k2vuyPixelFormat: return IMGFMT_UYVY;
case k24RGBPixelFormat: return IMGFMT_RGB24;
@ -311,21 +296,21 @@ static int get_image_fmt(struct vo *vo)
case k32BGRAPixelFormat: return IMGFMT_BGRA;
}
MP_ERR(vo, "Failed to convert pixel format. Please contact the "
"developers. PixelFormat: %d\n", p->pixelFormat);
"developers. PixelFormat: %d\n", pixfmt);
return -1;
}
static mp_image_t *get_screenshot(struct vo *vo)
static mp_image_t *get_screenshot(struct vo *vo, CVPixelBufferRef pbuf)
{
int img_fmt = get_image_fmt(vo);
int img_fmt = get_image_fmt(vo, pbuf);
if (img_fmt < 0) return NULL;
struct priv *p = vo->priv;
void *base = CVPixelBufferGetBaseAddress(p->pixelBuffer);
size_t width = CVPixelBufferGetWidth(p->pixelBuffer);
size_t height = CVPixelBufferGetHeight(p->pixelBuffer);
size_t stride = CVPixelBufferGetBytesPerRow(p->pixelBuffer);
CVPixelBufferLockBaseAddress(pbuf, 0);
void *base = CVPixelBufferGetBaseAddress(pbuf);
size_t width = CVPixelBufferGetWidth(pbuf);
size_t height = CVPixelBufferGetHeight(pbuf);
size_t stride = CVPixelBufferGetBytesPerRow(pbuf);
struct mp_image img = {0};
mp_image_setfmt(&img, img_fmt);
@ -336,6 +321,7 @@ static mp_image_t *get_screenshot(struct vo *vo)
struct mp_image *image = mp_image_new_copy(&img);
mp_image_set_display_size(image, vo->aspdat.prew, vo->aspdat.preh);
mp_image_set_colorspace_details(image, &p->colorspace);
CVPixelBufferUnlockBaseAddress(pbuf, 0);
return image;
}
@ -353,18 +339,15 @@ static int control(struct vo *vo, uint32_t request, void *data)
do_render(vo);
return VO_TRUE;
case VOCTRL_SET_YUV_COLORSPACE:
p->colorspace.format = ((struct mp_csp_details *)data)->format;
set_yuv_colorspace(vo);
return VO_TRUE;
return p->fns.set_colormatrix(vo, data);
case VOCTRL_GET_YUV_COLORSPACE:
*(struct mp_csp_details *)data = p->colorspace;
return VO_TRUE;
return get_yuv_colorspace(vo, data);
case VOCTRL_SCREENSHOT: {
struct voctrl_screenshot_args *args = data;
if (args->full_window)
args->out_image = glGetWindowScreenshot(p->mpglctx->gl);
else
args->out_image = get_screenshot(vo);
args->out_image = p->fns.get_screenshot(vo);
return VO_TRUE;
}
}
@ -377,6 +360,227 @@ static int control(struct vo *vo, uint32_t request, void *data)
return r;
}
// No-op callback for rendering paths that need no per-path initialization.
static void dummy_cb(struct vo *vo) { }

// Tear down the texture-upload path's CoreVideo objects. Every pointer is
// reset to NULL afterwards, so a repeated call (e.g. from config()) is
// harmless.
static void cv_uninit(struct vo *vo)
{
    struct priv *p = vo->priv;
    CVPixelBufferRelease(p->cv.pbuf);
    p->cv.pbuf = NULL;
    CVOpenGLTextureRelease(p->cv.texture);
    p->cv.texture = NULL;
    CVOpenGLTextureCacheRelease(p->cv.texture_cache);
    p->cv.texture_cache = NULL;
}
// Enable and bind the CoreVideo-created texture for drawing; the target is
// whatever CVOpenGLTextureCacheCreateTextureFromImage produced.
static void cv_bind_texture(struct vo *vo)
{
    struct priv *p = vo->priv;
    GL *gl = p->mpglctx->gl;

    gl->Enable(CVOpenGLTextureGetTarget(p->cv.texture));
    gl->BindTexture(CVOpenGLTextureGetTarget(p->cv.texture),
                    CVOpenGLTextureGetName(p->cv.texture));
}

// Disable the texture target again after drawing.
static void cv_unbind_texture(struct vo *vo)
{
    struct priv *p = vo->priv;
    GL *gl = p->mpglctx->gl;

    gl->Disable(CVOpenGLTextureGetTarget(p->cv.texture));
}
// Texture-upload path: wrap the mp_image's pixel data in a CVPixelBuffer
// (created once), create a per-frame OpenGL texture from it via the
// texture cache, and fetch the clean texture coordinates for the quad.
static void upload_opengl_texture(struct vo *vo, struct mp_image *mpi)
{
    struct priv *p = vo->priv;

    // Lazy one-time setup. The pixel buffer wraps mpi->planes[0] without
    // copying. NOTE(review): this assumes subsequent frames are delivered
    // in the same buffer — confirm against the draw_image contract.
    if (!p->cv.texture_cache || !p->cv.pbuf) {
        CVReturn error;
        error = CVOpenGLTextureCacheCreate(NULL, 0, vo_cocoa_cgl_context(vo),
            vo_cocoa_cgl_pixel_format(vo), 0, &p->cv.texture_cache);
        if(error != kCVReturnSuccess)
            MP_ERR(vo, "Failed to create OpenGL texture Cache(%d)\n", error);

        error = CVPixelBufferCreateWithBytes(NULL, mpi->w, mpi->h,
            p->cv.pixfmt, mpi->planes[0], mpi->stride[0],
            NULL, NULL, NULL, &p->cv.pbuf);
        if(error != kCVReturnSuccess)
            MP_ERR(vo, "Failed to create PixelBuffer(%d)\n", error);
    }

    struct quad *q = p->quad;
    CVReturn error;

    // A fresh texture is created for every frame; drop the previous one.
    CVOpenGLTextureRelease(p->cv.texture);
    error = CVOpenGLTextureCacheCreateTextureFromImage(NULL,
        p->cv.texture_cache, p->cv.pbuf, 0, &p->cv.texture);
    if(error != kCVReturnSuccess)
        MP_ERR(vo, "Failed to create OpenGL texture(%d)\n", error);

    CVOpenGLTextureGetCleanTexCoords(p->cv.texture,
        q->lowerLeft, q->lowerRight, q->upperRight, q->upperLeft);
}
// Screenshot hook for the texture-upload path: read back from the
// CVPixelBuffer that wraps the last uploaded frame.
static mp_image_t *cv_get_screenshot(struct vo *vo)
{
    struct priv *p = vo->priv;
    return get_screenshot(vo, p->cv.pbuf);
}

// Colormatrix hook for the texture-upload path.
static int cv_set_colormatrix(struct vo *vo, struct mp_csp_details *csp)
{
    struct priv *p = vo->priv;
    return set_yuv_colorspace(vo, p->cv.pbuf, csp);
}

// vtable for the normal CoreVideo rendering path (software-decoded frames
// uploaded as OpenGL textures). No per-path init is required.
static struct cv_functions cv_functions = {
    .init            = dummy_cb,
    .uninit          = cv_uninit,
    .bind_texture    = cv_bind_texture,
    .unbind_texture  = cv_unbind_texture,
    .prepare_texture = upload_opengl_texture,
    .get_screenshot  = cv_get_screenshot,
    .set_colormatrix = cv_set_colormatrix,
};
#if CONFIG_VDA
// Set up the direct rendering path: allocate the GL texture name that
// decoder IOSurfaces will be attached to (GL_TEXTURE_RECTANGLE_ARB target).
// NOTE(review): bind_texture runs before GenTextures, so the bind here
// still refers to texture name 0; the generated name is only bound on the
// next bind_texture call — confirm this ordering is intentional.
static void iosurface_init(struct vo *vo)
{
    struct priv *p = vo->priv;
    GL *gl = p->mpglctx->gl;

    p->dr.texture_target = GL_TEXTURE_RECTANGLE_ARB;
    p->fns.bind_texture(vo);
    gl->GenTextures(1, &p->dr.texture);
    p->fns.unbind_texture(vo);

    p->dr.texture_allocated = true;
}
// Tear down the direct rendering path: delete the GL texture if it was
// allocated. Safe to call more than once.
static void iosurface_uninit(struct vo *vo)
{
    struct priv *p = vo->priv;
    GL *gl = p->mpglctx->gl;

    if (!p->dr.texture_allocated)
        return;

    gl->DeleteTextures(1, &p->dr.texture);
    p->dr.texture_allocated = false;
}
// Bind the IOSurface-backed rectangle texture and reset the texture
// matrix; GL_REPLACE keeps the current GL color from being mixed in.
static void iosurface_bind_texture(struct vo *vo)
{
    struct priv *p = vo->priv;
    GL *gl = p->mpglctx->gl;

    gl->Enable(p->dr.texture_target);
    gl->BindTexture(p->dr.texture_target, p->dr.texture);
    gl->MatrixMode(GL_TEXTURE);
    gl->LoadIdentity();
    gl->TexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
}

// Unbind and disable the rectangle texture after drawing.
static void iosurface_unbind_texture(struct vo *vo)
{
    struct priv *p = vo->priv;
    GL *gl = p->mpglctx->gl;

    gl->BindTexture(p->dr.texture_target, 0);
    gl->Disable(p->dr.texture_target);
}
// Direct rendering: take the CVPixelBufferRef the VDA decoder stored in
// mpi->planes[3] and attach its backing IOSurface to our rectangle
// texture — no pixel data is copied.
static void extract_texture_from_iosurface(struct vo *vo, struct mp_image *mpi)
{
    struct priv *p = vo->priv;

    // Keep a reference to the current frame's buffer; drop the previous.
    CVPixelBufferRelease(p->dr.pbuf);
    p->dr.pbuf = (CVPixelBufferRef)mpi->planes[3];
    CVPixelBufferRetain(p->dr.pbuf);

    IOSurfaceRef surface = CVPixelBufferGetIOSurface(p->dr.pbuf);
    MP_DBG(vo, "iosurface id: %d\n", IOSurfaceGetID(surface));

    p->fns.bind_texture(vo);

    // GL_YCBCR_422_APPLE matches the kCVPixelFormatType_422YpCbCr8 ('2vuy')
    // output that the decoder side (video/decode/vda.c) is configured for.
    CGLError err = CGLTexImageIOSurface2D(
        vo_cocoa_cgl_context(vo), p->dr.texture_target, GL_RGB8,
        p->image_width, p->image_height,
        GL_YCBCR_422_APPLE, GL_UNSIGNED_SHORT_8_8_APPLE, surface, 0);

    if (err != kCGLNoError)
        MP_ERR(vo, "error creating IOSurface texture: %s (%x)\n",
               CGLErrorString(err), glGetError());

    p->fns.unbind_texture(vo);

    // video_vertex flips the coordinates.. so feed in a flipped quad
    *p->quad = (struct quad) {
        .lowerRight = { p->image_width, p->image_height },
        .upperLeft  = { 0.0, 0.0 },
    };
}
// Screenshot hook for the direct rendering path: read back from the
// decoder-provided CVPixelBuffer.
static mp_image_t *iosurface_get_screenshot(struct vo *vo)
{
    struct priv *p = vo->priv;
    return get_screenshot(vo, p->dr.pbuf);
}

// Colormatrix hook for the direct rendering path.
// NOTE(review): set_yuv_colorspace appears to attach the matrix to the cv
// path's pixel buffer rather than the pbuf passed here — verify that
// p->dr.pbuf is actually honored.
static int iosurface_set_colormatrix(struct vo *vo, struct mp_csp_details *csp)
{
    struct priv *p = vo->priv;
    return set_yuv_colorspace(vo, p->dr.pbuf, csp);
}

// vtable for the IOSurface-based direct rendering path (VDA hardware
// decoded frames).
static struct cv_functions iosurface_functions = {
    .init            = iosurface_init,
    .uninit          = iosurface_uninit,
    .bind_texture    = iosurface_bind_texture,
    .unbind_texture  = iosurface_unbind_texture,
    .prepare_texture = extract_texture_from_iosurface,
    .get_screenshot  = iosurface_get_screenshot,
    .set_colormatrix = iosurface_set_colormatrix,
};
#endif /* CONFIG_VDA */
static int query_format(struct vo *vo, uint32_t format)
{
struct priv *p = vo->priv;
const int flags = VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW;
switch (format) {
#if CONFIG_VDA
case IMGFMT_VDA:
p->fns = iosurface_functions;
return flags;
#endif
case IMGFMT_YUYV:
p->fns = cv_functions;
p->cv.pixfmt = kYUVSPixelFormat;
return flags;
case IMGFMT_UYVY:
p->fns = cv_functions;
p->cv.pixfmt = k2vuyPixelFormat;
return flags;
case IMGFMT_RGB24:
p->fns = cv_functions;
p->cv.pixfmt = k24RGBPixelFormat;
return flags;
case IMGFMT_ARGB:
p->fns = cv_functions;
p->cv.pixfmt = k32ARGBPixelFormat;
return flags;
case IMGFMT_BGRA:
p->fns = cv_functions;
p->cv.pixfmt = k32BGRAPixelFormat;
return flags;
}
return 0;
}
const struct vo_driver video_out_corevideo = {
.info = &(const vo_info_t) {
"Mac OS X Core Video",