vo_gpu: hwdec_vaapi: Refactor Vulkan and OpenGL interops for VAAPI

Like hwdec_cuda, you get a big #ifdef mess if you try to keep the
OpenGL and Vulkan interops in the same file. So, I've refactored
them into separate files in a similar way.
This commit is contained in:
Philip Langdale 2019-09-14 20:05:21 -07:00
parent 237f5fa1b7
commit fa0a905ea0
7 changed files with 475 additions and 333 deletions

View File

@ -42,7 +42,7 @@ extern const struct ra_hwdec_driver ra_hwdec_rpi_overlay;
extern const struct ra_hwdec_driver ra_hwdec_drmprime_drm;
const struct ra_hwdec_driver *const ra_hwdec_drivers[] = {
#if HAVE_VAAPI_EGL
#if HAVE_VAAPI_EGL || HAVE_VAAPI_VULKAN
&ra_hwdec_vaegl,
#endif
#if HAVE_VIDEOTOOLBOX_GL || HAVE_IOS_GL

View File

@ -20,46 +20,16 @@
#include <assert.h>
#include <unistd.h>
#include <va/va_drmcommon.h>
#include <libavutil/common.h>
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_vaapi.h>
#include "config.h"
#include "video/out/gpu/hwdec.h"
#include "video/mp_image_pool.h"
#include "video/vaapi.h"
#include "video/out/hwdec/hwdec_vaapi.h"
#if HAVE_VULKAN
#include "video/out/placebo/ra_pl.h"
#include "video/out/vulkan/common.h"
#endif
#if HAVE_GL
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include "video/out/opengl/common.h"
#include "video/out/opengl/ra_gl.h"
#if HAVE_VAAPI_DRM
#include "libmpv/render_gl.h"
#ifndef GL_OES_EGL_image
typedef void* GLeglImageOES;
#endif
#ifndef EGL_KHR_image
typedef void *EGLImageKHR;
#endif
#ifndef EGL_LINUX_DMA_BUF_EXT
#define EGL_LINUX_DMA_BUF_EXT 0x3270
#define EGL_LINUX_DRM_FOURCC_EXT 0x3271
#define EGL_DMA_BUF_PLANE0_FD_EXT 0x3272
#define EGL_DMA_BUF_PLANE0_OFFSET_EXT 0x3273
#define EGL_DMA_BUF_PLANE0_PITCH_EXT 0x3274
#endif
#endif // HAVE_GL
#if HAVE_VAAPI_X11
#include <va/va_x11.h>
@ -123,38 +93,6 @@ static VADisplay *create_native_va_display(struct ra *ra, struct mp_log *log)
return NULL;
}
struct priv_owner {
struct mp_vaapi_ctx *ctx;
VADisplay *display;
int *formats;
bool probing_formats; // temporary during init
};
struct priv {
int num_planes;
struct mp_image layout;
struct ra_tex *tex[4];
#if HAVE_GL
GLuint gl_textures[4];
EGLImageKHR images[4];
#endif
VAImage current_image;
bool buffer_acquired;
#if VA_CHECK_VERSION(1, 1, 0)
bool esh_not_implemented;
VADRMPRIMESurfaceDescriptor desc;
bool surface_acquired;
#endif
#if HAVE_GL
EGLImageKHR (EGLAPIENTRY *CreateImageKHR)(EGLDisplay, EGLContext,
EGLenum, EGLClientBuffer,
const EGLint *);
EGLBoolean (EGLAPIENTRY *DestroyImageKHR)(EGLDisplay, EGLImageKHR);
void (EGLAPIENTRY *EGLImageTargetTexture2DOES)(GLenum, GLeglImageOES);
#endif
};
static void determine_working_formats(struct ra_hwdec *hw);
static void uninit(struct ra_hwdec *hw)
@ -165,42 +103,30 @@ static void uninit(struct ra_hwdec *hw)
va_destroy(p->ctx);
}
const static vaapi_interop_init interop_inits[] = {
#if HAVE_GL
vaapi_gl_init,
#endif
#if HAVE_VULKAN
vaapi_vk_init,
#endif
NULL
};
static int init(struct ra_hwdec *hw)
{
struct priv_owner *p = hw->priv;
#if HAVE_GL
if (ra_is_gl(hw->ra)) {
if (!eglGetCurrentContext())
return -1;
const char *exts = eglQueryString(eglGetCurrentDisplay(), EGL_EXTENSIONS);
if (!exts)
return -1;
GL *gl = ra_gl_get(hw->ra);
if (!strstr(exts, "EXT_image_dma_buf_import") ||
!strstr(exts, "EGL_KHR_image_base") ||
!strstr(gl->extensions, "GL_OES_EGL_image") ||
!(gl->mpgl_caps & MPGL_CAP_TEX_RG))
return -1;
MP_VERBOSE(hw, "using VAAPI EGL interop\n");
}
#endif
#if HAVE_VULKAN
if (ra_pl_get(hw->ra)) {
const struct pl_gpu *gpu = ra_pl_get(hw->ra);
if (!(gpu->import_caps.tex & PL_HANDLE_DMA_BUF)) {
MP_VERBOSE(hw, "VAAPI Vulkan interop requires support for "
"dma_buf import in Vulkan.\n");
return -1;
for (int i = 0; interop_inits[i]; i++) {
if (interop_inits[i](hw)) {
break;
}
MP_VERBOSE(hw, "using VAAPI Vulkan interop\n");
}
#endif
if (!p->interop_map || !p->interop_unmap) {
MP_VERBOSE(hw, "VAAPI hwdec only works with OpenGL or Vulkan backends.\n");
return -1;
}
p->display = create_native_va_display(hw->ra, hw->log);
if (!p->display) {
@ -239,27 +165,8 @@ static void mapper_unmap(struct ra_hwdec_mapper *mapper)
VADisplay *display = p_owner->display;
struct priv *p = mapper->priv;
VAStatus status;
#if HAVE_GL
bool is_gl = ra_is_gl(mapper->ra);
#endif
#if HAVE_VULKAN
const struct pl_gpu *gpu = ra_pl_get(mapper->ra);
#endif
for (int n = 0; n < 4; n++) {
#if HAVE_GL
if (is_gl) {
if (p->images[n])
p->DestroyImageKHR(eglGetCurrentDisplay(), p->images[n]);
p->images[n] = 0;
}
#endif
#if HAVE_VULKAN
if (gpu) {
ra_tex_free(mapper->ra, &mapper->tex[n]);
}
#endif
}
p_owner->interop_unmap(mapper);
#if VA_CHECK_VERSION(1, 1, 0)
if (p->surface_acquired) {
@ -283,18 +190,10 @@ static void mapper_unmap(struct ra_hwdec_mapper *mapper)
static void mapper_uninit(struct ra_hwdec_mapper *mapper)
{
#if HAVE_GL
struct priv *p = mapper->priv;
if (ra_is_gl(mapper->ra)) {
GL *gl = ra_gl_get(mapper->ra);
gl->DeleteTextures(4, p->gl_textures);
for (int n = 0; n < 4; n++) {
p->gl_textures[n] = 0;
ra_tex_free(mapper->ra, &p->tex[n]);
}
struct priv_owner *p_owner = mapper->owner->priv;
if (p_owner->interop_uninit) {
p_owner->interop_uninit(mapper);
}
#endif
}
static bool check_fmt(struct ra_hwdec_mapper *mapper, int fmt)
@ -311,27 +210,9 @@ static int mapper_init(struct ra_hwdec_mapper *mapper)
{
struct priv_owner *p_owner = mapper->owner->priv;
struct priv *p = mapper->priv;
#if HAVE_GL
bool is_gl = ra_is_gl(mapper->ra);
#endif
p->current_image.buf = p->current_image.image_id = VA_INVALID_ID;
#if HAVE_GL
if (is_gl) {
// EGL_KHR_image_base
p->CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR");
p->DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR");
// GL_OES_EGL_image
p->EGLImageTargetTexture2DOES =
(void *)eglGetProcAddress("glEGLImageTargetTexture2DOES");
if (!p->CreateImageKHR || !p->DestroyImageKHR ||
!p->EGLImageTargetTexture2DOES)
return -1;
}
#endif
mapper->dst_params = mapper->src_params;
mapper->dst_params.imgfmt = mapper->src_params.hw_subfmt;
mapper->dst_params.hw_subfmt = 0;
@ -344,38 +225,9 @@ static int mapper_init(struct ra_hwdec_mapper *mapper)
p->num_planes = desc.num_planes;
mp_image_set_params(&p->layout, &mapper->dst_params);
#if HAVE_GL
if (is_gl) {
GL *gl = ra_gl_get(mapper->ra);
gl->GenTextures(4, p->gl_textures);
for (int n = 0; n < desc.num_planes; n++) {
gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
gl->BindTexture(GL_TEXTURE_2D, 0);
struct ra_tex_params params = {
.dimensions = 2,
.w = mp_image_plane_w(&p->layout, n),
.h = mp_image_plane_h(&p->layout, n),
.d = 1,
.format = desc.planes[n],
.render_src = true,
.src_linear = true,
};
if (params.format->ctype != RA_CTYPE_UNORM)
return -1;
p->tex[n] = ra_create_wrapped_tex(mapper->ra, &params,
p->gl_textures[n]);
if (!p->tex[n])
return -1;
}
}
#endif
if (p_owner->interop_init)
if (!p_owner->interop_init(mapper, &desc))
return -1;
if (!p_owner->probing_formats && !check_fmt(mapper, mapper->dst_params.imgfmt))
{
@ -387,24 +239,10 @@ static int mapper_init(struct ra_hwdec_mapper *mapper)
return 0;
}
#define ADD_ATTRIB(name, value) \
do { \
assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs)); \
attribs[num_attribs++] = (name); \
attribs[num_attribs++] = (value); \
attribs[num_attribs] = EGL_NONE; \
} while(0)
static int mapper_map(struct ra_hwdec_mapper *mapper)
{
struct priv_owner *p_owner = mapper->owner->priv;
struct priv *p = mapper->priv;
#if HAVE_GL
bool is_gl = ra_is_gl(mapper->ra);
#endif
#if HAVE_VULKAN
const struct pl_gpu *gpu = ra_pl_get(mapper->ra);
#endif
VAStatus status;
VADisplay *display = p_owner->display;
@ -428,103 +266,8 @@ static int mapper_map(struct ra_hwdec_mapper *mapper)
CHECK_VA_STATUS(mapper, "vaSyncSurface()");
p->surface_acquired = true;
#if HAVE_GL
if (is_gl) {
GL *gl = ra_gl_get(mapper->ra);
for (int n = 0; n < p->num_planes; n++) {
int attribs[20] = {EGL_NONE};
int num_attribs = 0;
ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, p->desc.layers[n].drm_format);
ADD_ATTRIB(EGL_WIDTH, p->tex[n]->params.w);
ADD_ATTRIB(EGL_HEIGHT, p->tex[n]->params.h);
#define ADD_PLANE_ATTRIBS(plane) do { \
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _FD_EXT, \
p->desc.objects[p->desc.layers[n].object_index[plane]].fd); \
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _OFFSET_EXT, \
p->desc.layers[n].offset[plane]); \
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _PITCH_EXT, \
p->desc.layers[n].pitch[plane]); \
} while (0)
ADD_PLANE_ATTRIBS(0);
if (p->desc.layers[n].num_planes > 1)
ADD_PLANE_ATTRIBS(1);
if (p->desc.layers[n].num_planes > 2)
ADD_PLANE_ATTRIBS(2);
if (p->desc.layers[n].num_planes > 3)
ADD_PLANE_ATTRIBS(3);
p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(),
EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs);
if (!p->images[n])
goto esh_failed;
gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]);
mapper->tex[n] = p->tex[n];
}
gl->BindTexture(GL_TEXTURE_2D, 0);
}
#endif
#if HAVE_VULKAN
if (gpu) {
struct ra_imgfmt_desc desc = {0};
if (!ra_get_imgfmt_desc(mapper->ra, mapper->dst_params.imgfmt, &desc))
goto esh_failed;
for (int n = 0; n < p->num_planes; n++) {
if (p->desc.layers[n].num_planes > 1) {
// Should never happen because we request separate layers
MP_ERR(mapper, "Multi-plane VA surfaces are not supported\n");
goto esh_failed;
}
const struct ra_format *format = desc.planes[n];
int id = p->desc.layers[n].object_index[0];
int fd = p->desc.objects[id].fd;
uint32_t size = p->desc.objects[id].size;
uint32_t offset = p->desc.layers[n].offset[0];
struct pl_tex_params tex_params = {
.w = mp_image_plane_w(&p->layout, n),
.h = mp_image_plane_h(&p->layout, n),
.d = 0,
.format = format->priv,
.sampleable = true,
.sample_mode = format->linear_filter ? PL_TEX_SAMPLE_LINEAR
: PL_TEX_SAMPLE_NEAREST,
.import_handle = PL_HANDLE_DMA_BUF,
.shared_mem = (struct pl_shared_mem) {
.handle = {
.fd = fd,
},
.size = size,
.offset = offset,
},
};
const struct pl_tex *pltex = pl_tex_create(gpu, &tex_params);
if (!pltex) {
goto esh_failed;
}
struct ra_tex *ratex = talloc_ptrtype(NULL, ratex);
int ret = mppl_wrap_tex(mapper->ra, pltex, ratex);
if (!ret) {
pl_tex_destroy(gpu, &pltex);
talloc_free(ratex);
goto esh_failed;
}
mapper->tex[n] = ratex;
MP_TRACE(mapper, "Object %d with fd %d imported as %p\n",
id, fd, ratex);
}
}
#endif
if (!p_owner->interop_map(mapper))
goto esh_failed;
if (p->desc.fourcc == VA_FOURCC_YV12)
MPSWAP(struct ra_tex*, mapper->tex[1], mapper->tex[2]);
@ -539,9 +282,7 @@ esh_failed:
}
#endif // VA_CHECK_VERSION
#if HAVE_GL
if (is_gl) {
GL *gl = ra_gl_get(mapper->ra);
if (p_owner->interop_map_legacy) {
VAImage *va_image = &p->current_image;
status = vaDeriveImage(display, va_surface_id(mapper->src), va_image);
if (!CHECK_VA_STATUS(mapper, "vaDeriveImage()"))
@ -566,51 +307,17 @@ esh_failed:
0, // N/A
};
for (int n = 0; n < p->num_planes; n++) {
int attribs[20] = {EGL_NONE};
int num_attribs = 0;
const struct ra_format *fmt = p->tex[n]->params.format;
int n_comp = fmt->num_components;
int comp_s = fmt->component_size[n] / 8;
if (n_comp < 1 || n_comp > 3 || comp_s < 1 || comp_s > 2)
goto err;
int drm_fmt = drm_fmts[n_comp - 1 + (comp_s - 1) * 4];
if (!drm_fmt)
goto err;
ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, drm_fmt);
ADD_ATTRIB(EGL_WIDTH, p->tex[n]->params.w);
ADD_ATTRIB(EGL_HEIGHT, p->tex[n]->params.h);
ADD_ATTRIB(EGL_DMA_BUF_PLANE0_FD_EXT, buffer_info.handle);
ADD_ATTRIB(EGL_DMA_BUF_PLANE0_OFFSET_EXT, va_image->offsets[n]);
ADD_ATTRIB(EGL_DMA_BUF_PLANE0_PITCH_EXT, va_image->pitches[n]);
p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(),
EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs);
if (!p->images[n])
goto err;
gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]);
mapper->tex[n] = p->tex[n];
}
gl->BindTexture(GL_TEXTURE_2D, 0);
if (!p_owner->interop_map_legacy(mapper, &buffer_info, drm_fmts))
goto err;
if (va_image->format.fourcc == VA_FOURCC_YV12)
MPSWAP(struct ra_tex*, mapper->tex[1], mapper->tex[2]);
return 0;
}
#endif
#if HAVE_VULKAN
// Seems unnecessary to support Vulkan interop with old libva API.
if (gpu) {
} else {
mapper_unmap(mapper);
goto err;
}
#endif
err:
if (!p_owner->probing_formats)

View File

@ -0,0 +1,64 @@
/*
* This file is part of mpv.
*
* mpv is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* mpv is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <va/va_drmcommon.h>
#include "config.h"
#include "video/vaapi.h"
#include "video/out/gpu/hwdec.h"
struct priv_owner {
struct mp_vaapi_ctx *ctx;
VADisplay *display;
int *formats;
bool probing_formats; // temporary during init
bool (*interop_init)(struct ra_hwdec_mapper *mapper,
const struct ra_imgfmt_desc *desc);
void (*interop_uninit)(const struct ra_hwdec_mapper *mapper);
bool (*interop_map)(struct ra_hwdec_mapper *mapper);
bool (*interop_map_legacy)(struct ra_hwdec_mapper *mapper,
const VABufferInfo *buffer_info,
const int *drm_fmts);
void (*interop_unmap)(struct ra_hwdec_mapper *mapper);
};
struct priv {
int num_planes;
struct mp_image layout;
struct ra_tex *tex[4];
VAImage current_image;
bool buffer_acquired;
#if VA_CHECK_VERSION(1, 1, 0)
bool esh_not_implemented;
VADRMPRIMESurfaceDescriptor desc;
bool surface_acquired;
#endif
void *interop_mapper_priv;
};
typedef bool (*vaapi_interop_init)(const struct ra_hwdec *hw);
bool vaapi_gl_init(const struct ra_hwdec *hw);
bool vaapi_vk_init(const struct ra_hwdec *hw);

View File

@ -0,0 +1,264 @@
/*
* This file is part of mpv.
*
* mpv is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* mpv is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include "config.h"
#include "hwdec_vaapi.h"
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include "video/out/opengl/ra_gl.h"
#ifndef GL_OES_EGL_image
typedef void* GLeglImageOES;
#endif
#ifndef EGL_KHR_image
typedef void *EGLImageKHR;
#endif
#ifndef EGL_LINUX_DMA_BUF_EXT
#define EGL_LINUX_DMA_BUF_EXT 0x3270
#define EGL_LINUX_DRM_FOURCC_EXT 0x3271
#define EGL_DMA_BUF_PLANE0_FD_EXT 0x3272
#define EGL_DMA_BUF_PLANE0_OFFSET_EXT 0x3273
#define EGL_DMA_BUF_PLANE0_PITCH_EXT 0x3274
#endif
struct vaapi_gl_mapper_priv {
GLuint gl_textures[4];
EGLImageKHR images[4];
EGLImageKHR (EGLAPIENTRY *CreateImageKHR)(EGLDisplay, EGLContext,
EGLenum, EGLClientBuffer,
const EGLint *);
EGLBoolean (EGLAPIENTRY *DestroyImageKHR)(EGLDisplay, EGLImageKHR);
void (EGLAPIENTRY *EGLImageTargetTexture2DOES)(GLenum, GLeglImageOES);
};
static bool vaapi_gl_mapper_init(struct ra_hwdec_mapper *mapper,
const struct ra_imgfmt_desc *desc)
{
struct priv *p_mapper = mapper->priv;
struct vaapi_gl_mapper_priv *p = talloc_ptrtype(NULL, p);
p_mapper->interop_mapper_priv = p;
// EGL_KHR_image_base
p->CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR");
p->DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR");
// GL_OES_EGL_image
p->EGLImageTargetTexture2DOES =
(void *)eglGetProcAddress("glEGLImageTargetTexture2DOES");
if (!p->CreateImageKHR || !p->DestroyImageKHR ||
!p->EGLImageTargetTexture2DOES)
return false;
GL *gl = ra_gl_get(mapper->ra);
gl->GenTextures(4, p->gl_textures);
for (int n = 0; n < desc->num_planes; n++) {
gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
gl->BindTexture(GL_TEXTURE_2D, 0);
struct ra_tex_params params = {
.dimensions = 2,
.w = mp_image_plane_w(&p_mapper->layout, n),
.h = mp_image_plane_h(&p_mapper->layout, n),
.d = 1,
.format = desc->planes[n],
.render_src = true,
.src_linear = true,
};
if (params.format->ctype != RA_CTYPE_UNORM)
return false;
p_mapper->tex[n] = ra_create_wrapped_tex(mapper->ra, &params,
p->gl_textures[n]);
if (!p_mapper->tex[n])
return false;
}
return true;
}
static void vaapi_gl_mapper_uninit(const struct ra_hwdec_mapper *mapper)
{
struct priv *p_mapper = mapper->priv;
struct vaapi_gl_mapper_priv *p = p_mapper->interop_mapper_priv;
if (p) {
GL *gl = ra_gl_get(mapper->ra);
gl->DeleteTextures(4, p->gl_textures);
for (int n = 0; n < 4; n++) {
p->gl_textures[n] = 0;
ra_tex_free(mapper->ra, &p_mapper->tex[n]);
}
talloc_free(p);
p_mapper->interop_mapper_priv = NULL;
}
}
#define ADD_ATTRIB(name, value) \
do { \
assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs)); \
attribs[num_attribs++] = (name); \
attribs[num_attribs++] = (value); \
attribs[num_attribs] = EGL_NONE; \
} while(0)
#define ADD_PLANE_ATTRIBS(plane) do { \
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _FD_EXT, \
p_mapper->desc.objects[p_mapper->desc.layers[n].object_index[plane]].fd); \
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _OFFSET_EXT, \
p_mapper->desc.layers[n].offset[plane]); \
ADD_ATTRIB(EGL_DMA_BUF_PLANE ## plane ## _PITCH_EXT, \
p_mapper->desc.layers[n].pitch[plane]); \
} while (0)
static bool vaapi_gl_map(struct ra_hwdec_mapper *mapper)
{
#if VA_CHECK_VERSION(1, 1, 0)
struct priv *p_mapper = mapper->priv;
struct vaapi_gl_mapper_priv *p = p_mapper->interop_mapper_priv;
GL *gl = ra_gl_get(mapper->ra);
for (int n = 0; n < p_mapper->num_planes; n++) {
int attribs[20] = {EGL_NONE};
int num_attribs = 0;
ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, p_mapper->desc.layers[n].drm_format);
ADD_ATTRIB(EGL_WIDTH, p_mapper->tex[n]->params.w);
ADD_ATTRIB(EGL_HEIGHT, p_mapper->tex[n]->params.h);
ADD_PLANE_ATTRIBS(0);
if (p_mapper->desc.layers[n].num_planes > 1)
ADD_PLANE_ATTRIBS(1);
if (p_mapper->desc.layers[n].num_planes > 2)
ADD_PLANE_ATTRIBS(2);
if (p_mapper->desc.layers[n].num_planes > 3)
ADD_PLANE_ATTRIBS(3);
p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(),
EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs);
if (!p->images[n])
return false;
gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]);
mapper->tex[n] = p_mapper->tex[n];
}
gl->BindTexture(GL_TEXTURE_2D, 0);
#endif
return true;
}
static bool vaapi_gl_map_legacy(struct ra_hwdec_mapper *mapper,
const VABufferInfo *buffer_info,
const int *drm_fmts) {
struct priv *p_mapper = mapper->priv;
struct vaapi_gl_mapper_priv *p = p_mapper->interop_mapper_priv;
GL *gl = ra_gl_get(mapper->ra);
VAImage *va_image = &p_mapper->current_image;
for (int n = 0; n < p_mapper->num_planes; n++) {
int attribs[20] = {EGL_NONE};
int num_attribs = 0;
const struct ra_format *fmt = p_mapper->tex[n]->params.format;
int n_comp = fmt->num_components;
int comp_s = fmt->component_size[n] / 8;
if (n_comp < 1 || n_comp > 3 || comp_s < 1 || comp_s > 2)
return false;
int drm_fmt = drm_fmts[n_comp - 1 + (comp_s - 1) * 4];
if (!drm_fmt)
return false;
ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, drm_fmt);
ADD_ATTRIB(EGL_WIDTH, p_mapper->tex[n]->params.w);
ADD_ATTRIB(EGL_HEIGHT, p_mapper->tex[n]->params.h);
ADD_ATTRIB(EGL_DMA_BUF_PLANE0_FD_EXT, buffer_info->handle);
ADD_ATTRIB(EGL_DMA_BUF_PLANE0_OFFSET_EXT, va_image->offsets[n]);
ADD_ATTRIB(EGL_DMA_BUF_PLANE0_PITCH_EXT, va_image->pitches[n]);
p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(),
EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs);
if (!p->images[n])
return false;
gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]);
mapper->tex[n] = p_mapper->tex[n];
}
gl->BindTexture(GL_TEXTURE_2D, 0);
return true;
}
static void vaapi_gl_unmap(struct ra_hwdec_mapper *mapper)
{
struct priv *p_mapper = mapper->priv;
struct vaapi_gl_mapper_priv *p = p_mapper->interop_mapper_priv;
if (p) {
for (int n = 0; n < 4; n++) {
if (p->images[n])
p->DestroyImageKHR(eglGetCurrentDisplay(), p->images[n]);
p->images[n] = 0;
}
}
}
bool vaapi_gl_init(const struct ra_hwdec *hw)
{
struct priv_owner *p = hw->priv;
if (!ra_is_gl(hw->ra)) {
// This is not an OpenGL RA.
return false;
}
if (!eglGetCurrentContext())
return false;
const char *exts = eglQueryString(eglGetCurrentDisplay(), EGL_EXTENSIONS);
if (!exts)
return false;
GL *gl = ra_gl_get(hw->ra);
if (!strstr(exts, "EXT_image_dma_buf_import") ||
!strstr(exts, "EGL_KHR_image_base") ||
!strstr(gl->extensions, "GL_OES_EGL_image") ||
!(gl->mpgl_caps & MPGL_CAP_TEX_RG))
return false;
MP_VERBOSE(hw, "using VAAPI EGL interop\n");
p->interop_init = vaapi_gl_mapper_init;
p->interop_uninit = vaapi_gl_mapper_uninit;
p->interop_map = vaapi_gl_map;
p->interop_map_legacy = vaapi_gl_map_legacy;
p->interop_unmap = vaapi_gl_unmap;
return true;
}

View File

@ -0,0 +1,110 @@
/*
* This file is part of mpv.
*
* mpv is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* mpv is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with mpv. If not, see <http://www.gnu.org/licenses/>.
*/
#include "config.h"
#include "hwdec_vaapi.h"
#include "video/out/placebo/ra_pl.h"
static bool vaapi_vk_map(struct ra_hwdec_mapper *mapper)
{
struct priv *p = mapper->priv;
const struct pl_gpu *gpu = ra_pl_get(mapper->ra);
struct ra_imgfmt_desc desc = {0};
if (!ra_get_imgfmt_desc(mapper->ra, mapper->dst_params.imgfmt, &desc))
return false;
for (int n = 0; n < p->num_planes; n++) {
if (p->desc.layers[n].num_planes > 1) {
// Should never happen because we request separate layers
MP_ERR(mapper, "Multi-plane VA surfaces are not supported\n");
return false;
}
const struct ra_format *format = desc.planes[n];
int id = p->desc.layers[n].object_index[0];
int fd = p->desc.objects[id].fd;
uint32_t size = p->desc.objects[id].size;
uint32_t offset = p->desc.layers[n].offset[0];
struct pl_tex_params tex_params = {
.w = mp_image_plane_w(&p->layout, n),
.h = mp_image_plane_h(&p->layout, n),
.d = 0,
.format = format->priv,
.sampleable = true,
.sample_mode = format->linear_filter ? PL_TEX_SAMPLE_LINEAR
: PL_TEX_SAMPLE_NEAREST,
.import_handle = PL_HANDLE_DMA_BUF,
.shared_mem = (struct pl_shared_mem) {
.handle = {
.fd = fd,
},
.size = size,
.offset = offset,
},
};
const struct pl_tex *pltex = pl_tex_create(gpu, &tex_params);
if (!pltex) {
return false;
}
struct ra_tex *ratex = talloc_ptrtype(NULL, ratex);
int ret = mppl_wrap_tex(mapper->ra, pltex, ratex);
if (!ret) {
pl_tex_destroy(gpu, &pltex);
talloc_free(ratex);
return false;
}
mapper->tex[n] = ratex;
MP_TRACE(mapper, "Object %d with fd %d imported as %p\n",
id, fd, ratex);
}
return true;
}
static void vaapi_vk_unmap(struct ra_hwdec_mapper *mapper)
{
for (int n = 0; n < 4; n++)
ra_tex_free(mapper->ra, &mapper->tex[n]);
}
bool vaapi_vk_init(const struct ra_hwdec *hw)
{
struct priv_owner *p = hw->priv;
const struct pl_gpu *gpu = ra_pl_get(hw->ra);
if (!gpu) {
// This is not a Vulkan RA;
return false;
}
if (!(gpu->import_caps.tex & PL_HANDLE_DMA_BUF)) {
MP_VERBOSE(hw, "VAAPI Vulkan interop requires support for "
"dma_buf import in Vulkan.\n");
return false;
}
MP_VERBOSE(hw, "using VAAPI Vulkan interop\n");
p->interop_map = vaapi_vk_map;
p->interop_unmap = vaapi_vk_unmap;
return true;
}

View File

@ -699,11 +699,6 @@ video_output_features = [
'desc': 'VAAPI (DRM/EGL support)',
'deps': 'vaapi && egl-drm',
'func': check_pkg_config('libva-drm', '>= 0.36.0'),
}, {
'name': '--vaapi-glx',
'desc': 'VAAPI GLX',
'deps': 'gpl && vaapi-x11 && gl-x11',
'func': check_true,
}, {
'name': '--vaapi-x-egl',
'desc': 'VAAPI EGL on X11',
@ -712,7 +707,7 @@ video_output_features = [
}, {
'name': 'vaapi-egl',
'desc': 'VAAPI EGL',
'deps': 'vaapi-x-egl || vaapi-wayland',
'deps': 'vaapi-x-egl || vaapi-wayland || vaapi-drm',
'func': check_true,
}, {
'name': '--caca',

View File

@ -435,6 +435,8 @@ def build(ctx):
( "video/out/hwdec/hwdec_cuda_gl.c", "cuda-hwaccel && gl" ),
( "video/out/hwdec/hwdec_cuda_vk.c", "cuda-hwaccel && vulkan" ),
( "video/out/hwdec/hwdec_vaapi.c", "vaapi-egl || vaapi-vulkan" ),
( "video/out/hwdec/hwdec_vaapi_gl.c", "vaapi-egl" ),
( "video/out/hwdec/hwdec_vaapi_vk.c", "vaapi-vulkan" ),
( "video/out/placebo/ra_pl.c", "libplacebo" ),
( "video/out/placebo/utils.c", "libplacebo" ),
( "video/out/opengl/angle_dynamic.c", "egl-angle" ),