/*
 * This file is part of mpv.
 *
 * mpv is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * mpv is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
 */

#include <stddef.h>
#include <string.h>
#include <assert.h>

#include <EGL/egl.h>
#include <EGL/eglext.h>

#include <va/va_drmcommon.h>

#include <libavutil/common.h>
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_vaapi.h>

#include "config.h"

#include "hwdec.h"
#include "video/vaapi.h"
#include "video/mp_image_pool.h"
#include "common.h"
#include "formats.h"

#ifndef GL_OES_EGL_image
typedef void* GLeglImageOES;
#endif
#ifndef EGL_KHR_image
typedef void *EGLImageKHR;
#endif

#ifndef EGL_LINUX_DMA_BUF_EXT
#define EGL_LINUX_DMA_BUF_EXT            0x3270
#define EGL_LINUX_DRM_FOURCC_EXT         0x3271
#define EGL_DMA_BUF_PLANE0_FD_EXT        0x3272
#define EGL_DMA_BUF_PLANE0_OFFSET_EXT    0x3273
#define EGL_DMA_BUF_PLANE0_PITCH_EXT     0x3274
#endif

#if HAVE_VAAPI_X11
#include <va/va_x11.h>

static VADisplay *create_x11_va_display(GL *gl)
{
    Display *x11 = mpgl_get_native_display(gl, "x11");
    return x11 ? vaGetDisplay(x11) : NULL;
}
#endif

#if HAVE_VAAPI_WAYLAND
#include <va/va_wayland.h>

static VADisplay *create_wayland_va_display(GL *gl)
{
    struct wl_display *wl = mpgl_get_native_display(gl, "wl");
    return wl ? vaGetDisplayWl(wl) : NULL;
}
#endif

#if HAVE_VAAPI_DRM
#include <va/va_drm.h>

static VADisplay *create_drm_va_display(GL *gl)
{
    int drm_fd = (intptr_t)mpgl_get_native_display(gl, "drm");
    // Note: yes, drm_fd==0 could be valid - but it's rare and doesn't fit with
    // our slightly crappy way of passing it through, so consider 0 not
    // valid.
    return drm_fd ? vaGetDisplayDRM(drm_fd) : NULL;
}
#endif

struct va_create_native {
    const char *name;
    VADisplay *(*create)(GL *gl);
};

static const struct va_create_native create_native_cbs[] = {
#if HAVE_VAAPI_X11
    {"x11", create_x11_va_display},
#endif
#if HAVE_VAAPI_WAYLAND
    {"wayland", create_wayland_va_display},
#endif
#if HAVE_VAAPI_DRM
    {"drm", create_drm_va_display},
#endif
};

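// Pick a VA display that matches whatever native display the GL context is
// running on (X11, Wayland, or a bare DRM fd), trying each compiled-in
// backend in turn.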
static VADisplay *create_native_va_display(GL *gl, struct mp_log *log)
{
    for (int n = 0; n < MP_ARRAY_SIZE(create_native_cbs); n++) {
        const struct va_create_native *disp = &create_native_cbs[n];
        mp_verbose(log, "Trying to open a %s VA display...\n", disp->name);
        VADisplay *display = disp->create(gl);
        if (display)
            return display;
    }
    return NULL;
}

struct priv {
    struct mp_log *log;
    struct mp_vaapi_ctx *ctx;
    VADisplay *display;
    GLuint gl_textures[4];
    EGLImageKHR images[4];
    VAImage current_image;
    bool buffer_acquired;
    int current_mpfmt;
    int *formats;
    bool probing_formats; // temporary during init

    EGLImageKHR (EGLAPIENTRY *CreateImageKHR)(EGLDisplay, EGLContext,
                                              EGLenum, EGLClientBuffer,
                                              const EGLint *);
    EGLBoolean (EGLAPIENTRY *DestroyImageKHR)(EGLDisplay, EGLImageKHR);
    void (EGLAPIENTRY *EGLImageTargetTexture2DOES)(GLenum, GLeglImageOES);
};

static void determine_working_formats(struct gl_hwdec *hw);

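// Undo a previous map_frame(): destroy the EGLImages and release the buffer
// handle and VAImage derived from the mapped surface.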
static void unmap_frame(struct gl_hwdec *hw)
{
    struct priv *p = hw->priv;
    VAStatus status;

    for (int n = 0; n < 4; n++) {
        if (p->images[n])
            p->DestroyImageKHR(eglGetCurrentDisplay(), p->images[n]);
        p->images[n] = 0;
    }

    if (p->buffer_acquired) {
        status = vaReleaseBufferHandle(p->display, p->current_image.buf);
        CHECK_VA_STATUS(p, "vaReleaseBufferHandle()");
        p->buffer_acquired = false;
    }
    if (p->current_image.image_id != VA_INVALID_ID) {
        status = vaDestroyImage(p->display, p->current_image.image_id);
        CHECK_VA_STATUS(p, "vaDestroyImage()");
        p->current_image.image_id = VA_INVALID_ID;
    }
}

static void destroy_textures(struct gl_hwdec *hw)
{
    struct priv *p = hw->priv;
    GL *gl = hw->gl;

    gl->DeleteTextures(4, p->gl_textures);
    for (int n = 0; n < 4; n++)
        p->gl_textures[n] = 0;
}

static void destroy(struct gl_hwdec *hw)
{
    struct priv *p = hw->priv;
    unmap_frame(hw);
    destroy_textures(hw);
    if (p->ctx)
        hwdec_devices_remove(hw->devs, &p->ctx->hwctx);
    va_destroy(p->ctx);
}

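// Interop setup: verify the required EGL/GL extensions, resolve the
// eglCreateImageKHR/eglDestroyImageKHR/glEGLImageTargetTexture2DOES entry
// points, open a native VA display, and probe which surface formats can
// actually be mapped.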
static int create(struct gl_hwdec *hw)
{
    GL *gl = hw->gl;

    struct priv *p = talloc_zero(hw, struct priv);
    hw->priv = p;
    p->current_image.buf = p->current_image.image_id = VA_INVALID_ID;
    p->log = hw->log;

    if (!eglGetCurrentContext())
        return -1;

    const char *exts = eglQueryString(eglGetCurrentDisplay(), EGL_EXTENSIONS);
    if (!exts)
        return -1;

    if (!strstr(exts, "EXT_image_dma_buf_import") ||
        !strstr(exts, "EGL_KHR_image_base") ||
        !strstr(gl->extensions, "GL_OES_EGL_image") ||
        !(gl->mpgl_caps & MPGL_CAP_TEX_RG))
        return -1;

    // EGL_KHR_image_base
    p->CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR");
    p->DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR");
    // GL_OES_EGL_image
    p->EGLImageTargetTexture2DOES =
        (void *)eglGetProcAddress("glEGLImageTargetTexture2DOES");

    if (!p->CreateImageKHR || !p->DestroyImageKHR ||
        !p->EGLImageTargetTexture2DOES)
        return -1;

    p->display = create_native_va_display(gl, hw->log);
    if (!p->display) {
        MP_VERBOSE(hw, "Could not create a VA display.\n");
        return -1;
    }

    p->ctx = va_initialize(p->display, p->log, true);
    if (!p->ctx) {
        vaTerminate(p->display);
        return -1;
    }
    if (!p->ctx->av_device_ref) {
        MP_VERBOSE(hw, "libavutil vaapi code rejected the driver?\n");
        destroy(hw);
        return -1;
    }

    if (hw->probing && va_guess_if_emulated(p->ctx)) {
        destroy(hw);
        return -1;
    }

    MP_VERBOSE(p, "using VAAPI EGL interop\n");

    determine_working_formats(hw);
    if (!p->formats || !p->formats[0]) {
        destroy(hw);
        return -1;
    }

    p->ctx->hwctx.supported_formats = p->formats;
    p->ctx->hwctx.driver_name = hw->driver->name;
    hwdec_devices_add(hw->devs, &p->ctx->hwctx);
    return 0;
}

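// Return whether fmt is in the zero-terminated list of probed formats.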
static bool check_fmt(struct priv *p, int fmt)
{
    for (int n = 0; p->formats[n]; n++) {
        if (p->formats[n] == fmt)
            return true;
    }
    return false;
}

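// Called when the decoder's surface format becomes known (or changes). The
// exact format is passed in as params->hw_subfmt; outside of the probing
// phase, formats that did not survive probing are rejected.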
static int reinit(struct gl_hwdec *hw, struct mp_image_params *params)
{
    struct priv *p = hw->priv;
    GL *gl = hw->gl;

    // Recreate them to get rid of all previous image data (possibly).
    destroy_textures(hw);

    gl->GenTextures(4, p->gl_textures);
    for (int n = 0; n < 4; n++) {
        gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        gl->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    }
    gl->BindTexture(GL_TEXTURE_2D, 0);

    p->current_mpfmt = params->hw_subfmt;

    if (!p->probing_formats && !check_fmt(p, p->current_mpfmt)) {
        MP_FATAL(p, "unsupported VA image format %s\n",
                 mp_imgfmt_to_name(p->current_mpfmt));
        return -1;
    }

    params->imgfmt = p->current_mpfmt;
    params->hw_subfmt = 0;

    return 0;
}

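// Append one key/value pair to the EGL attribute array, keeping the array
// terminated with EGL_NONE.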
#define ADD_ATTRIB(name, value)                         \
    do {                                                \
    assert(num_attribs + 3 < MP_ARRAY_SIZE(attribs));   \
    attribs[num_attribs++] = (name);                    \
    attribs[num_attribs++] = (value);                   \
    attribs[num_attribs] = EGL_NONE;                    \
    } while(0)

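// Export the VA surface as a DRM PRIME buffer (vaDeriveImage +
// vaAcquireBufferHandle), then import each plane as an EGLImage and bind it
// to one of the persistent GL textures.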
static int map_frame(struct gl_hwdec *hw, struct mp_image *hw_image,
                     struct gl_hwdec_frame *out_frame)
{
    struct priv *p = hw->priv;
    GL *gl = hw->gl;
    VAStatus status;
    VAImage *va_image = &p->current_image;

    unmap_frame(hw);

    status = vaDeriveImage(p->display, va_surface_id(hw_image), va_image);
    if (!CHECK_VA_STATUS(p, "vaDeriveImage()"))
        goto err;

    VABufferInfo buffer_info = {.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME};
    status = vaAcquireBufferHandle(p->display, va_image->buf, &buffer_info);
    if (!CHECK_VA_STATUS(p, "vaAcquireBufferHandle()"))
        goto err;
    p->buffer_acquired = true;

    struct mp_image layout = {0};
    mp_image_set_params(&layout, &hw_image->params);
    mp_image_setfmt(&layout, p->current_mpfmt);

    struct gl_imgfmt_desc desc;
    if (!gl_get_imgfmt_desc(gl, p->current_mpfmt, &desc))
        goto err;

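    // DRM fourccs for plain 1- and 2-byte-per-component formats, indexed by
    // (components - 1) + (bytes per component - 1) * 4. R16 and GR32 are only
    // proposed formats and need a patched Mesa to work.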
    int drm_fmts[8] = {
        // 1 bytes per component, 1-4 components
        MKTAG('R', '8', ' ', ' '),  // DRM_FORMAT_R8
        MKTAG('G', 'R', '8', '8'),  // DRM_FORMAT_GR88
        0,                          // untested (DRM_FORMAT_RGB888?)
        0,                          // untested (DRM_FORMAT_RGBA8888?)
        // 2 bytes per component, 1-4 components
        MKTAG('R', '1', '6', ' '),  // proposed DRM_FORMAT_R16
        MKTAG('G', 'R', '3', '2'),  // proposed DRM_FORMAT_GR32
        0,                          // N/A
        0,                          // N/A
    };

    for (int n = 0; n < layout.num_planes; n++) {
        int attribs[20] = {EGL_NONE};
        int num_attribs = 0;

        const struct gl_format *fmt = desc.planes[n];
        if (gl_format_type(fmt) != MPGL_TYPE_UNORM)
            goto err;

        int n_comp = gl_format_components(fmt->format);
        int comp_s = gl_component_size(fmt->type);
        if (!gl_format_is_regular(fmt))
            goto err;
        if (n_comp < 1 || n_comp > 3 || comp_s < 1 || comp_s > 2)
            goto err;
        int drm_fmt = drm_fmts[n_comp - 1 + (comp_s - 1) * 4];
        if (!drm_fmt)
            goto err;

        ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, drm_fmt);
        ADD_ATTRIB(EGL_WIDTH, mp_image_plane_w(&layout, n));
        ADD_ATTRIB(EGL_HEIGHT, mp_image_plane_h(&layout, n));
        ADD_ATTRIB(EGL_DMA_BUF_PLANE0_FD_EXT, buffer_info.handle);
        ADD_ATTRIB(EGL_DMA_BUF_PLANE0_OFFSET_EXT, va_image->offsets[n]);
        ADD_ATTRIB(EGL_DMA_BUF_PLANE0_PITCH_EXT, va_image->pitches[n]);

        p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(),
            EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs);
        if (!p->images[n])
            goto err;

        gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
        p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]);

        out_frame->planes[n] = (struct gl_hwdec_plane){
            .gl_texture = p->gl_textures[n],
            .gl_target = GL_TEXTURE_2D,
            .tex_w = mp_image_plane_w(&layout, n),
            .tex_h = mp_image_plane_h(&layout, n),
        };
    }
    gl->BindTexture(GL_TEXTURE_2D, 0);

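    // YV12 surfaces store their chroma planes in V, U order, while the mapped
    // mpv format expects U before V, so swap the two plane descriptors.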
    if (va_image->format.fourcc == VA_FOURCC_YV12)
        MPSWAP(struct gl_hwdec_plane, out_frame->planes[1], out_frame->planes[2]);

    return 0;

err:
    if (!p->probing_formats)
        MP_FATAL(p, "mapping VAAPI EGL image failed\n");
    unmap_frame(hw);
    return -1;
}

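// Probe a single candidate format by running a full reinit() + map_frame()
// cycle on a dummy surface.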
static bool try_format(struct gl_hwdec *hw, struct mp_image *surface)
{
    bool ok = false;
    struct mp_image_params params = surface->params;
    if (reinit(hw, &params) >= 0) {
        struct gl_hwdec_frame frame = {0};
        ok = map_frame(hw, surface, &frame) >= 0;
    }
    unmap_frame(hw);
    return ok;
}

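// There is no reliable way to ask the driver which surface formats can be
// mapped via EGL dmabuf import, so probe instead: try to create and map a
// dummy surface for every format that AVHWFramesConstraints reports.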
static void determine_working_formats(struct gl_hwdec *hw)
{
    struct priv *p = hw->priv;
    int num_formats = 0;
    int *formats = NULL;

    p->probing_formats = true;

    AVHWFramesConstraints *fc =
        av_hwdevice_get_hwframe_constraints(p->ctx->av_device_ref, NULL);
    if (!fc) {
        MP_WARN(hw, "failed to retrieve libavutil frame constraints\n");
        goto done;
    }
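    // Allocate a tiny 128x128 frame pool for each candidate software format
    // and record the format if one of its surfaces survives a map round trip.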
    for (int n = 0; fc->valid_sw_formats[n] != AV_PIX_FMT_NONE; n++) {
        AVBufferRef *fref = NULL;
        struct mp_image *s = NULL;
        AVFrame *frame = NULL;
        fref = av_hwframe_ctx_alloc(p->ctx->av_device_ref);
        if (!fref)
            goto err;
        AVHWFramesContext *fctx = (void *)fref->data;
        fctx->format = AV_PIX_FMT_VAAPI;
        fctx->sw_format = fc->valid_sw_formats[n];
        fctx->width = 128;
        fctx->height = 128;
        if (av_hwframe_ctx_init(fref) < 0)
            goto err;
        frame = av_frame_alloc();
        if (!frame)
            goto err;
        if (av_hwframe_get_buffer(fref, frame, 0) < 0)
            goto err;
        s = mp_image_from_av_frame(frame);
        if (!s || !mp_image_params_valid(&s->params))
            goto err;
        if (try_format(hw, s))
            MP_TARRAY_APPEND(p, formats, num_formats, s->params.hw_subfmt);
    err:
        talloc_free(s);
        av_frame_free(&frame);
        av_buffer_unref(&fref);
    }
    av_hwframe_constraints_free(&fc);

done:
    MP_TARRAY_APPEND(p, formats, num_formats, 0); // terminate it
    p->formats = formats;
    p->probing_formats = false;

    MP_VERBOSE(hw, "Supported formats:\n");
    for (int n = 0; formats[n]; n++)
        MP_VERBOSE(hw, " %s\n", mp_imgfmt_to_name(formats[n]));
}

const struct gl_hwdec_driver gl_hwdec_vaegl = {
    .name = "vaapi-egl",
    .api = HWDEC_VAAPI,
    .imgfmt = IMGFMT_VAAPI,
    .create = create,
    .reinit = reinit,
    .map_frame = map_frame,
    .unmap = unmap_frame,
    .destroy = destroy,
};