/*
 * This file is part of mpv.
 *
 * mpv is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * mpv is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
 */

#include <assert.h>
#include <windows.h>
#include <d3d11.h>

#include <EGL/egl.h>
#include <EGL/eglext.h>

#include "angle_dynamic.h"

#include "common/common.h"
#include "osdep/timer.h"
#include "osdep/windows_utils.h"
#include "video/out/gpu/hwdec.h"
#include "ra_gl.h"
#include "video/hwdec.h"
#include "video/decode/d3d.h"

#ifndef EGL_D3D_TEXTURE_SUBRESOURCE_ID_ANGLE
#define EGL_D3D_TEXTURE_SUBRESOURCE_ID_ANGLE 0x33AB
#endif

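// Interop between D3D11VA decoder output and ANGLE's GL backend: decoded
// NV12 D3D11 textures are posted into an EGLStream and consumed as
// GL_TEXTURE_EXTERNAL_OES textures, one per plane.

// Per-interop state: the D3D11 device shared with the decoder, and the
// EGLStream entry points resolved at runtime.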
struct priv_owner {
    struct mp_hwdec_ctx hwctx;

    ID3D11Device *d3d11_device;
    EGLDisplay egl_display;

    // EGL_KHR_stream
    EGLStreamKHR (EGLAPIENTRY *CreateStreamKHR)(EGLDisplay dpy,
                                                const EGLint *attrib_list);
    EGLBoolean (EGLAPIENTRY *DestroyStreamKHR)(EGLDisplay dpy,
                                               EGLStreamKHR stream);

    // EGL_KHR_stream_consumer_gltexture
    EGLBoolean (EGLAPIENTRY *StreamConsumerAcquireKHR)
                                (EGLDisplay dpy, EGLStreamKHR stream);
    EGLBoolean (EGLAPIENTRY *StreamConsumerReleaseKHR)
                                (EGLDisplay dpy, EGLStreamKHR stream);

    // EGL_NV_stream_consumer_gltexture_yuv
    EGLBoolean (EGLAPIENTRY *StreamConsumerGLTextureExternalAttribsNV)
                (EGLDisplay dpy, EGLStreamKHR stream, EGLAttrib *attrib_list);

    // EGL_ANGLE_stream_producer_d3d_texture_nv12
    EGLBoolean (EGLAPIENTRY *CreateStreamProducerD3DTextureNV12ANGLE)
            (EGLDisplay dpy, EGLStreamKHR stream, const EGLAttrib *attrib_list);
    EGLBoolean (EGLAPIENTRY *StreamPostD3DTextureNV12ANGLE)
            (EGLDisplay dpy, EGLStreamKHR stream, void *texture,
             const EGLAttrib *attrib_list);
};

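// Per-mapper state: the EGLStream and the per-plane GL consumer textures.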
struct priv {
    EGLStreamKHR egl_stream;
    GLuint gl_textures[2];
};

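// Unregister the exported hwdec device and drop the reference on ANGLE's
// D3D11 device taken in init().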
static void uninit(struct ra_hwdec *hw)
{
    struct priv_owner *p = hw->priv;

    if (p->hwctx.ctx)
        hwdec_devices_remove(hw->devs, &p->hwctx);

    if (p->d3d11_device)
        ID3D11Device_Release(p->d3d11_device);
}

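// One-time setup: check that the current context is an ANGLE EGL/GL context
// with the required extensions, resolve the EGLStream entry points, fetch
// the ID3D11Device backing the EGL display, and publish it as the hwdec
// device.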
static int init(struct ra_hwdec *hw)
{
    struct priv_owner *p = hw->priv;
    HRESULT hr;

    if (!ra_is_gl(hw->ra))
        return -1;
    if (!angle_load())
        return -1;

    EGLDisplay egl_display = eglGetCurrentDisplay();
    if (!egl_display)
        return -1;

    if (!eglGetCurrentContext())
        return -1;

    GL *gl = ra_gl_get(hw->ra);

    const char *exts = eglQueryString(egl_display, EGL_EXTENSIONS);
    if (!exts || !strstr(exts, "EGL_ANGLE_d3d_share_handle_client_buffer") ||
        !strstr(exts, "EGL_ANGLE_stream_producer_d3d_texture_nv12") ||
        !(strstr(gl->extensions, "GL_OES_EGL_image_external_essl3") ||
          gl->es == 200) ||
        !strstr(exts, "EGL_EXT_device_query") ||
        !(gl->mpgl_caps & MPGL_CAP_TEX_RG))
        return -1;

    p->egl_display = egl_display;

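    // Resolve the EGLStream producer/consumer entry points; as extension
    // functions they are only reachable via eglGetProcAddress().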
    p->CreateStreamKHR = (void *)eglGetProcAddress("eglCreateStreamKHR");
    p->DestroyStreamKHR = (void *)eglGetProcAddress("eglDestroyStreamKHR");
    p->StreamConsumerAcquireKHR =
        (void *)eglGetProcAddress("eglStreamConsumerAcquireKHR");
    p->StreamConsumerReleaseKHR =
        (void *)eglGetProcAddress("eglStreamConsumerReleaseKHR");
    p->StreamConsumerGLTextureExternalAttribsNV =
        (void *)eglGetProcAddress("eglStreamConsumerGLTextureExternalAttribsNV");
    p->CreateStreamProducerD3DTextureNV12ANGLE =
        (void *)eglGetProcAddress("eglCreateStreamProducerD3DTextureNV12ANGLE");
    p->StreamPostD3DTextureNV12ANGLE =
        (void *)eglGetProcAddress("eglStreamPostD3DTextureNV12ANGLE");

    if (!p->CreateStreamKHR || !p->DestroyStreamKHR ||
        !p->StreamConsumerAcquireKHR || !p->StreamConsumerReleaseKHR ||
        !p->StreamConsumerGLTextureExternalAttribsNV ||
        !p->CreateStreamProducerD3DTextureNV12ANGLE ||
        !p->StreamPostD3DTextureNV12ANGLE)
    {
        MP_ERR(hw, "Failed to load some EGLStream functions.\n");
        goto fail;
    }

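    // GLSL extensions the renderer has to enable in order to sample the
    // external (EGLStream-backed) textures.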
    static const char *es2_exts[] = {"GL_NV_EGL_stream_consumer_external", 0};
    static const char *es3_exts[] = {"GL_NV_EGL_stream_consumer_external",
                                     "GL_OES_EGL_image_external_essl3", 0};
    hw->glsl_extensions = gl->es == 200 ? es2_exts : es3_exts;

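    // Query the EGLDeviceEXT behind the display, then the ID3D11Device that
    // ANGLE created on top of it; this device is later exported to the
    // decoder through the hwdec context.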
    PFNEGLQUERYDISPLAYATTRIBEXTPROC p_eglQueryDisplayAttribEXT =
        (void *)eglGetProcAddress("eglQueryDisplayAttribEXT");
    PFNEGLQUERYDEVICEATTRIBEXTPROC p_eglQueryDeviceAttribEXT =
        (void *)eglGetProcAddress("eglQueryDeviceAttribEXT");
    if (!p_eglQueryDisplayAttribEXT || !p_eglQueryDeviceAttribEXT)
        goto fail;

    EGLAttrib device = 0;
    if (!p_eglQueryDisplayAttribEXT(egl_display, EGL_DEVICE_EXT, &device))
        goto fail;
    EGLAttrib d3d_device = 0;
    if (!p_eglQueryDeviceAttribEXT((EGLDeviceEXT)device,
                                   EGL_D3D11_DEVICE_ANGLE, &d3d_device))
    {
        MP_ERR(hw, "Could not get EGL_D3D11_DEVICE_ANGLE from ANGLE.\n");
        goto fail;
    }

    p->d3d11_device = (ID3D11Device *)d3d_device;
    if (!p->d3d11_device)
        goto fail;
    ID3D11Device_AddRef(p->d3d11_device);

    if (!d3d11_check_decoding(p->d3d11_device)) {
        MP_VERBOSE(hw, "D3D11 video decoding not supported on this system.\n");
        goto fail;
    }

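    // Decoding and rendering may access the device from different threads,
    // so enable ID3D10Multithread protection on it.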
    ID3D10Multithread *multithread;
    hr = ID3D11Device_QueryInterface(p->d3d11_device, &IID_ID3D10Multithread,
                                     (void **)&multithread);
    if (FAILED(hr)) {
        MP_ERR(hw, "Failed to get Multithread interface: %s\n",
               mp_HRESULT_to_str(hr));
        goto fail;
    }
    ID3D10Multithread_SetMultithreadProtected(multithread, TRUE);
    ID3D10Multithread_Release(multithread);

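    // Export the D3D11 device (plus an av_device_ref wrapper) through the
    // hwdec context so decoding happens on this same device.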
    p->hwctx = (struct mp_hwdec_ctx){
        .type = HWDEC_D3D11VA,
        .driver_name = hw->driver->name,
        .ctx = p->d3d11_device,
        .av_device_ref = d3d11_wrap_device_ref(p->d3d11_device),
    };
    hwdec_devices_add(hw->devs, &p->hwctx);

    return 0;
fail:
    return -1;
}

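// Tear down per-mapper state: the EGLStream and the per-plane GL textures.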
static void mapper_uninit(struct ra_hwdec_mapper *mapper)
{
    struct priv_owner *o = mapper->owner->priv;
    struct priv *p = mapper->priv;
    GL *gl = ra_gl_get(mapper->ra);

    if (p->egl_stream)
        o->DestroyStreamKHR(o->egl_display, p->egl_stream);
    p->egl_stream = 0;

    gl->DeleteTextures(2, p->gl_textures);
}

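// Build the EGLStream pipeline for one mapper: GL external textures (one per
// NV12 plane) act as the stream consumer, with ANGLE's D3D NV12 texture
// producer on the other end; frames are then posted per-map in mapper_map().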
static int mapper_init(struct ra_hwdec_mapper *mapper)
{
    struct priv_owner *o = mapper->owner->priv;
    struct priv *p = mapper->priv;
    GL *gl = ra_gl_get(mapper->ra);

    if (mapper->src_params.hw_subfmt != IMGFMT_NV12) {
        MP_FATAL(mapper, "Format not supported.\n");
        return -1;
    }

    mapper->dst_params = mapper->src_params;
    mapper->dst_params.imgfmt = mapper->src_params.hw_subfmt;
    mapper->dst_params.hw_subfmt = 0;

    // The texture units need to be bound during init only, and are free for
    // use again after the initialization here is done.
    int texunits = 0; // [texunits, texunits + num_planes)
    int num_planes = 2;
    int gl_target = GL_TEXTURE_EXTERNAL_OES;

    p->egl_stream = o->CreateStreamKHR(o->egl_display, (EGLint[]){EGL_NONE});
    if (!p->egl_stream)
        goto fail;

    for (int n = 0; n < num_planes; n++) {
        gl->ActiveTexture(GL_TEXTURE0 + texunits + n);
        gl->GenTextures(1, &p->gl_textures[n]);
        gl->BindTexture(gl_target, p->gl_textures[n]);
        gl->TexParameteri(gl_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        gl->TexParameteri(gl_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        gl->TexParameteri(gl_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        gl->TexParameteri(gl_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    }

    EGLAttrib attrs[] = {
        EGL_COLOR_BUFFER_TYPE, EGL_YUV_BUFFER_EXT,
        EGL_YUV_NUMBER_OF_PLANES_EXT, num_planes,
        EGL_YUV_PLANE0_TEXTURE_UNIT_NV, texunits + 0,
        EGL_YUV_PLANE1_TEXTURE_UNIT_NV, texunits + 1,
        EGL_NONE,
    };

    if (!o->StreamConsumerGLTextureExternalAttribsNV(o->egl_display, p->egl_stream,
                                                     attrs))
        goto fail;

    if (!o->CreateStreamProducerD3DTextureNV12ANGLE(o->egl_display, p->egl_stream,
                                                    (EGLAttrib[]){EGL_NONE}))
        goto fail;

    for (int n = 0; n < num_planes; n++) {
        gl->ActiveTexture(GL_TEXTURE0 + texunits + n);
        gl->BindTexture(gl_target, 0);
    }
    gl->ActiveTexture(GL_TEXTURE0);
    return 0;
fail:
    gl->ActiveTexture(GL_TEXTURE0);
    MP_ERR(mapper, "Failed to create EGLStream\n");
    return -1;
}

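// Map one frame: post the decoder's ID3D11Texture2D (planes[0]) together
// with its array slice index (planes[1]) to the producer side of the
// EGLStream created in mapper_init().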
static int mapper_map(struct ra_hwdec_mapper *mapper)
|
2016-04-27 11:49:47 +00:00
|
|
|
{
|
vo_opengl: separate hwdec context and mapping, port it to use ra
This does two separate rather intrusive things:
1. Make the hwdec context (which does initialization, provides the
device to the decoder, and other basic state) and frame mapping
(getting textures from a mp_image) separate. This is more
flexible, and you could map multiple images at once. It will
help removing some hwdec special-casing from video.c.
2. Switch all hwdec API use to ra. Of course all code is still
GL specific, but in theory it would be possible to support other
backends. The most important change is that the hwdec interop
returns ra objects, instead of anything GL specific. This removes
the last dependency on GL-specific header files from video.c.
I'm mixing these separate changes because both requires essentially
rewriting all the glue code, so better do them at once. For the same
reason, this change isn't done incrementally.
hwdec_ios.m is untested, since I can't test it. Apart from superficial
mistakes, this also requires dealing with Apple's texture format
fuckups: they force you to use GL_LUMINANCE[_ALPHA] instead of GL_RED
and GL_RG. We also need to report the correct format via ra_tex to
the renderer, which is done by find_la_variant(). It's unknown whether
this works correctly.
hwdec_rpi.c as well as vo_rpi.c are still broken. (I need to pull my
RPI out of a dusty pile of devices and cables, so, later.)
2017-08-10 15:48:33 +00:00
|
|
|
struct priv_owner *o = mapper->owner->priv;
|
|
|
|
struct priv *p = mapper->priv;

    ID3D11Texture2D *d3d_tex = (void *)mapper->src->planes[0];
    int d3d_subindex = (intptr_t)mapper->src->planes[1];

    /* Note ("video: remove d3d11 video processor use from OpenGL interop"):
     * a video filter (d3d11vpp) now uses the D3D11 video processor, so the
     * VO interop no longer runs one itself to convert formats ANGLE cannot
     * take directly into an RGB texture. The filter is auto-inserted when
     * needed, the RGB mapping code lives in its own interop backend, and new
     * image formats enforce these constraints. The auto-insert mechanism is
     * deliberately dumb and serves only this purpose; format negotiation in
     * the filter chain is generally not pretty and partly broken (libavfilter
     * uses a different mechanism, so vf_lavfi needs a hack that only works
     * because hwaccel and non-hwaccel formats are strictly separated). The
     * RGB interop backend is kept only for older ANGLE versions, because it
     * is isolated and useful for comparing old and new behavior, and will be
     * removed eventually. When ANGLE has NV12 interop, P010 is converted to
     * NV12 by the video processor instead of to RGB via the old mechanism.
     */

    if (!d3d_tex)
        return -1;
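
    // The attribute list selects which subresource (array slice) of the
    // decoder texture the frame lives in.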
    EGLAttrib attrs[] = {
        EGL_D3D_TEXTURE_SUBRESOURCE_ID_ANGLE, d3d_subindex,
        EGL_NONE,
    };
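
    // Hand the frame to ANGLE by posting the D3D11 texture to the EGL stream.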
    if (!o->StreamPostD3DTextureNV12ANGLE(o->egl_display, p->egl_stream,
                                          (void *)d3d_tex, attrs))
    {
        // ANGLE changed the enum ID of this without warning at one point.
        attrs[0] = attrs[0] == 0x33AB ? 0x3AAB : 0x33AB;
        if (!o->StreamPostD3DTextureNV12ANGLE(o->egl_display, p->egl_stream,
                                              (void *)d3d_tex, attrs))
            return -1;
    }
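
    // Acquire the posted frame on the consumer (GL) side of the stream.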
    if (!o->StreamConsumerAcquireKHR(o->egl_display, p->egl_stream))
        return -1;

    D3D11_TEXTURE2D_DESC texdesc;
    ID3D11Texture2D_GetDesc(d3d_tex, &texdesc);
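
    // Wrap the EGL stream's GL textures as ra_tex planes: n == 0 is the
    // full-size 1-component luma plane, n == 1 the half-size 2-component
    // chroma plane (NV12 layout).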
    for (int n = 0; n < 2; n++) {
        struct ra_tex_params params = {
            .dimensions = 2,
            .w = texdesc.Width / (n ? 2 : 1),
            .h = texdesc.Height / (n ? 2 : 1),
            .d = 1,
            .format = ra_find_unorm_format(mapper->ra, 1, n ? 2 : 1),
            .render_src = true,
            .src_linear = true,
            .external_oes = true,
        };
        if (!params.format)
            return -1;

        mapper->tex[n] = ra_create_wrapped_tex(mapper->ra, &params,
                                               p->gl_textures[n]);
        if (!mapper->tex[n])
            return -1;
    }

    return 0;
}
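
/* Unmap: free the per-plane ra_tex wrappers and release the frame back to
 * the EGL stream, so the producer can reuse the underlying D3D11 surface. */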
static void mapper_unmap(struct ra_hwdec_mapper *mapper)
{
    struct priv_owner *o = mapper->owner->priv;
    struct priv *p = mapper->priv;

    for (int n = 0; n < 2; n++)
        ra_tex_free(mapper->ra, &mapper->tex[n]);
    if (p->egl_stream)
        o->StreamConsumerReleaseKHR(o->egl_display, p->egl_stream);
}
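
/* Interop driver: NV12 D3D11VA surfaces shared with ANGLE over an EGL
 * stream, exposed to the renderer as two ra_tex planes. */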
const struct ra_hwdec_driver ra_hwdec_d3d11egl = {
    .name = "d3d11-egl",
    .priv_size = sizeof(struct priv_owner),
    .api = HWDEC_D3D11VA,
    .imgfmts = {IMGFMT_D3D11NV12, 0},
    .init = init,
    .uninit = uninit,
    .mapper = &(const struct ra_hwdec_mapper_driver){
        .priv_size = sizeof(struct priv),
        .init = mapper_init,
        .uninit = mapper_uninit,
        .map = mapper_map,
        .unmap = mapper_unmap,
    },
};