
vo_opengl_old: reject 9-15 bit formats if textures have less than 16 bit

For 9-15 bit material, cutting off the lower bits leads to a significant
quality reduction, because these formats leave the most significant bits
of the 16 bit container unused (e.g. 10 bit padded to 16 bit, transferred
as 8 bit -> only 2 significant bits left). 16 bit formats can still be
played this way, since cutting the lower bits merely reduces quality in
that case.
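
As a back-of-the-envelope illustration of the bit loss described above
(not part of the commit; plain C, assuming the usual layout with the
10 bit sample in the low bits of a 16 bit word):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 10 bit sample in bits 0-9 of a 16 bit word; bits 10-15 unused. */
        uint16_t sample = 0x03FF;       /* maximum 10 bit value */
        /* Transferring the plane as 8 bit keeps (roughly) the top 8 bits
         * of the 16 bit word, i.e. only the top 2 bits of the sample. */
        uint8_t texel = sample >> 8;    /* 0x03 -> 2 significant bits left */
        printf("16 bit word 0x%04X -> 8 bit texel 0x%02X\n", sample, texel);
        return 0;
    }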

This problem was encountered with the following GPU/driver combination:

OpenGL vendor string: Intel Open Source Technology Center
OpenGL renderer string: Mesa DRI Intel(R) 915GM x86/MMX/SSE2
OpenGL version string: 1.4 Mesa 9.0.1

It appears 16 bit support is rather common on GPUs, so testing the
actual texture depth wasn't needed until now. (There are some other Mesa
GPU/driver combinations which support 16 bit only when using RG textures
instead of LUMINANCE_ALPHA. This is due to OpenGL driver bugs.)
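
Conceptually, the probe the commit adds (see the diff below) boils down
to the following minimal standalone sketch: allocate a 16 bit texture,
then ask the driver how many bits it actually stored per component. This
is a hedged sketch, not the committed code: glFindFormat/glCreateClearTex
are mpv helpers, replaced here by direct GL calls, and GL_LUMINANCE16 /
GL_TEXTURE_LUMINANCE_SIZE are stand-ins for whatever format glFindFormat
picks (the committed code queries GL_TEXTURE_RED_SIZE or
GL_TEXTURE_INTENSITY_SIZE).

    #include <GL/gl.h>

    /* Requires a current legacy GL context. Drivers may silently allocate
     * fewer bits per component than requested; GL_TEXTURE_*_SIZE reports
     * what was really stored. */
    static int probe_tex_component_size(void)
    {
        GLuint tex;
        GLint size = 8;                 /* pessimistic default */
        glGenTextures(1, &tex);
        glBindTexture(GL_TEXTURE_2D, tex);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE16, 64, 64, 0,
                     GL_LUMINANCE, GL_UNSIGNED_SHORT, NULL);
        glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_LUMINANCE_SIZE,
                                 &size);
        glDeleteTextures(1, &tex);
        return size;                    /* 8 on the i915 combination above */
    }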
Author: wm4
Date:   2012-12-27 18:07:37 +01:00
Parent: 1e56e68701
Commit: d78bde15ab
3 changed files with 22 additions and 1 deletion


@@ -364,6 +364,7 @@ struct gl_functions gl_functions[] = {
             DEF_FN_HARD(DrawArrays),
             DEF_FN_HARD(GetString),
             DEF_FN_HARD(GetError),
+            DEF_FN_HARD(GetTexLevelParameteriv),
             {0}
         },
     },


@@ -318,6 +318,8 @@ struct GL {
     void (GLAPIENTRY *EnableClientState)(GLenum);
     void (GLAPIENTRY *DisableClientState)(GLenum);
     GLenum (GLAPIENTRY *GetError)(void);
+    void (GLAPIENTRY *GetTexLevelParameteriv)(GLenum, GLint, GLenum,
+                                              GLint *);
     void (GLAPIENTRY *GenBuffers)(GLsizei, GLuint *);
     void (GLAPIENTRY *DeleteBuffers)(GLsizei, const GLuint *);


@@ -75,6 +75,7 @@ struct gl_priv {
     uint32_t image_format;
     int many_fmts;
     int have_texture_rg;
+    int max_tex_component_size;
     int ati_hack;
     int force_pbo;
     int use_glFinish;
@ -347,6 +348,22 @@ static void autodetectGlExtensions(struct vo *vo)
}
p->video_eq.capabilities = eq_caps;
{
int target = p->use_rectangle == 1 ? GL_TEXTURE_RECTANGLE : GL_TEXTURE_2D;
GLint gl_texfmt;
GLenum gl_format, gl_type;
glFindFormat(IMGFMT_420P16, p->have_texture_rg, NULL, &gl_texfmt,
&gl_format, &gl_type);
glCreateClearTex(gl, target, gl_texfmt, gl_format, gl_type,
GL_LINEAR, 64, 64, 0);
int tex_size_token = p->have_texture_rg ? GL_TEXTURE_RED_SIZE
: GL_TEXTURE_INTENSITY_SIZE;
GLint size = 8;
gl->GetTexLevelParameteriv(target, 0, tex_size_token, &size);
mp_msg(MSGT_VO, MSGL_V, "[gl] 16 bit texture depth: %d.\n", size);
p->max_tex_component_size = size;
}
if (is_ati && (p->lscale == 1 || p->lscale == 2 || p->cscale == 1 || p->cscale == 2))
mp_msg(MSGT_VO, MSGL_WARN, "[gl] Selected scaling mode may be broken on"
" ATI cards.\n"
@@ -838,7 +855,8 @@ static int query_format(struct vo *vo, uint32_t format)
     if (format == IMGFMT_RGB24 || format == IMGFMT_RGBA)
         return caps;
     if (p->use_yuv && mp_get_chroma_shift(format, NULL, NULL, &depth) &&
-        (depth == 8 || depth == 16 || glYUVLargeRange(p->use_yuv)) &&
+        (depth == 8 || depth == 16 ||
+         p->max_tex_component_size >= 16 && glYUVLargeRange(p->use_yuv)) &&
         (IMGFMT_IS_YUVP16_NE(format) || !IMGFMT_IS_YUVP16(format)))
         return caps;
     // HACK, otherwise we get only b&w with some filters (e.g. -vf eq)
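
The new acceptance rule, restated in isolation (a sketch, not the
committed code; the explicit parentheses make visible the grouping that
C operator precedence already implies in the diff above):

    /* 8 and 16 bit planes are always acceptable; 9-15 bit planes are
     * accepted only when the driver really stores >= 16 bits per texture
     * component (and the shader path can scale the range up). */
    static int depth_acceptable(int depth, int max_tex_component_size,
                                int yuv_large_range)
    {
        return depth == 8 || depth == 16 ||
               (max_tex_component_size >= 16 && yuv_large_range);
    }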