2010-01-30 16:57:40 +00:00
|
|
|
/*
|
|
|
|
* This file is part of MPlayer.
|
|
|
|
*
|
|
|
|
* MPlayer is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* MPlayer is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along
|
|
|
|
* with MPlayer; if not, write to the Free Software Foundation, Inc.,
|
|
|
|
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
*/
|
|
|
|
|
2002-04-07 20:21:37 +00:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <inttypes.h>
|
2012-07-31 21:37:56 +00:00
|
|
|
#include <sys/types.h>
|
2002-04-07 20:21:37 +00:00
|
|
|
|
2005-11-18 14:39:25 +00:00
|
|
|
#include "config.h"
|
2012-11-09 00:06:43 +00:00
|
|
|
#include "core/mp_msg.h"
|
|
|
|
#include "core/options.h"
|
2002-04-07 20:21:37 +00:00
|
|
|
|
2012-11-09 00:06:43 +00:00
|
|
|
#include "video/img_format.h"
|
|
|
|
#include "video/mp_image.h"
|
2002-04-07 20:21:37 +00:00
|
|
|
#include "vf.h"
|
2012-11-09 00:06:43 +00:00
|
|
|
#include "video/fmt-conversion.h"
|
|
|
|
#include "compat/mpbswap.h"
|
2002-04-07 20:21:37 +00:00
|
|
|
|
2012-11-09 00:06:43 +00:00
|
|
|
#include "video/sws_utils.h"
|
2002-04-07 20:21:37 +00:00
|
|
|
|
2012-11-09 00:06:43 +00:00
|
|
|
#include "video/csputils.h"
|
|
|
|
#include "video/out/vo.h"
|
video, options: implement better YUV->RGB conversion control
Rewrite control of the colorspace and input/output level parameters
used in YUV-RGB conversions, replacing VO-specific suboptions with new
common options and adding configuration support to more cases.
Add new option --colormatrix which selects the colorspace the original
video is assumed to have in YUV->RGB conversions. The default
behavior changes from assuming BT.601 to colorspace autoselection
between BT.601 and BT.709 using a simple heuristic based on video
size. Add new options --colormatrix-input-range and
--colormatrix-output-range which select input YUV and output RGB range.
Disable the previously existing VO-specific colorspace and level
conversion suboptions in vo_gl and vo_vdpau. Remove the
"yuv_colorspace" property and replace it with one named "colormatrix"
and semantics matching the new option. Add new properties matching the
options for level conversion.
Colorspace selection is currently supported by vo_gl, vo_vdpau, vo_xv
and vf_scale, and all can change it at runtime (previously only
vo_vdpau and vo_xv could). vo_vdpau now uses the same conversion
matrix generation as vo_gl instead of libvdpau functionality; the main
functional difference is that the "contrast" equalizer control behaves
somewhat differently (it scales the Y component around 1/2 instead of
around 0, so that contrast 0 makes the image gray rather than black).
vo_xv does not support level conversion. vf_scale supports range
setting for input, but always outputs full-range RGB.
The value of the slave properties is the policy setting used for
conversions. This means they can be set to any value regardless of
whether the current VO supports that value or whether there currently
even is any video. Possibly separate properties could be added to
query the conversion actually used at the moment, if any.
Because the colorspace and level settings are now set with a single
VF/VO control call, the return value of that is no longer used to
signal whether all the settings are actually supported. Instead code
should set all the details it can support, and ignore the rest. The
core will use GET_YUV_COLORSPACE to check which colorspace details
have been set and which not. In other words, the return value for
SET_YUV_COLORSPACE only signals whether any kind of YUV colorspace
conversion handling exists at all, and VOs have to take care to return
the actual state with GET_YUV_COLORSPACE instead.
To be changed in later commits: add missing option documentation.
2011-10-15 21:50:21 +00:00
|
|
|
|
2012-11-09 00:06:43 +00:00
|
|
|
#include "core/m_option.h"
|
|
|
|
#include "core/m_struct.h"
|
2003-03-15 18:01:02 +00:00
|
|
|
|
|
|
|
static struct vf_priv_s {
|
2013-07-18 11:13:03 +00:00
|
|
|
int w, h;
|
2011-12-12 20:16:05 +00:00
|
|
|
int cfg_w, cfg_h;
|
2002-06-24 01:05:41 +00:00
|
|
|
int v_chr_drop;
|
2004-09-18 00:08:17 +00:00
|
|
|
double param[2];
|
2002-04-07 21:33:42 +00:00
|
|
|
unsigned int fmt;
|
2003-02-23 22:05:55 +00:00
|
|
|
struct SwsContext *ctx;
|
2003-12-29 14:16:07 +00:00
|
|
|
int interlaced;
|
2006-02-18 21:12:56 +00:00
|
|
|
int noup;
|
2006-07-24 10:36:06 +00:00
|
|
|
int accurate_rnd;
|
2007-01-28 16:48:01 +00:00
|
|
|
} const vf_priv_dflt = {
|
2013-07-18 11:13:03 +00:00
|
|
|
0, 0,
|
|
|
|
-1, -1,
|
|
|
|
0,
|
|
|
|
{SWS_PARAM_DEFAULT, SWS_PARAM_DEFAULT},
|
2002-04-07 20:21:37 +00:00
|
|
|
};
|
|
|
|
|
2012-11-24 20:27:34 +00:00
|
|
|
static int mp_sws_set_colorspace(struct SwsContext *sws,
|
2013-07-16 21:22:55 +00:00
|
|
|
struct mp_image_params *p);
|
2012-11-24 20:27:34 +00:00
|
|
|
|
2002-04-07 20:21:37 +00:00
|
|
|
//===========================================================================//
|
|
|
|
|
2013-07-18 11:13:03 +00:00
|
|
|
// Fallback output formats, scanned in order by find_best_out() when neither
// the input format nor a preferred conversion is accepted downstream.
// Roughly ordered from least to most lossy; terminated by 0.
static const unsigned int outfmt_list[] = {
// YUV:
    IMGFMT_444P,
    IMGFMT_444P16_LE,
    IMGFMT_444P16_BE,
    IMGFMT_444P14_LE,
    IMGFMT_444P14_BE,
    IMGFMT_444P12_LE,
    IMGFMT_444P12_BE,
    IMGFMT_444P10_LE,
    IMGFMT_444P10_BE,
    IMGFMT_444P9_LE,
    IMGFMT_444P9_BE,
    IMGFMT_422P,
    IMGFMT_422P16_LE,
    IMGFMT_422P16_BE,
    IMGFMT_422P14_LE,
    IMGFMT_422P14_BE,
    IMGFMT_422P12_LE,
    IMGFMT_422P12_BE,
    IMGFMT_422P10_LE,
    IMGFMT_422P10_BE,
    IMGFMT_422P9_LE,
    IMGFMT_422P9_BE,
    IMGFMT_420P,
    IMGFMT_420P16_LE,
    IMGFMT_420P16_BE,
    IMGFMT_420P14_LE,
    IMGFMT_420P14_BE,
    IMGFMT_420P12_LE,
    IMGFMT_420P12_BE,
    IMGFMT_420P10_LE,
    IMGFMT_420P10_BE,
    IMGFMT_420P9_LE,
    IMGFMT_420P9_BE,
    IMGFMT_420AP,
    IMGFMT_410P,
    IMGFMT_411P,
    IMGFMT_NV12,
    IMGFMT_NV21,
    IMGFMT_YUYV,
    IMGFMT_UYVY,
    IMGFMT_440P,
// RGB and grayscale (Y8 and Y800):
    IMGFMT_BGR32,
    IMGFMT_RGB32,
    IMGFMT_ABGR,
    IMGFMT_ARGB,
    IMGFMT_BGRA,
    IMGFMT_RGBA,
    IMGFMT_BGR24,
    IMGFMT_RGB24,
    IMGFMT_GBRP,
    IMGFMT_RGB48_LE,
    IMGFMT_RGB48_BE,
    IMGFMT_BGR16,
    IMGFMT_RGB16,
    IMGFMT_BGR15,
    IMGFMT_RGB15,
    IMGFMT_BGR12,
    IMGFMT_RGB12,
    IMGFMT_Y8,
    IMGFMT_BGR8,
    IMGFMT_RGB8,
    IMGFMT_BGR4,
    IMGFMT_RGB4,
    IMGFMT_RGB4_BYTE,
    IMGFMT_BGR4_BYTE,
    IMGFMT_MONO,
    IMGFMT_MONO_W,
    0  // sentinel: end of list
};
|
|
|
|
|
2010-01-23 19:00:09 +00:00
|
|
|
/**
|
|
|
|
* A list of preferred conversions, in order of preference.
|
|
|
|
* This should be used for conversions that e.g. involve no scaling
|
|
|
|
* or to stop vf_scale from choosing a conversion that has no
|
|
|
|
* fast assembler implementation.
|
|
|
|
*/
|
|
|
|
static int preferred_conversions[][2] = {
|
video: decouple internal pixel formats from FourCCs
mplayer's video chain traditionally used FourCCs for pixel formats. For
example, it used IMGFMT_YV12 for 4:2:0 YUV, which was defined to the
string 'YV12' interpreted as unsigned int. Additionally, it used to
encode information into the numeric values of some formats. The RGB
formats had their bit depth and endian encoded into the least
significant byte. Extended planar formats (420P10 etc.) had chroma
shift, endian, and component bit depth encoded. (This has been removed
in recent commits.)
Replace the FourCC mess with a simple enum. Remove all the redundant
formats like YV12/I420/IYUV. Replace some image format names by
something more intuitive, most importantly IMGFMT_YV12 -> IMGFMT_420P.
Add img_fourcc.h, which contains the old IDs for code that actually uses
FourCCs. Change the way demuxers, that output raw video, identify the
video format: they set either MP_FOURCC_RAWVIDEO or MP_FOURCC_IMGFMT to
request the rawvideo decoder, and sh_video->imgfmt specifies the pixel
format. Like the previous hack, this is supposed to avoid the need for
a complete codecs.cfg entry per format, or other lookup tables. (Note
that the RGB raw video FourCCs mostly rely on ffmpeg's mappings for NUT
raw video, but this is still considered better than adding a raw video
decoder - even if trivial, it would be full of annoying lookup tables.)
The TV code has not been tested.
Some corrective changes regarding endian and other image format flags
creep in.
2012-12-23 19:03:30 +00:00
|
|
|
{IMGFMT_YUYV, IMGFMT_UYVY},
|
|
|
|
{IMGFMT_YUYV, IMGFMT_422P},
|
|
|
|
{IMGFMT_UYVY, IMGFMT_YUYV},
|
2010-01-23 19:00:09 +00:00
|
|
|
{IMGFMT_UYVY, IMGFMT_422P},
|
video: decouple internal pixel formats from FourCCs
mplayer's video chain traditionally used FourCCs for pixel formats. For
example, it used IMGFMT_YV12 for 4:2:0 YUV, which was defined to the
string 'YV12' interpreted as unsigned int. Additionally, it used to
encode information into the numeric values of some formats. The RGB
formats had their bit depth and endian encoded into the least
significant byte. Extended planar formats (420P10 etc.) had chroma
shift, endian, and component bit depth encoded. (This has been removed
in recent commits.)
Replace the FourCC mess with a simple enum. Remove all the redundant
formats like YV12/I420/IYUV. Replace some image format names by
something more intuitive, most importantly IMGFMT_YV12 -> IMGFMT_420P.
Add img_fourcc.h, which contains the old IDs for code that actually uses
FourCCs. Change the way demuxers, that output raw video, identify the
video format: they set either MP_FOURCC_RAWVIDEO or MP_FOURCC_IMGFMT to
request the rawvideo decoder, and sh_video->imgfmt specifies the pixel
format. Like the previous hack, this is supposed to avoid the need for
a complete codecs.cfg entry per format, or other lookup tables. (Note
that the RGB raw video FourCCs mostly rely on ffmpeg's mappings for NUT
raw video, but this is still considered better than adding a raw video
decoder - even if trivial, it would be full of annoying lookup tables.)
The TV code has not been tested.
Some corrective changes regarding endian and other image format flags
creep in.
2012-12-23 19:03:30 +00:00
|
|
|
{IMGFMT_422P, IMGFMT_YUYV},
|
2010-01-23 19:00:09 +00:00
|
|
|
{IMGFMT_422P, IMGFMT_UYVY},
|
video: decouple internal pixel formats from FourCCs
mplayer's video chain traditionally used FourCCs for pixel formats. For
example, it used IMGFMT_YV12 for 4:2:0 YUV, which was defined to the
string 'YV12' interpreted as unsigned int. Additionally, it used to
encode information into the numeric values of some formats. The RGB
formats had their bit depth and endian encoded into the least
significant byte. Extended planar formats (420P10 etc.) had chroma
shift, endian, and component bit depth encoded. (This has been removed
in recent commits.)
Replace the FourCC mess with a simple enum. Remove all the redundant
formats like YV12/I420/IYUV. Replace some image format names by
something more intuitive, most importantly IMGFMT_YV12 -> IMGFMT_420P.
Add img_fourcc.h, which contains the old IDs for code that actually uses
FourCCs. Change the way demuxers, that output raw video, identify the
video format: they set either MP_FOURCC_RAWVIDEO or MP_FOURCC_IMGFMT to
request the rawvideo decoder, and sh_video->imgfmt specifies the pixel
format. Like the previous hack, this is supposed to avoid the need for
a complete codecs.cfg entry per format, or other lookup tables. (Note
that the RGB raw video FourCCs mostly rely on ffmpeg's mappings for NUT
raw video, but this is still considered better than adding a raw video
decoder - even if trivial, it would be full of annoying lookup tables.)
The TV code has not been tested.
Some corrective changes regarding endian and other image format flags
creep in.
2012-12-23 19:03:30 +00:00
|
|
|
{IMGFMT_420P10, IMGFMT_420P},
|
2012-03-25 21:12:25 +00:00
|
|
|
{IMGFMT_GBRP, IMGFMT_BGR24},
|
|
|
|
{IMGFMT_GBRP, IMGFMT_RGB24},
|
|
|
|
{IMGFMT_GBRP, IMGFMT_BGR32},
|
|
|
|
{IMGFMT_GBRP, IMGFMT_RGB32},
|
2012-12-19 11:04:38 +00:00
|
|
|
{IMGFMT_PAL8, IMGFMT_BGR32},
|
2013-05-01 14:16:03 +00:00
|
|
|
{IMGFMT_XYZ12, IMGFMT_RGB48},
|
2010-01-23 19:00:09 +00:00
|
|
|
{0, 0}
|
|
|
|
};
|
|
|
|
|
2013-07-18 11:13:03 +00:00
|
|
|
/**
 * Pick the best output format for a given input format.
 *
 * Candidates are tried in this order: in_format itself, then every
 * preferred_conversions entry whose source matches in_format, then the
 * generic outfmt_list. A format supported directly by the hardware wins
 * immediately; otherwise the first software-supported candidate is kept.
 * Returns 0 if nothing downstream accepts any candidate.
 */
static unsigned int find_best_out(vf_instance_t *vf, int in_format)
{
    unsigned int best = 0;
    int list_pos = -1;  // index into outfmt_list once the pref table is done
    int pref_pos = -1;  // index into preferred_conversions; -1: try in_format
    int fmt = 0;

    for (;;) {
        if (pref_pos < 0) {
            // First candidate: the unchanged input format.
            fmt = in_format;
            pref_pos = 0;
        } else if (list_pos < 0) {
            // Next: preferred conversions whose source matches in_format.
            while (preferred_conversions[pref_pos][0] &&
                   preferred_conversions[pref_pos][0] != in_format)
                pref_pos++;
            fmt = preferred_conversions[pref_pos++][1];
            // Hit the {0, 0} sentinel -> switch to the generic list.
            if (!fmt)
                list_pos = 0;
        }
        if (list_pos >= 0)
            fmt = outfmt_list[list_pos++];
        if (!fmt)
            break;  // all candidates exhausted

        int caps = vf_next_query_format(vf, fmt);

        mp_msg(MSGT_VFILTER, MSGL_DBG2, "scale: query(%s) -> %d\n",
               vo_format_name(fmt), caps & 3);
        if (caps & VFCAP_CSP_SUPPORTED_BY_HW) {
            best = fmt; // no conversion -> bingo!
            break;
        }
        if ((caps & VFCAP_CSP_SUPPORTED) && !best)
            best = fmt; // usable with conversion; keep looking for direct support
    }
    return best;
}
|
|
|
|
|
2013-07-16 21:22:55 +00:00
|
|
|
/**
 * Negotiate the output format/size and (re)create the swscale context.
 *
 * Decodes the user's cfg_w/cfg_h into a concrete output size (negative
 * values are magic: -1 = input size, -2 = derive from display aspect,
 * -3 = derive from input aspect, <= -8 = same plus round to multiple of 16),
 * applies the "noup" upscale suppression, rounds sizes as required by the
 * chosen output format, then rebuilds vf->priv->ctx and fills in *p.
 * Returns the next filter's reconfig result, or -1 on bad parameters or
 * swscale setup failure.
 */
static int reconfig(struct vf_instance *vf, struct mp_image_params *p, int flags)
{
    int width = p->w, height = p->h, d_width = p->d_w, d_height = p->d_h;
    unsigned int outfmt = p->imgfmt;
    unsigned int best = find_best_out(vf, outfmt);
    int int_sws_flags = 0;
    int round_w = 0, round_h = 0;
    SwsFilter *srcFilter, *dstFilter;
    enum PixelFormat dfmt, sfmt;

    if (!best) {
        // Nothing downstream accepts any candidate format.
        mp_msg(MSGT_VFILTER, MSGL_WARN,
               "SwScale: no supported outfmt found :(\n");
        return -1;
    }
    sfmt = imgfmt2pixfmt(outfmt);
    dfmt = imgfmt2pixfmt(best);

    vf->next->query_format(vf->next, best);

    // Start from the user-configured size each time we reconfigure.
    vf->priv->w = vf->priv->cfg_w;
    vf->priv->h = vf->priv->cfg_h;

    // Values <= -8 carry an extra "round to multiple of 16" request on top
    // of the -1/-2/-3 magic values; strip the offset and remember the flag.
    if (vf->priv->w <= -8) {
        vf->priv->w += 8;
        round_w = 1;
    }
    if (vf->priv->h <= -8) {
        vf->priv->h += 8;
        round_h = 1;
    }

    // Reject combinations the magic-value scheme cannot resolve
    // (e.g. both dimensions asking to be derived from the other).
    if (vf->priv->w < -3 || vf->priv->h < -3 ||
        (vf->priv->w < -1 && vf->priv->h < -1))
    {
        // TODO: establish a direct connection to the user's brain
        // and find out what the heck he thinks MPlayer should do
        // with this nonsense.
        mp_msg(MSGT_VFILTER, MSGL_ERR,
               "SwScale: EUSERBROKEN Check your parameters, they make no sense!\n");
        return -1;
    }

    // -1: keep the input size; 0: use the display size.
    if (vf->priv->w == -1)
        vf->priv->w = width;
    if (vf->priv->w == 0)
        vf->priv->w = d_width;

    if (vf->priv->h == -1)
        vf->priv->h = height;
    if (vf->priv->h == 0)
        vf->priv->h = d_height;

    // -3: derive from the other dimension via the input aspect;
    // -2: derive via the display aspect.
    if (vf->priv->w == -3)
        vf->priv->w = vf->priv->h * width / height;
    if (vf->priv->w == -2)
        vf->priv->w = vf->priv->h * d_width / d_height;

    if (vf->priv->h == -3)
        vf->priv->h = vf->priv->w * height / width;
    if (vf->priv->h == -2)
        vf->priv->h = vf->priv->w * d_height / d_width;

    // Round to the nearest multiple of 16 where requested above.
    if (round_w)
        vf->priv->w = ((vf->priv->w + 8) / 16) * 16;
    if (round_h)
        vf->priv->h = ((vf->priv->h + 8) / 16) * 16;

    // check for upscaling, now that all parameters had been applied
    // noup == 1 cancels if either dimension would grow; noup == 2 only
    // if both would.
    if (vf->priv->noup) {
        if ((vf->priv->w > width) + (vf->priv->h > height) >= vf->priv->noup) {
            vf->priv->w = width;
            vf->priv->h = height;
        }
    }

    // calculate the missing parameters:
    switch (best) {
    case IMGFMT_420P: /* YV12 needs w & h rounded to 2 */
    case IMGFMT_NV12:
    case IMGFMT_NV21:
        vf->priv->h = (vf->priv->h + 1) & ~1;
        /* fallthrough: subsampled-chroma formats also need even width */
    case IMGFMT_YUYV: /* YUY2 needs w rounded to 2 */
    case IMGFMT_UYVY:
        vf->priv->w = (vf->priv->w + 1) & ~1;
    }

    mp_msg(MSGT_VFILTER, MSGL_DBG2, "SwScale: scaling %dx%d %s to %dx%d %s \n",
           width, height, vo_format_name(outfmt), vf->priv->w, vf->priv->h,
           vo_format_name(best));

    // free old ctx:
    if (vf->priv->ctx)
        sws_freeContext(vf->priv->ctx);

    // new swscaler:
    sws_getFlagsAndFilterFromCmdLine(&int_sws_flags, &srcFilter, &dstFilter);
    int_sws_flags |= vf->priv->v_chr_drop << SWS_SRC_V_CHR_DROP_SHIFT;
    int_sws_flags |= vf->priv->accurate_rnd * SWS_ACCURATE_RND;
    // In interlaced mode each field is scaled separately, so the context
    // is created at half height (see scale()).
    vf->priv->ctx = sws_getContext(width, height >> vf->priv->interlaced,
                                   sfmt, vf->priv->w,
                                   vf->priv->h >> vf->priv->interlaced, dfmt,
                                   int_sws_flags, srcFilter, dstFilter,
                                   vf->priv->param);
    if (!vf->priv->ctx) {
        // error...
        mp_msg(MSGT_VFILTER, MSGL_WARN,
               "Couldn't init SwScaler for this setup\n");
        return -1;
    }
    vf->priv->fmt = best;

    // Compute new d_width and d_height, preserving aspect
    // while ensuring that both are >= output size in pixels.
    if (vf->priv->h * d_width > vf->priv->w * d_height) {
        d_width = vf->priv->h * d_width / d_height;
        d_height = vf->priv->h;
    } else {
        d_height = vf->priv->w * d_height / d_width;
        d_width = vf->priv->w;
    }
    //d_width=d_width*vf->priv->w/width;
    //d_height=d_height*vf->priv->h/height;
    p->w = vf->priv->w;
    p->h = vf->priv->h;
    p->d_w = d_width;
    p->d_h = d_height;
    p->imgfmt = best;

    mp_sws_set_colorspace(vf->priv->ctx, p);

    // In particular, fix up colorspace/levels if YUV<->RGB conversion is
    // performed.
    p->colorlevels = MP_CSP_LEVELS_TV; // in case output is YUV
    mp_image_params_guess_csp(p);

    return vf_next_reconfig(vf, p, flags);
}
|
|
|
|
|
2013-07-18 11:13:03 +00:00
|
|
|
static void scale(struct SwsContext *sws1, struct SwsContext *sws2,
|
|
|
|
uint8_t *src[MP_MAX_PLANES], int src_stride[MP_MAX_PLANES],
|
|
|
|
int y, int h, uint8_t *dst[MP_MAX_PLANES],
|
|
|
|
int dst_stride[MP_MAX_PLANES],
|
|
|
|
int interlaced)
|
|
|
|
{
|
|
|
|
const uint8_t *src2[MP_MAX_PLANES] = {src[0], src[1], src[2], src[3]};
|
2007-02-17 12:58:35 +00:00
|
|
|
|
2013-07-18 11:13:03 +00:00
|
|
|
if (interlaced) {
|
2003-12-29 14:16:07 +00:00
|
|
|
int i;
|
2013-07-18 11:13:03 +00:00
|
|
|
uint8_t *dst2[MP_MAX_PLANES] = {dst[0], dst[1], dst[2], dst[3]};
|
|
|
|
int src_stride2[MP_MAX_PLANES] = {2 * src_stride[0], 2 * src_stride[1],
|
|
|
|
2 * src_stride[2], 2 * src_stride[3]};
|
|
|
|
int dst_stride2[MP_MAX_PLANES] = {2 * dst_stride[0], 2 * dst_stride[1],
|
|
|
|
2 * dst_stride[2], 2 * dst_stride[3]};
|
|
|
|
sws_scale(sws1, src2, src_stride2, y >> 1, h >> 1, dst2, dst_stride2);
|
|
|
|
for (i = 0; i < MP_MAX_PLANES; i++) {
|
2003-12-29 14:16:07 +00:00
|
|
|
src2[i] += src_stride[i];
|
|
|
|
dst2[i] += dst_stride[i];
|
|
|
|
}
|
2013-07-18 11:13:03 +00:00
|
|
|
sws_scale(sws2, src2, src_stride2, y >> 1, h >> 1, dst2, dst_stride2);
|
|
|
|
} else {
|
2010-01-03 17:12:12 +00:00
|
|
|
sws_scale(sws1, src2, src_stride, y, h, dst, dst_stride);
|
2009-07-06 23:26:13 +00:00
|
|
|
}
|
2003-12-29 14:16:07 +00:00
|
|
|
}
|
|
|
|
|
video/filter: change filter API, use refcounting, remove filter DR
Change the entire filter API to use reference counted images instead
of vf_get_image().
Remove filter "direct rendering". This was useful for vf_expand and (in
rare cases) vf_sub: DR allowed these filters to pass a cropped image to
the filters before them. Then, on filtering, the image was "uncropped",
so that black bars could be added around the image without copying. This
means that in some cases, vf_expand will be slower (-vf gradfun,expand
for example).
Note that another form of DR used for in-place filters has been replaced
by simpler logic. Instead of trying to do DR, filters can check if the
image is writeable (with mp_image_is_writeable()), and do true in-place
if that's the case. This affects filters like vf_gradfun and vf_sub.
Everything has to support strides now. If something doesn't, making a
copy of the image data is required.
2012-11-05 13:25:04 +00:00
|
|
|
static struct mp_image *filter(struct vf_instance *vf, struct mp_image *mpi)
|
|
|
|
{
|
|
|
|
struct mp_image *dmpi = vf_alloc_out_image(vf);
|
|
|
|
mp_image_copy_attributes(dmpi, mpi);
|
2002-10-17 21:53:30 +00:00
|
|
|
|
2013-07-18 11:13:03 +00:00
|
|
|
scale(vf->priv->ctx, vf->priv->ctx, mpi->planes, mpi->stride, 0, mpi->h,
|
|
|
|
dmpi->planes, dmpi->stride, vf->priv->interlaced);
|
2009-07-06 23:26:13 +00:00
|
|
|
|
video/filter: change filter API, use refcounting, remove filter DR
Change the entire filter API to use reference counted images instead
of vf_get_image().
Remove filter "direct rendering". This was useful for vf_expand and (in
rare cases) vf_sub: DR allowed these filters to pass a cropped image to
the filters before them. Then, on filtering, the image was "uncropped",
so that black bars could be added around the image without copying. This
means that in some cases, vf_expand will be slower (-vf gradfun,expand
for example).
Note that another form of DR used for in-place filters has been replaced
by simpler logic. Instead of trying to do DR, filters can check if the
image is writeable (with mp_image_is_writeable()), and do true in-place
if that's the case. This affects filters like vf_gradfun and vf_sub.
Everything has to support strides now. If something doesn't, making a
copy of the image data is required.
2012-11-05 13:25:04 +00:00
|
|
|
talloc_free(mpi);
|
|
|
|
return dmpi;
|
2002-04-07 20:21:37 +00:00
|
|
|
}
|
|
|
|
|
2013-07-18 11:13:03 +00:00
|
|
|
// Handle filter control requests. Implements the brightness/contrast/
// saturation equalizer on top of libswscale's colorspace details; all
// other requests are forwarded down the filter chain.
// The sws values are 16.16 fixed point; the equalizer protocol uses
// integer percentages (brightness -100..100, contrast/saturation offset
// by 100), hence the <<16 / >>16 conversions with rounding below.
static int control(struct vf_instance *vf, int request, void *data)
{
    int *table;
    int *inv_table;
    int r;
    int brightness, contrast, saturation, srcRange, dstRange;
    vf_equalizer_t *eq;

    // Without a configured sws context there is nothing to query/set.
    if (vf->priv->ctx) {
        switch (request) {
        case VFCTRL_GET_EQUALIZER:
            // NOTE: fails (r < 0) e.g. when the destination format is YUV;
            // fall through to the next filter in that case.
            r = sws_getColorspaceDetails(vf->priv->ctx, &inv_table, &srcRange,
                                         &table, &dstRange, &brightness,
                                         &contrast, &saturation);
            if (r < 0)
                break;

            eq = data;
            // 16.16 fixed point -> percent, with rounding (+ 1 << 15).
            if (!strcmp(eq->item, "brightness"))
                eq->value = ((brightness * 100) + (1 << 15)) >> 16;
            else if (!strcmp(eq->item, "contrast"))
                eq->value = (((contrast * 100) + (1 << 15)) >> 16) - 100;
            else if (!strcmp(eq->item, "saturation"))
                eq->value = (((saturation * 100) + (1 << 15)) >> 16) - 100;
            else
                break; // unknown equalizer item: forward the request
            return CONTROL_TRUE;
        case VFCTRL_SET_EQUALIZER:
            // Fetch current details first so unrelated parameters are kept.
            r = sws_getColorspaceDetails(vf->priv->ctx, &inv_table, &srcRange,
                                         &table, &dstRange, &brightness,
                                         &contrast, &saturation);
            if (r < 0)
                break;

            eq = data;
            // Percent -> 16.16 fixed point, with rounding (+ 50 before /100).
            if (!strcmp(eq->item, "brightness"))
                brightness = ((eq->value << 16) + 50) / 100;
            else if (!strcmp(eq->item, "contrast"))
                contrast = (((eq->value + 100) << 16) + 50) / 100;
            else if (!strcmp(eq->item, "saturation"))
                saturation = (((eq->value + 100) << 16) + 50) / 100;
            else
                break; // unknown equalizer item: forward the request

            r = sws_setColorspaceDetails(vf->priv->ctx, inv_table, srcRange,
                                         table, dstRange, brightness, contrast,
                                         saturation);
            if (r < 0)
                break;

            return CONTROL_TRUE;
        default:
            break;
        }
    }

    // Not handled here: pass the request on to the next filter.
    return vf_next_control(vf, request, data);
}
|
|
|
|
|
video, options: implement better YUV->RGB conversion control
Rewrite control of the colorspace and input/output level parameters
used in YUV-RGB conversions, replacing VO-specific suboptions with new
common options and adding configuration support to more cases.
Add new option --colormatrix which selects the colorspace the original
video is assumed to have in YUV->RGB conversions. The default
behavior changes from assuming BT.601 to colorspace autoselection
between BT.601 and BT.709 using a simple heuristic based on video
size. Add new options --colormatrix-input-range and
--colormatrix-output-range which select input YUV and output RGB range.
Disable the previously existing VO-specific colorspace and level
conversion suboptions in vo_gl and vo_vdpau. Remove the
"yuv_colorspace" property and replace it with one named "colormatrix"
and semantics matching the new option. Add new properties matching the
options for level conversion.
Colorspace selection is currently supported by vo_gl, vo_vdpau, vo_xv
and vf_scale, and all can change it at runtime (previously only
vo_vdpau and vo_xv could). vo_vdpau now uses the same conversion
matrix generation as vo_gl instead of libvdpau functionality; the main
functional difference is that the "contrast" equalizer control behaves
somewhat differently (it scales the Y component around 1/2 instead of
around 0, so that contrast 0 makes the image gray rather than black).
vo_xv does not support level conversion. vf_scale supports range
setting for input, but always outputs full-range RGB.
The value of the slave properties is the policy setting used for
conversions. This means they can be set to any value regardless of
whether the current VO supports that value or whether there currently
even is any video. Possibly separate properties could be added to
query the conversion actually used at the moment, if any.
Because the colorspace and level settings are now set with a single
VF/VO control call, the return value of that is no longer used to
signal whether all the settings are actually supported. Instead code
should set all the details it can support, and ignore the rest. The
core will use GET_YUV_COLORSPACE to check which colorspace details
have been set and which not. In other words, the return value for
SET_YUV_COLORSPACE only signals whether any kind of YUV colorspace
conversion handling exists at all, and VOs have to take care to return
the actual state with GET_YUV_COLORSPACE instead.
To be changed in later commits: add missing option documentation.
2011-10-15 21:50:21 +00:00
|
|
|
// Map mp_csp colorspace identifiers to the corresponding libswscale
// SWS_CS_* constants. Entries not listed here are zero-initialized,
// which callers treat as "not supported by swscale".
static const int mp_csp_to_swscale[MP_CSP_COUNT] = {
    [MP_CSP_BT_601] = SWS_CS_ITU601,
    [MP_CSP_BT_709] = SWS_CS_ITU709,
    [MP_CSP_SMPTE_240M] = SWS_CS_SMPTE240M,
};
|
|
|
|
|
|
|
|
// Adjust the colorspace used for YUV->RGB conversion. On other conversions,
|
|
|
|
// do nothing or return an error.
|
|
|
|
// Return 0 on success and -1 on error.
|
2012-11-24 20:27:34 +00:00
|
|
|
static int mp_sws_set_colorspace(struct SwsContext *sws,
|
2013-07-16 21:22:55 +00:00
|
|
|
struct mp_image_params *p)
|
video, options: implement better YUV->RGB conversion control
Rewrite control of the colorspace and input/output level parameters
used in YUV-RGB conversions, replacing VO-specific suboptions with new
common options and adding configuration support to more cases.
Add new option --colormatrix which selects the colorspace the original
video is assumed to have in YUV->RGB conversions. The default
behavior changes from assuming BT.601 to colorspace autoselection
between BT.601 and BT.709 using a simple heuristic based on video
size. Add new options --colormatrix-input-range and
--colormatrix-output-range which select input YUV and output RGB range.
Disable the previously existing VO-specific colorspace and level
conversion suboptions in vo_gl and vo_vdpau. Remove the
"yuv_colorspace" property and replace it with one named "colormatrix"
and semantics matching the new option. Add new properties matching the
options for level conversion.
Colorspace selection is currently supported by vo_gl, vo_vdpau, vo_xv
and vf_scale, and all can change it at runtime (previously only
vo_vdpau and vo_xv could). vo_vdpau now uses the same conversion
matrix generation as vo_gl instead of libvdpau functionality; the main
functional difference is that the "contrast" equalizer control behaves
somewhat differently (it scales the Y component around 1/2 instead of
around 0, so that contrast 0 makes the image gray rather than black).
vo_xv does not support level conversion. vf_scale supports range
setting for input, but always outputs full-range RGB.
The value of the slave properties is the policy setting used for
conversions. This means they can be set to any value regardless of
whether the current VO supports that value or whether there currently
even is any video. Possibly separate properties could be added to
query the conversion actually used at the moment, if any.
Because the colorspace and level settings are now set with a single
VF/VO control call, the return value of that is no longer used to
signal whether all the settings are actually supported. Instead code
should set all the details it can support, and ignore the rest. The
core will use GET_YUV_COLORSPACE to check which colorspace details
have been set and which not. In other words, the return value for
SET_YUV_COLORSPACE only signals whether any kind of YUV colorspace
conversion handling exists at all, and VOs have to take care to return
the actual state with GET_YUV_COLORSPACE instead.
To be changed in later commits: add missing option documentation.
2011-10-15 21:50:21 +00:00
|
|
|
{
|
|
|
|
int *table, *inv_table;
|
|
|
|
int brightness, contrast, saturation, srcRange, dstRange;
|
|
|
|
|
|
|
|
// NOTE: returns an error if the destination format is YUV
|
|
|
|
if (sws_getColorspaceDetails(sws, &inv_table, &srcRange, &table, &dstRange,
|
|
|
|
&brightness, &contrast, &saturation) == -1)
|
|
|
|
goto error_out;
|
|
|
|
|
2013-07-16 21:22:55 +00:00
|
|
|
int sws_csp = mp_csp_to_swscale[p->colorspace];
|
video, options: implement better YUV->RGB conversion control
Rewrite control of the colorspace and input/output level parameters
used in YUV-RGB conversions, replacing VO-specific suboptions with new
common options and adding configuration support to more cases.
Add new option --colormatrix which selects the colorspace the original
video is assumed to have in YUV->RGB conversions. The default
behavior changes from assuming BT.601 to colorspace autoselection
between BT.601 and BT.709 using a simple heuristic based on video
size. Add new options --colormatrix-input-range and
--colormatrix-output-range which select input YUV and output RGB range.
Disable the previously existing VO-specific colorspace and level
conversion suboptions in vo_gl and vo_vdpau. Remove the
"yuv_colorspace" property and replace it with one named "colormatrix"
and semantics matching the new option. Add new properties matching the
options for level conversion.
Colorspace selection is currently supported by vo_gl, vo_vdpau, vo_xv
and vf_scale, and all can change it at runtime (previously only
vo_vdpau and vo_xv could). vo_vdpau now uses the same conversion
matrix generation as vo_gl instead of libvdpau functionality; the main
functional difference is that the "contrast" equalizer control behaves
somewhat differently (it scales the Y component around 1/2 instead of
around 0, so that contrast 0 makes the image gray rather than black).
vo_xv does not support level conversion. vf_scale supports range
setting for input, but always outputs full-range RGB.
The value of the slave properties is the policy setting used for
conversions. This means they can be set to any value regardless of
whether the current VO supports that value or whether there currently
even is any video. Possibly separate properties could be added to
query the conversion actually used at the moment, if any.
Because the colorspace and level settings are now set with a single
VF/VO control call, the return value of that is no longer used to
signal whether all the settings are actually supported. Instead code
should set all the details it can support, and ignore the rest. The
core will use GET_YUV_COLORSPACE to check which colorspace details
have been set and which not. In other words, the return value for
SET_YUV_COLORSPACE only signals whether any kind of YUV colorspace
conversion handling exists at all, and VOs have to take care to return
the actual state with GET_YUV_COLORSPACE instead.
To be changed in later commits: add missing option documentation.
2011-10-15 21:50:21 +00:00
|
|
|
if (sws_csp == 0) {
|
|
|
|
// colorspace not supported, go with a reasonable default
|
2013-07-16 21:22:55 +00:00
|
|
|
sws_csp = SWS_CS_ITU601;
|
video, options: implement better YUV->RGB conversion control
Rewrite control of the colorspace and input/output level parameters
used in YUV-RGB conversions, replacing VO-specific suboptions with new
common options and adding configuration support to more cases.
Add new option --colormatrix which selects the colorspace the original
video is assumed to have in YUV->RGB conversions. The default
behavior changes from assuming BT.601 to colorspace autoselection
between BT.601 and BT.709 using a simple heuristic based on video
size. Add new options --colormatrix-input-range and
--colormatrix-output-range which select input YUV and output RGB range.
Disable the previously existing VO-specific colorspace and level
conversion suboptions in vo_gl and vo_vdpau. Remove the
"yuv_colorspace" property and replace it with one named "colormatrix"
and semantics matching the new option. Add new properties matching the
options for level conversion.
Colorspace selection is currently supported by vo_gl, vo_vdpau, vo_xv
and vf_scale, and all can change it at runtime (previously only
vo_vdpau and vo_xv could). vo_vdpau now uses the same conversion
matrix generation as vo_gl instead of libvdpau functionality; the main
functional difference is that the "contrast" equalizer control behaves
somewhat differently (it scales the Y component around 1/2 instead of
around 0, so that contrast 0 makes the image gray rather than black).
vo_xv does not support level conversion. vf_scale supports range
setting for input, but always outputs full-range RGB.
The value of the slave properties is the policy setting used for
conversions. This means they can be set to any value regardless of
whether the current VO supports that value or whether there currently
even is any video. Possibly separate properties could be added to
query the conversion actually used at the moment, if any.
Because the colorspace and level settings are now set with a single
VF/VO control call, the return value of that is no longer used to
signal whether all the settings are actually supported. Instead code
should set all the details it can support, and ignore the rest. The
core will use GET_YUV_COLORSPACE to check which colorspace details
have been set and which not. In other words, the return value for
SET_YUV_COLORSPACE only signals whether any kind of YUV colorspace
conversion handling exists at all, and VOs have to take care to return
the actual state with GET_YUV_COLORSPACE instead.
To be changed in later commits: add missing option documentation.
2011-10-15 21:50:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* The swscale API for these is hardly documented.
|
|
|
|
* Apparently table/range only apply to YUV. Thus dstRange has no effect
|
|
|
|
* for YUV->RGB conversions, and conversions to limited-range RGB are
|
|
|
|
* not supported.
|
|
|
|
*/
|
2013-07-16 21:22:55 +00:00
|
|
|
srcRange = p->colorlevels == MP_CSP_LEVELS_PC;
|
video, options: implement better YUV->RGB conversion control
Rewrite control of the colorspace and input/output level parameters
used in YUV-RGB conversions, replacing VO-specific suboptions with new
common options and adding configuration support to more cases.
Add new option --colormatrix which selects the colorspace the original
video is assumed to have in YUV->RGB conversions. The default
behavior changes from assuming BT.601 to colorspace autoselection
between BT.601 and BT.709 using a simple heuristic based on video
size. Add new options --colormatrix-input-range and
--colormatrix-output-range which select input YUV and output RGB range.
Disable the previously existing VO-specific colorspace and level
conversion suboptions in vo_gl and vo_vdpau. Remove the
"yuv_colorspace" property and replace it with one named "colormatrix"
and semantics matching the new option. Add new properties matching the
options for level conversion.
Colorspace selection is currently supported by vo_gl, vo_vdpau, vo_xv
and vf_scale, and all can change it at runtime (previously only
vo_vdpau and vo_xv could). vo_vdpau now uses the same conversion
matrix generation as vo_gl instead of libvdpau functionality; the main
functional difference is that the "contrast" equalizer control behaves
somewhat differently (it scales the Y component around 1/2 instead of
around 0, so that contrast 0 makes the image gray rather than black).
vo_xv does not support level conversion. vf_scale supports range
setting for input, but always outputs full-range RGB.
The value of the slave properties is the policy setting used for
conversions. This means they can be set to any value regardless of
whether the current VO supports that value or whether there currently
even is any video. Possibly separate properties could be added to
query the conversion actually used at the moment, if any.
Because the colorspace and level settings are now set with a single
VF/VO control call, the return value of that is no longer used to
signal whether all the settings are actually supported. Instead code
should set all the details it can support, and ignore the rest. The
core will use GET_YUV_COLORSPACE to check which colorspace details
have been set and which not. In other words, the return value for
SET_YUV_COLORSPACE only signals whether any kind of YUV colorspace
conversion handling exists at all, and VOs have to take care to return
the actual state with GET_YUV_COLORSPACE instead.
To be changed in later commits: add missing option documentation.
2011-10-15 21:50:21 +00:00
|
|
|
const int *new_inv_table = sws_getCoefficients(sws_csp);
|
|
|
|
|
|
|
|
if (sws_setColorspaceDetails(sws, new_inv_table, srcRange, table, dstRange,
|
2013-07-18 11:13:03 +00:00
|
|
|
brightness, contrast, saturation) == -1)
|
video, options: implement better YUV->RGB conversion control
Rewrite control of the colorspace and input/output level parameters
used in YUV-RGB conversions, replacing VO-specific suboptions with new
common options and adding configuration support to more cases.
Add new option --colormatrix which selects the colorspace the original
video is assumed to have in YUV->RGB conversions. The default
behavior changes from assuming BT.601 to colorspace autoselection
between BT.601 and BT.709 using a simple heuristic based on video
size. Add new options --colormatrix-input-range and
--colormatrix-output-range which select input YUV and output RGB range.
Disable the previously existing VO-specific colorspace and level
conversion suboptions in vo_gl and vo_vdpau. Remove the
"yuv_colorspace" property and replace it with one named "colormatrix"
and semantics matching the new option. Add new properties matching the
options for level conversion.
Colorspace selection is currently supported by vo_gl, vo_vdpau, vo_xv
and vf_scale, and all can change it at runtime (previously only
vo_vdpau and vo_xv could). vo_vdpau now uses the same conversion
matrix generation as vo_gl instead of libvdpau functionality; the main
functional difference is that the "contrast" equalizer control behaves
somewhat differently (it scales the Y component around 1/2 instead of
around 0, so that contrast 0 makes the image gray rather than black).
vo_xv does not support level conversion. vf_scale supports range
setting for input, but always outputs full-range RGB.
The value of the slave properties is the policy setting used for
conversions. This means they can be set to any value regardless of
whether the current VO supports that value or whether there currently
even is any video. Possibly separate properties could be added to
query the conversion actually used at the moment, if any.
Because the colorspace and level settings are now set with a single
VF/VO control call, the return value of that is no longer used to
signal whether all the settings are actually supported. Instead code
should set all the details it can support, and ignore the rest. The
core will use GET_YUV_COLORSPACE to check which colorspace details
have been set and which not. In other words, the return value for
SET_YUV_COLORSPACE only signals whether any kind of YUV colorspace
conversion handling exists at all, and VOs have to take care to return
the actual state with GET_YUV_COLORSPACE instead.
To be changed in later commits: add missing option documentation.
2011-10-15 21:50:21 +00:00
|
|
|
goto error_out;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error_out:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2002-04-07 20:21:37 +00:00
|
|
|
//===========================================================================//
|
|
|
|
|
2013-07-18 11:13:03 +00:00
|
|
|
static int query_format(struct vf_instance *vf, unsigned int fmt)
|
|
|
|
{
|
2010-11-03 16:42:24 +00:00
|
|
|
if (!IMGFMT_IS_HWACCEL(fmt) && imgfmt2pixfmt(fmt) != PIX_FMT_NONE) {
|
2013-07-18 11:13:03 +00:00
|
|
|
unsigned int best = find_best_out(vf, fmt);
|
|
|
|
int flags;
|
|
|
|
if (!best)
|
|
|
|
return 0; // no matching out-fmt
|
|
|
|
flags = vf_next_query_format(vf, best);
|
|
|
|
if (!(flags & (VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW)))
|
|
|
|
return 0;
|
|
|
|
if (fmt != best)
|
|
|
|
flags &= ~VFCAP_CSP_SUPPORTED_BY_HW;
|
|
|
|
return flags;
|
2002-04-07 21:33:42 +00:00
|
|
|
}
|
2013-07-18 11:13:03 +00:00
|
|
|
return 0; // nomatching in-fmt
|
2002-04-07 21:33:42 +00:00
|
|
|
}
|
|
|
|
|
2013-07-18 11:13:03 +00:00
|
|
|
static void uninit(struct vf_instance *vf)
|
|
|
|
{
|
|
|
|
if (vf->priv->ctx)
|
|
|
|
sws_freeContext(vf->priv->ctx);
|
2003-11-29 19:40:30 +00:00
|
|
|
}
|
|
|
|
|
2013-07-18 11:13:03 +00:00
|
|
|
// Instantiate the scale filter: install the callback table and log the
// configured target size. Returns 1 (success).
static int vf_open(vf_instance_t *vf, char *args)
{
    // Wire up the filter's entry points.
    vf->uninit       = uninit;
    vf->control      = control;
    vf->query_format = query_format;
    vf->filter       = filter;
    vf->reconfig     = reconfig;

    mp_msg(MSGT_VFILTER, MSGL_V, "SwScale params: %d x %d (-1=no scaling)\n",
           vf->priv->cfg_w, vf->priv->cfg_h);

    return 1;
}
|
|
|
|
|
2003-03-15 18:01:02 +00:00
|
|
|
// Shorthand for member offsets into struct vf_priv_s, used by the
// suboption table below.
#undef ST_OFF
#define ST_OFF(f) M_ST_OFF(struct vf_priv_s, f)
|
2008-04-26 13:35:40 +00:00
|
|
|
// Suboption table for the scale filter; parsed values land in
// struct vf_priv_s via the ST_OFF offsets.
static const m_option_t vf_opts_fields[] = {
    {"w", ST_OFF(cfg_w), CONF_TYPE_INT, M_OPT_MIN, -11, 0, NULL},             // target width (negative: special/no scaling)
    {"h", ST_OFF(cfg_h), CONF_TYPE_INT, M_OPT_MIN, -11, 0, NULL},             // target height (negative: special/no scaling)
    {"interlaced", ST_OFF(interlaced), CONF_TYPE_INT, M_OPT_RANGE, 0, 1, NULL}, // scale fields separately
    {"chr-drop", ST_OFF(v_chr_drop), CONF_TYPE_INT, M_OPT_RANGE, 0, 3, NULL},  // vertical chroma drop
    {"param", ST_OFF(param[0]), CONF_TYPE_DOUBLE, M_OPT_RANGE, 0.0, 100.0, NULL},  // scaler tuning parameter 1
    {"param2", ST_OFF(param[1]), CONF_TYPE_DOUBLE, M_OPT_RANGE, 0.0, 100.0, NULL}, // scaler tuning parameter 2
    {"noup", ST_OFF(noup), CONF_TYPE_INT, M_OPT_RANGE, 0, 2, NULL},           // disallow upscaling
    {"arnd", ST_OFF(accurate_rnd), CONF_TYPE_FLAG, 0, 0, 1, NULL},            // accurate rounding in swscale
    { NULL, NULL, 0, 0, 0, 0, NULL }
};
|
|
|
|
|
2008-04-26 13:35:40 +00:00
|
|
|
// Option-struct descriptor binding the suboption table to
// struct vf_priv_s and its defaults.
static const m_struct_t vf_opts = {
    "scale",
    sizeof(struct vf_priv_s),
    &vf_priv_dflt,
    vf_opts_fields
};
|
|
|
|
|
2007-12-02 14:57:15 +00:00
|
|
|
// Public filter descriptor: registers "scale" with the filter chain
// (description, short name, author, comment, open function, options).
const vf_info_t vf_info_scale = {
    "software scaling",
    "scale",
    "A'rpi",
    "",
    vf_open,
    &vf_opts
};
|
|
|
|
|
|
|
|
//===========================================================================//
|