/*
 * This file is part of mpv.
 *
 * mpv is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * mpv is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with mpv. If not, see <http://www.gnu.org/licenses/>.
 */

#include <stddef.h>
#include <stdbool.h>
#include <assert.h>
#include <math.h>
#include <inttypes.h>

#include <libswscale/swscale.h>
#include <libavutil/common.h>

#include "common/common.h"
#include "draw_bmp.h"
#include "img_convert.h"
#include "video/mp_image.h"
#include "video/sws_utils.h"
#include "video/img_format.h"
#include "video/csputils.h"
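
// Sub-bitmap formats this renderer can handle, indexed by SUBBITMAP_* value.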
const bool mp_draw_sub_formats[SUBBITMAP_COUNT] = {
    [SUBBITMAP_LIBASS] = true,
    [SUBBITMAP_RGBA] = true,
};
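
// Cached conversion result for a single sub-bitmap: i is the color image in
// the target format, a is the corresponding 8-bit alpha plane.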
struct sub_cache {
    struct mp_image *i, *a;
};
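
// Per-OSD-part cache. The cached images are thrown away whenever change_id,
// the target image format, or the colorspace parameters change (see get_cache()).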
struct part {
    int change_id;
    int imgfmt;
    enum mp_csp colorspace;
    enum mp_csp_levels levels;
    int num_imgs;
    struct sub_cache *imgs;
};
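
// One cache slot per OSD part, plus a reusable temporary used by chroma_up().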
struct mp_draw_sub_cache
{
    struct part *parts[MAX_OSD_PARTS];
    struct mp_image *upsample_img;
    struct mp_image upsample_temp;
};

static struct part *get_cache(struct mp_draw_sub_cache *cache,
                              struct sub_bitmaps *sbs, struct mp_image *format);
static bool get_sub_area(struct mp_rect bb, struct mp_image *temp,
                         struct sub_bitmap *sb, struct mp_image *out_area,
                         int *out_src_x, int *out_src_y);
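
// ACCURATE: use exact (x + N/2) / N blending arithmetic instead of a cheaper
// shift-based approximation. CONDITIONAL: skip pixels whose alpha is 0 in the
// blend loops.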
#define ACCURATE
#define CONDITIONAL
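
// Blend the constant value srcp into a 16-bit plane, using the 8-bit alpha
// map srca (scaled by srcamul) as the per-pixel weight.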
static void blend_const16_alpha(void *dst, int dst_stride, uint16_t srcp,
                                uint8_t *srca, int srca_stride, uint8_t srcamul,
                                int w, int h)
{
    if (!srcamul)
        return;
    for (int y = 0; y < h; y++) {
        uint16_t *dst_r = (uint16_t *)((uint8_t *)dst + dst_stride * y);
        uint8_t *srca_r = srca + srca_stride * y;
        for (int x = 0; x < w; x++) {
            uint32_t srcap = srca_r[x];
#ifdef CONDITIONAL
            if (!srcap)
                continue;
#endif
            srcap *= srcamul; // now 0..65025
            dst_r[x] = (srcp * srcap + dst_r[x] * (65025 - srcap) + 32512) / 65025;
        }
    }
}
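
// Same as blend_const16_alpha(), but for 8-bit planes. With ACCURATE defined,
// the blend uses an exact division by 65025 (= 255 * 255); otherwise a faster
// >> 8 approximation is used.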
static void blend_const8_alpha(void *dst, int dst_stride, uint16_t srcp,
                               uint8_t *srca, int srca_stride, uint8_t srcamul,
                               int w, int h)
{
    if (!srcamul)
        return;
    for (int y = 0; y < h; y++) {
        uint8_t *dst_r = (uint8_t *)dst + dst_stride * y;
        uint8_t *srca_r = srca + srca_stride * y;
        for (int x = 0; x < w; x++) {
            uint32_t srcap = srca_r[x];
#ifdef CONDITIONAL
            if (!srcap)
                continue;
#endif
#ifdef ACCURATE
            srcap *= srcamul; // now 0..65025
            dst_r[x] = (srcp * srcap + dst_r[x] * (65025 - srcap) + 32512) / 65025;
#else
            srcap = (srcap * srcamul + 255) >> 8;
            dst_r[x] = (srcp * srcap + dst_r[x] * (255 - srcap) + 255) >> 8;
#endif
        }
    }
}
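
// Dispatch constant-color blending by bytes per sample (1 or 2); other sample
// sizes are silently ignored.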
static void blend_const_alpha(void *dst, int dst_stride, int srcp,
                              uint8_t *srca, int srca_stride, uint8_t srcamul,
                              int w, int h, int bytes)
{
    if (bytes == 2) {
        blend_const16_alpha(dst, dst_stride, srcp, srca, srca_stride, srcamul,
                            w, h);
    } else if (bytes == 1) {
        blend_const8_alpha(dst, dst_stride, srcp, srca, srca_stride, srcamul,
                           w, h);
    }
}
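
// Blend the 16-bit source plane src into dst, weighted by the 8-bit alpha map.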
static void blend_src16_alpha(void *dst, int dst_stride, void *src,
                              int src_stride, uint8_t *srca, int srca_stride,
                              int w, int h)
{
    for (int y = 0; y < h; y++) {
        uint16_t *dst_r = (uint16_t *)((uint8_t *)dst + dst_stride * y);
        uint16_t *src_r = (uint16_t *)((uint8_t *)src + src_stride * y);
        uint8_t *srca_r = srca + srca_stride * y;
        for (int x = 0; x < w; x++) {
            uint32_t srcap = srca_r[x];
#ifdef CONDITIONAL
            if (!srcap)
                continue;
#endif
            dst_r[x] = (src_r[x] * srcap + dst_r[x] * (255 - srcap) + 127) / 255;
        }
    }
}
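
// 8-bit variant of blend_src16_alpha(); ACCURATE selects exact /255 rounding.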
static void blend_src8_alpha(void *dst, int dst_stride, void *src,
                             int src_stride, uint8_t *srca, int srca_stride,
                             int w, int h)
{
    for (int y = 0; y < h; y++) {
        uint8_t *dst_r = (uint8_t *)dst + dst_stride * y;
        uint8_t *src_r = (uint8_t *)src + src_stride * y;
        uint8_t *srca_r = srca + srca_stride * y;
        for (int x = 0; x < w; x++) {
            uint16_t srcap = srca_r[x];
#ifdef CONDITIONAL
            if (!srcap)
                continue;
#endif
#ifdef ACCURATE
            dst_r[x] = (src_r[x] * srcap + dst_r[x] * (255 - srcap) + 127) / 255;
#else
            dst_r[x] = (src_r[x] * srcap + dst_r[x] * (255 - srcap) + 255) >> 8;
#endif
        }
    }
}
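
// Dispatch source-image blending by bytes per sample (1 or 2).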
static void blend_src_alpha(void *dst, int dst_stride, void *src,
                            int src_stride, uint8_t *srca, int srca_stride,
                            int w, int h, int bytes)
{
    if (bytes == 2) {
        blend_src16_alpha(dst, dst_stride, src, src_stride, srca, srca_stride,
                          w, h);
    } else if (bytes == 1) {
        blend_src8_alpha(dst, dst_stride, src, src_stride, srca, srca_stride,
                         w, h);
    }
}
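
// Convert premultiplied-alpha BGR32 pixels in img to straight alpha in place,
// and copy the alpha channel into the separate 8-bit image alpha.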
static void unpremultiply_and_split_BGR32(struct mp_image *img,
                                          struct mp_image *alpha)
{
    for (int y = 0; y < img->h; ++y) {
        uint32_t *irow = (uint32_t *) &img->planes[0][img->stride[0] * y];
        uint8_t *arow = &alpha->planes[0][alpha->stride[0] * y];
        for (int x = 0; x < img->w; ++x) {
            uint32_t pval = irow[x];
            uint8_t aval = (pval >> 24);
            uint8_t rval = (pval >> 16) & 0xFF;
            uint8_t gval = (pval >> 8) & 0xFF;
            uint8_t bval = pval & 0xFF;
            // multiplied = separate * alpha / 255
            // separate = rint(multiplied * 255 / alpha)
            //          = floor(multiplied * 255 / alpha + 0.5)
            //          = floor((multiplied * 255 + 0.5 * alpha) / alpha)
            //          = floor((multiplied * 255 + floor(0.5 * alpha)) / alpha)
            int div = (int) aval;
            int add = div / 2;
            if (aval) {
                rval = FFMIN(255, (rval * 255 + add) / div);
                gval = FFMIN(255, (gval * 255 + add) / div);
                bval = FFMIN(255, (bval * 255 + add) / div);
                irow[x] = bval + (gval << 8) + (rval << 16) + (aval << 24);
            }
            arow[x] = aval;
        }
    }
}
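
// Scale an RGBA sub-bitmap to its display size and convert it to the target
// image format, producing a color image (*out_sbi) and an alpha plane
// (*out_sba). On allocation failure, both outputs are left untouched.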
// dst_format merely contains the target colorspace/format information
static void scale_sb_rgba(struct sub_bitmap *sb, struct mp_image *dst_format,
                          struct mp_image **out_sbi, struct mp_image **out_sba)
{
    struct mp_image sbisrc = {0};
    mp_image_setfmt(&sbisrc, IMGFMT_BGR32);
    mp_image_set_size(&sbisrc, sb->w, sb->h);
    sbisrc.planes[0] = sb->bitmap;
    sbisrc.stride[0] = sb->stride;
    struct mp_image *sbisrc2 = mp_image_alloc(IMGFMT_BGR32, sb->dw, sb->dh);
    struct mp_image *sba = mp_image_alloc(IMGFMT_Y8, sb->dw, sb->dh);
    struct mp_image *sbi = mp_image_alloc(dst_format->imgfmt, sb->dw, sb->dh);
    if (!sbisrc2 || !sba || !sbi) {
        talloc_free(sbisrc2);
        talloc_free(sba);
        talloc_free(sbi);
        return;
    }

    mp_image_swscale(sbisrc2, &sbisrc, SWS_BILINEAR);
    unpremultiply_and_split_BGR32(sbisrc2, sba);

    sbi->params.colorspace = dst_format->params.colorspace;
    sbi->params.colorlevels = dst_format->params.colorlevels;
    mp_image_swscale(sbi, sbisrc2, SWS_BILINEAR);

    talloc_free(sbisrc2);

    *out_sbi = sbi;
    *out_sba = sba;
}
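
// Draw RGBA sub-bitmaps into the (possibly chroma-upsampled) region temp,
// reusing per-part cached conversions where available.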
static void draw_rgba(struct mp_draw_sub_cache *cache, struct mp_rect bb,
                      struct mp_image *temp, int bits,
                      struct sub_bitmaps *sbs)
{
    struct part *part = get_cache(cache, sbs, temp);
    assert(part);

    for (int i = 0; i < sbs->num_parts; ++i) {
        struct sub_bitmap *sb = &sbs->parts[i];

        if (sb->w < 1 || sb->h < 1)
            continue;

        struct mp_image dst;
        int src_x, src_y;
        if (!get_sub_area(bb, temp, sb, &dst, &src_x, &src_y))
            continue;

        struct mp_image *sbi = part->imgs[i].i;
        struct mp_image *sba = part->imgs[i].a;

        if (!(sbi && sba))
            scale_sb_rgba(sb, temp, &sbi, &sba);
        // on OOM, skip drawing
        if (!(sbi && sba))
            continue;

        int bytes = (bits + 7) / 8;
        uint8_t *alpha_p = sba->planes[0] + src_y * sba->stride[0] + src_x;
        for (int p = 0; p < (temp->num_planes > 2 ? 3 : 1); p++) {
            void *src = sbi->planes[p] + src_y * sbi->stride[p] + src_x * bytes;
            blend_src_alpha(dst.planes[p], dst.stride[p], src, sbi->stride[p],
                            alpha_p, sba->stride[0], dst.w, dst.h, bytes);
        }

        part->imgs[i].i = talloc_steal(part, sbi);
        part->imgs[i].a = talloc_steal(part, sba);
    }
}
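
// Draw libass sub-bitmaps: each bitmap is an 8-bit alpha mask plus a constant
// RGB color, which is converted (or reordered) to the target colorspace before
// blending.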
static void draw_ass(struct mp_draw_sub_cache *cache, struct mp_rect bb,
                     struct mp_image *temp, int bits, struct sub_bitmaps *sbs)
{
    struct mp_csp_params cspar = MP_CSP_PARAMS_DEFAULTS;
    mp_csp_set_image_params(&cspar, &temp->params);
    cspar.levels_out = MP_CSP_LEVELS_PC; // RGB (libass.color)
    cspar.int_bits_in = bits;
    cspar.int_bits_out = 8;

    struct mp_cmat yuv2rgb, rgb2yuv;
    bool need_conv = temp->fmt.flags & MP_IMGFLAG_YUV;
    if (need_conv) {
        mp_get_yuv2rgb_coeffs(&cspar, &yuv2rgb);
        mp_invert_yuv2rgb(&rgb2yuv, &yuv2rgb);
    }

    for (int i = 0; i < sbs->num_parts; ++i) {
        struct sub_bitmap *sb = &sbs->parts[i];

        struct mp_image dst;
        int src_x, src_y;
        if (!get_sub_area(bb, temp, sb, &dst, &src_x, &src_y))
            continue;

        int r = (sb->libass.color >> 24) & 0xFF;
        int g = (sb->libass.color >> 16) & 0xFF;
        int b = (sb->libass.color >> 8) & 0xFF;
        int a = 255 - (sb->libass.color & 0xFF);
        int color_yuv[3] = {r, g, b};
        if (need_conv) {
            mp_map_int_color(&rgb2yuv, bits, color_yuv);
        } else {
            color_yuv[0] = g;
            color_yuv[1] = b;
            color_yuv[2] = r;
        }

        int bytes = (bits + 7) / 8;
        uint8_t *alpha_p = (uint8_t *)sb->bitmap + src_y * sb->stride + src_x;
        for (int p = 0; p < (temp->num_planes > 2 ? 3 : 1); p++) {
            blend_const_alpha(dst.planes[p], dst.stride[p], color_yuv[p],
                              alpha_p, sb->stride, a, dst.w, dst.h, bytes);
        }
    }
}
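
// Compute the x/y step sizes a bounding box must be aligned to so that
// libswscale can operate on the cropped planes without alignment problems.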
static void get_swscale_alignment(const struct mp_image *img, int *out_xstep,
                                  int *out_ystep)
{
    int sx = (1 << img->fmt.chroma_xs);
    int sy = (1 << img->fmt.chroma_ys);

    for (int p = 0; p < img->num_planes; ++p) {
        int bits = img->fmt.bpp[p];
        // the * 2 fixes problems with writing past the destination width
        while (((sx >> img->fmt.chroma_xs) * bits) % (SWS_MIN_BYTE_ALIGN * 8 * 2))
            sx *= 2;
    }

    *out_xstep = sx;
    *out_ystep = sy;
}
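
// Round rc outwards so that its corners are multiples of xstep/ystep
// (both are assumed to be powers of two).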
static void align_bbox(int xstep, int ystep, struct mp_rect *rc)
{
    rc->x0 = rc->x0 & ~(xstep - 1);
    rc->y0 = rc->y0 & ~(ystep - 1);
    rc->x1 = FFALIGN(rc->x1, xstep);
    rc->y1 = FFALIGN(rc->y1, ystep);
}

// Post condition, if true returned: rc is inside img
static bool align_bbox_for_swscale(struct mp_image *img, struct mp_rect *rc)
{
    struct mp_rect img_rect = {0, 0, img->w, img->h};
    // Get rid of negative coordinates
    if (!mp_rect_intersection(rc, &img_rect))
        return false;
    int xstep, ystep;
    get_swscale_alignment(img, &xstep, &ystep);
    align_bbox(xstep, ystep, rc);
    return mp_rect_intersection(rc, &img_rect);
}

// Try to find best/closest YUV 444 format (or similar) for imgfmt
static void get_closest_y444_format(int imgfmt, int *out_format, int *out_bits)
{
    struct mp_imgfmt_desc desc = mp_imgfmt_get_desc(imgfmt);
    if (desc.flags & MP_IMGFLAG_RGB) {
        *out_format = IMGFMT_GBRP;
        *out_bits = 8;
        return;
    } else if (desc.flags & MP_IMGFLAG_YUV_P) {
        *out_format = mp_imgfmt_find_yuv_planar(0, 0, desc.num_planes,
                                                desc.plane_bits);
        if (*out_format && mp_sws_supported_format(*out_format)) {
            *out_bits = mp_imgfmt_get_desc(*out_format).plane_bits;
            return;
        }
    }
    // fallback
    *out_format = IMGFMT_444P;
    *out_bits = 8;
}
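
// Return the cache slot for sbs (RGBA only), recreating it if the change_id,
// image format, or colorspace parameters no longer match. Returns NULL if
// caching does not apply to this bitmap format.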
static struct part *get_cache(struct mp_draw_sub_cache *cache,
                              struct sub_bitmaps *sbs, struct mp_image *format)
{
    struct part *part = NULL;

    bool use_cache = sbs->format == SUBBITMAP_RGBA;
    if (use_cache) {
        part = cache->parts[sbs->render_index];
        if (part) {
            if (part->change_id != sbs->change_id
                || part->imgfmt != format->imgfmt
                || part->colorspace != format->params.colorspace
                || part->levels != format->params.colorlevels)
            {
                talloc_free(part);
                part = NULL;
            }
        }
        if (!part) {
            part = talloc(cache, struct part);
            *part = (struct part) {
                .change_id = sbs->change_id,
                .num_imgs = sbs->num_parts,
                .imgfmt = format->imgfmt,
                .levels = format->params.colorlevels,
                .colorspace = format->params.colorspace,
            };
            part->imgs = talloc_zero_array(part, struct sub_cache,
                                           part->num_imgs);
        }
        assert(part->num_imgs == sbs->num_parts);
        cache->parts[sbs->render_index] = part;
    }

    return part;
}

// Return area of intersection between target and sub-bitmap as cropped image
static bool get_sub_area(struct mp_rect bb, struct mp_image *temp,
                         struct sub_bitmap *sb, struct mp_image *out_area,
                         int *out_src_x, int *out_src_y)
{
    // coordinates are relative to the bbox
    struct mp_rect dst = {sb->x - bb.x0, sb->y - bb.y0};
    dst.x1 = dst.x0 + sb->dw;
    dst.y1 = dst.y0 + sb->dh;
    if (!mp_rect_intersection(&dst, &(struct mp_rect){0, 0, temp->w, temp->h}))
        return false;

    *out_src_x = (dst.x0 - sb->x) + bb.x0;
    *out_src_y = (dst.y0 - sb->y) + bb.y0;
    *out_area = *temp;
    mp_image_crop_rc(out_area, dst);

    return true;
}

// Convert the src image to imgfmt (which should be a 444 format)
static struct mp_image *chroma_up(struct mp_draw_sub_cache *cache, int imgfmt,
                                  struct mp_image *src)
{
    if (src->imgfmt == imgfmt)
        return src;

    if (!cache->upsample_img || cache->upsample_img->imgfmt != imgfmt ||
        cache->upsample_img->w < src->w || cache->upsample_img->h < src->h)
    {
        talloc_free(cache->upsample_img);
        cache->upsample_img = mp_image_alloc(imgfmt, src->w, src->h);
        talloc_steal(cache, cache->upsample_img);
        if (!cache->upsample_img)
            return NULL;
    }

    cache->upsample_temp = *cache->upsample_img;
    struct mp_image *temp = &cache->upsample_temp;
    mp_image_set_size(temp, src->w, src->h);

    // The temp image is always YUV, but src not necessarily.
    // Reduce amount of conversions in YUV case (upsampling/shifting only)
    if (src->fmt.flags & MP_IMGFLAG_YUV) {
        temp->params.colorspace = src->params.colorspace;
        temp->params.colorlevels = src->params.colorlevels;
    }

    if (src->imgfmt == IMGFMT_420P) {
        assert(imgfmt == IMGFMT_444P);
        // Faster upsampling: keep Y plane, upsample chroma planes only
        // The whole point is not having swscale copy the Y plane
        struct mp_image t_dst = *temp;
        mp_image_setfmt(&t_dst, IMGFMT_Y8);
        mp_image_set_size(&t_dst, temp->w, temp->h);
        struct mp_image t_src = t_dst;
        mp_image_set_size(&t_src, src->w >> 1, src->h >> 1);
        for (int c = 0; c < 2; c++) {
            t_dst.planes[0] = temp->planes[1 + c];
            t_dst.stride[0] = temp->stride[1 + c];
            t_src.planes[0] = src->planes[1 + c];
            t_src.stride[0] = src->stride[1 + c];
            mp_image_swscale(&t_dst, &t_src, SWS_POINT);
        }
        temp->planes[0] = src->planes[0];
        temp->stride[0] = src->stride[0];
    } else {
        mp_image_swscale(temp, src, SWS_POINT);
    }

    return temp;
}

// Undo chroma_up() (copy temp to old_src if needed)
static void chroma_down(struct mp_image *old_src, struct mp_image *temp)
{
    assert(old_src->w == temp->w && old_src->h == temp->h);
    if (temp != old_src) {
        if (old_src->imgfmt == IMGFMT_420P) {
            // Downsampling, skipping the Y plane (see chroma_up())
            assert(temp->imgfmt == IMGFMT_444P);
            assert(temp->planes[0] == old_src->planes[0]);
            struct mp_image t_dst = *temp;
            mp_image_setfmt(&t_dst, IMGFMT_Y8);
            mp_image_set_size(&t_dst, old_src->w >> 1, old_src->h >> 1);
            struct mp_image t_src = t_dst;
            mp_image_set_size(&t_src, temp->w, temp->h);
            for (int c = 0; c < 2; c++) {
                t_dst.planes[0] = old_src->planes[1 + c];
                t_dst.stride[0] = old_src->stride[1 + c];
                t_src.planes[0] = temp->planes[1 + c];
                t_src.stride[0] = temp->stride[1 + c];
                mp_image_swscale(&t_dst, &t_src, SWS_AREA);
            }
        } else {
            mp_image_swscale(old_src, temp, SWS_AREA); // chroma down
        }
    }
}
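
// Typical use (sketch only; how the frame and the sub_bitmaps are obtained is
// an assumption, not part of this file):
//
//     struct mp_draw_sub_cache *cache = NULL;
//     struct sub_bitmaps *sbs = ...;   // rendered OSD/subtitle bitmaps
//     struct mp_image *frame = ...;    // writable video frame
//     mp_draw_sub_bitmaps(&cache, frame, sbs);
//     ...
//     talloc_free(cache);              // drop cached conversions when done
//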
// cache: if not NULL, the function will set *cache to a talloc-allocated cache
// containing scaled versions of sbs contents - free the cache with
// talloc_free()
void mp_draw_sub_bitmaps(struct mp_draw_sub_cache **cache, struct mp_image *dst,
                         struct sub_bitmaps *sbs)
{
    assert(mp_draw_sub_formats[sbs->format]);
    if (!mp_sws_supported_format(dst->imgfmt))
        return;

    struct mp_draw_sub_cache *cache_ = cache ? *cache : NULL;
    if (!cache_)
        cache_ = talloc_zero(NULL, struct mp_draw_sub_cache);

    int format, bits;
    get_closest_y444_format(dst->imgfmt, &format, &bits);

    struct mp_rect rc_list[MP_SUB_BB_LIST_MAX];
    int num_rc = mp_get_sub_bb_list(sbs, rc_list, MP_SUB_BB_LIST_MAX);

    for (int r = 0; r < num_rc; r++) {
        struct mp_rect bb = rc_list[r];

        if (!align_bbox_for_swscale(dst, &bb))
            return;

        struct mp_image dst_region = *dst;
        mp_image_crop_rc(&dst_region, bb);
        struct mp_image *temp = chroma_up(cache_, format, &dst_region);
        if (!temp)
            continue; // on OOM, skip region

        if (sbs->format == SUBBITMAP_RGBA) {
            draw_rgba(cache_, bb, temp, bits, sbs);
        } else if (sbs->format == SUBBITMAP_LIBASS) {
            draw_ass(cache_, bb, temp, bits, sbs);
        }

        chroma_down(&dst_region, temp);
    }

    if (cache) {
        *cache = cache_;
    } else {
        talloc_free(cache_);
    }
}

// vim: ts=4 sw=4 et tw=80