mirror of https://git.ffmpeg.org/ffmpeg.git
avfilter/vf_minterpolate: Fix left shift of negative value
The left shift of a negative value occurred while initializing the motion
estimation context when the width or height of the video was smaller than
the block size used for motion estimation, even though the selected motion
interpolation mode does not use motion estimation at all. The fix is to
initialize the motion estimation context only when the interpolation mode
actually uses motion estimation, as illustrated by the sketch below.
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
(cherry picked from commit aa262dcce8)
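The offending expression is the (mi_ctx->b_width - 1) << mi_ctx->log2_mb_size
argument passed to ff_me_init_context() in the diff below. The following
standalone sketch illustrates the failure mode; the concrete numbers and the
way b_width is derived here are illustrative assumptions, not the filter's
exact code:

/*
 * Minimal sketch of the failure mode; numbers and the derivation of b_width
 * are illustrative assumptions, not the filter's actual configuration.
 */
#include <stdio.h>

int main(void)
{
    int width        = 8;  /* hypothetical input narrower than one 16x16 block */
    int log2_mb_size = 4;  /* block size 16 */
    int b_width      = width >> log2_mb_size;          /* 0: no full block fits */

    /* The old code evaluated (b_width - 1) << log2_mb_size unconditionally.
     * With b_width == 0 the left operand is -1, and left-shifting a negative
     * value is undefined behaviour in C. Written as a multiplication, the
     * intended value would be: */
    int x_max = (b_width - 1) * (1 << log2_mb_size);   /* -16 */

    printf("b_width = %d, intended x_max = %d\n", b_width, x_max);
    return 0;
}

Under the old code this expression was evaluated even for interpolation modes
that never run motion estimation, which is presumably how the undefined left
shift was triggered.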
parent 8b7f2f0ec5
commit a7d3dc5b04
libavfilter/vf_minterpolate.c

@@ -368,6 +368,15 @@ static int config_input(AVFilterLink *inlink)
                    2 * mi_ctx->mb_size);
             return AVERROR(EINVAL);
         }
+        ff_me_init_context(me_ctx, mi_ctx->mb_size, mi_ctx->search_param,
+                           width, height, 0, (mi_ctx->b_width - 1) << mi_ctx->log2_mb_size,
+                           0, (mi_ctx->b_height - 1) << mi_ctx->log2_mb_size);
+
+        if (mi_ctx->me_mode == ME_MODE_BIDIR)
+            me_ctx->get_cost = &get_sad_ob;
+        else if (mi_ctx->me_mode == ME_MODE_BILAT)
+            me_ctx->get_cost = &get_sbad_ob;
+
         mi_ctx->pixel_mvs = av_mallocz_array(width * height, sizeof(PixelMVS));
         mi_ctx->pixel_weights = av_mallocz_array(width * height, sizeof(PixelWeights));
         mi_ctx->pixel_refs = av_mallocz_array(width * height, sizeof(PixelRefs));
@@ -395,13 +404,6 @@ static int config_input(AVFilterLink *inlink)
             return AVERROR(EINVAL);
         }
 
-    ff_me_init_context(me_ctx, mi_ctx->mb_size, mi_ctx->search_param, width, height, 0, (mi_ctx->b_width - 1) << mi_ctx->log2_mb_size, 0, (mi_ctx->b_height - 1) << mi_ctx->log2_mb_size);
-
-    if (mi_ctx->me_mode == ME_MODE_BIDIR)
-        me_ctx->get_cost = &get_sad_ob;
-    else if (mi_ctx->me_mode == ME_MODE_BILAT)
-        me_ctx->get_cost = &get_sbad_ob;
-
     return 0;
 fail:
     for (i = 0; i < NB_FRAMES; i++)
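Taken together, the two hunks above relocate the motion-estimation setup
rather than change it: ff_me_init_context() and the get_cost selection now
live inside the branch of config_input() that already validates the input
dimensions against the block size (the 2 * mi_ctx->mb_size check visible in
the context lines), and the old unconditional call site is removed. A
compressed sketch of the resulting guard pattern, with placeholder names
rather than the filter's internals, could look like this:

/*
 * Sketch of the guard pattern the patch establishes. MODE_* and setup()
 * are placeholders for illustration, not the filter's internals.
 */
#include <stdio.h>

enum { MODE_BLEND, MODE_MCI };  /* stand-ins for modes without/with motion estimation */

static int setup(int mode, int width, int height, int log2_mb_size)
{
    int mb_size = 1 << log2_mb_size;

    if (mode == MODE_MCI) {
        /* dimensions are validated before any block geometry is derived */
        if (width < 2 * mb_size || height < 2 * mb_size)
            return -1;

        int b_width = width >> log2_mb_size;
        int x_max   = (b_width - 1) << log2_mb_size;  /* safe: b_width >= 2 here */
        printf("motion estimation x_max = %d\n", x_max);
    }
    /* modes without motion estimation never reach the shift at all */
    return 0;
}

int main(void)
{
    setup(MODE_BLEND, 8, 8, 4);    /* tiny input, no motion estimation: fine */
    setup(MODE_MCI,  64, 48, 4);   /* motion estimation with validated dimensions */
    return 0;
}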
@@ -830,9 +832,10 @@ static int inject_frame(AVFilterLink *inlink, AVFrame *avf_in)
     return 0;
 }
 
-static int detect_scene_change(MIContext *mi_ctx)
+static int detect_scene_change(AVFilterContext *ctx)
 {
-    AVMotionEstContext *me_ctx = &mi_ctx->me_ctx;
+    MIContext *mi_ctx = ctx->priv;
+    AVFilterLink *input = ctx->inputs[0];
     uint8_t *p1 = mi_ctx->frames[1].avf->data[0];
     ptrdiff_t linesize1 = mi_ctx->frames[1].avf->linesize[0];
     uint8_t *p2 = mi_ctx->frames[2].avf->data[0];
@@ -841,9 +844,9 @@ static int detect_scene_change(MIContext *mi_ctx)
     if (mi_ctx->scd_method == SCD_METHOD_FDIFF) {
         double ret = 0, mafd, diff;
         uint64_t sad;
-        mi_ctx->sad(p1, linesize1, p2, linesize2, me_ctx->width, me_ctx->height, &sad);
+        mi_ctx->sad(p1, linesize1, p2, linesize2, input->w, input->h, &sad);
         emms_c();
-        mafd = (double) sad * 100.0 / (me_ctx->height * me_ctx->width) / (1 << mi_ctx->bitdepth);
+        mafd = (double) sad * 100.0 / (input->h * input->w) / (1 << mi_ctx->bitdepth);
         diff = fabs(mafd - mi_ctx->prev_mafd);
         ret = av_clipf(FFMIN(mafd, diff), 0, 100.0);
         mi_ctx->prev_mafd = mafd;
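Because the motion-estimation context may now stay uninitialized,
detect_scene_change() can no longer read the frame dimensions from me_ctx;
the two hunks above make it take the AVFilterContext and use the input
link's w/h instead. For reference, the FDIFF score computed in the hunk
works out as in this sketch; the SAD value, resolution, and threshold are
invented for illustration:

/*
 * Worked example of the FDIFF-style scene score from the hunk above.
 * The SAD value, resolution, and threshold are invented for illustration.
 */
#include <stdio.h>
#include <math.h>

int main(void)
{
    unsigned long long sad = 1200000; /* sum of absolute differences of two frames */
    int w = 1280, h = 720;            /* now read from the input link, not me_ctx */
    int bitdepth = 8;
    double prev_mafd = 0.1;           /* score carried over from the previous pair */
    double threshold = 10.0;          /* hypothetical scene-change threshold */

    double mafd  = (double)sad * 100.0 / (h * w) / (1 << bitdepth); /* mean abs frame diff, percent */
    double diff  = fabs(mafd - prev_mafd);
    double score = fmax(0.0, fmin(fmin(mafd, diff), 100.0));        /* clip to [0, 100] */

    printf("mafd = %.3f, diff = %.3f, score = %.3f, scene change: %s\n",
           mafd, diff, score, score > threshold ? "yes" : "no");
    return 0;
}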
@@ -1191,7 +1194,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *avf_in)
     if (!mi_ctx->frames[0].avf)
         return 0;
 
-    mi_ctx->scene_changed = detect_scene_change(mi_ctx);
+    mi_ctx->scene_changed = detect_scene_change(ctx);
 
     for (;;) {
         AVFrame *avf_out;