mirror of https://git.ffmpeg.org/ffmpeg.git
lavfi/vf_deinterlace_vaapi: flush queued frame for field in DeinterlacingBob
For DeinterlacingBob mode with rate=field, the number of output frames should equal twice the total number of input frames, since only intra-frame deinterlacing is used. Currently, for "backward_ref = 0, rate = field", an extra delay is introduced; because the pipeline is asynchronous and is never flushed, the number of output frames is [expected_number - 2]. In particular, if the input has only one frame, the output will be empty. Add deint_vaapi_request_frame for deinterlace_vaapi, which sends a NULL frame to flush the queued frames. For a one-frame input in Bob mode with rate=field: before this patch, 0 frames; after this patch, 2 frames. ffmpeg -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -hwaccel_output_format vaapi -i input.h264 -an -vf deinterlace_vaapi=mode=bob:rate=field -f null - Tested-by: Mark Thompson <sw@jkqxz.net> Reviewed-by: Mark Thompson <sw@jkqxz.net> Signed-off-by: Linjie Fu <linjie.fu@intel.com> Signed-off-by: Haihao Xiang <haihao.xiang@intel.com>
This commit is contained in:
parent
e6b990e25d
commit
9c58fd2226
|
@ -46,6 +46,9 @@ typedef struct DeintVAAPIContext {
|
||||||
int queue_count;
|
int queue_count;
|
||||||
AVFrame *frame_queue[MAX_REFERENCES];
|
AVFrame *frame_queue[MAX_REFERENCES];
|
||||||
int extra_delay_for_timestamps;
|
int extra_delay_for_timestamps;
|
||||||
|
|
||||||
|
int eof;
|
||||||
|
int prev_pts;
|
||||||
} DeintVAAPIContext;
|
} DeintVAAPIContext;
|
||||||
|
|
||||||
static const char *deint_vaapi_mode_name(int mode)
|
static const char *deint_vaapi_mode_name(int mode)
|
||||||
|
@ -188,9 +191,11 @@ static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
|
||||||
void *filter_params_addr = NULL;
|
void *filter_params_addr = NULL;
|
||||||
int err, i, field, current_frame_index;
|
int err, i, field, current_frame_index;
|
||||||
|
|
||||||
av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
|
// NULL frame is used to flush the queue in field mode
|
||||||
av_get_pix_fmt_name(input_frame->format),
|
if (input_frame)
|
||||||
input_frame->width, input_frame->height, input_frame->pts);
|
av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
|
||||||
|
av_get_pix_fmt_name(input_frame->format),
|
||||||
|
input_frame->width, input_frame->height, input_frame->pts);
|
||||||
|
|
||||||
if (ctx->queue_count < ctx->queue_depth) {
|
if (ctx->queue_count < ctx->queue_depth) {
|
||||||
ctx->frame_queue[ctx->queue_count++] = input_frame;
|
ctx->frame_queue[ctx->queue_count++] = input_frame;
|
||||||
|
@ -208,6 +213,9 @@ static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
|
||||||
current_frame_index = ctx->pipeline_caps.num_forward_references;
|
current_frame_index = ctx->pipeline_caps.num_forward_references;
|
||||||
|
|
||||||
input_frame = ctx->frame_queue[current_frame_index];
|
input_frame = ctx->frame_queue[current_frame_index];
|
||||||
|
if (!input_frame)
|
||||||
|
return 0;
|
||||||
|
|
||||||
input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
|
input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
|
||||||
for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
|
for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
|
||||||
forward_references[i] = (VASurfaceID)(uintptr_t)
|
forward_references[i] = (VASurfaceID)(uintptr_t)
|
||||||
|
@ -289,6 +297,8 @@ static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
|
||||||
if (ctx->field_rate == 2) {
|
if (ctx->field_rate == 2) {
|
||||||
if (field == 0)
|
if (field == 0)
|
||||||
output_frame->pts = 2 * input_frame->pts;
|
output_frame->pts = 2 * input_frame->pts;
|
||||||
|
else if (ctx->eof)
|
||||||
|
output_frame->pts = 3 * input_frame->pts - ctx->prev_pts;
|
||||||
else
|
else
|
||||||
output_frame->pts = input_frame->pts +
|
output_frame->pts = input_frame->pts +
|
||||||
ctx->frame_queue[current_frame_index + 1]->pts;
|
ctx->frame_queue[current_frame_index + 1]->pts;
|
||||||
|
@ -304,6 +314,8 @@ static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ctx->prev_pts = input_frame->pts;
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
|
@ -313,6 +325,25 @@ fail:
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int deint_vaapi_request_frame(AVFilterLink *link)
|
||||||
|
{
|
||||||
|
AVFilterContext *avctx = link->src;
|
||||||
|
DeintVAAPIContext *ctx = avctx->priv;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if (ctx->eof)
|
||||||
|
return AVERROR_EOF;
|
||||||
|
|
||||||
|
ret = ff_request_frame(link->src->inputs[0]);
|
||||||
|
if (ret == AVERROR_EOF && ctx->extra_delay_for_timestamps) {
|
||||||
|
ctx->eof = 1;
|
||||||
|
deint_vaapi_filter_frame(link->src->inputs[0], NULL);
|
||||||
|
} else if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static av_cold int deint_vaapi_init(AVFilterContext *avctx)
|
static av_cold int deint_vaapi_init(AVFilterContext *avctx)
|
||||||
{
|
{
|
||||||
VAAPIVPPContext *vpp_ctx = avctx->priv;
|
VAAPIVPPContext *vpp_ctx = avctx->priv;
|
||||||
|
@ -373,9 +404,10 @@ static const AVFilterPad deint_vaapi_inputs[] = {
|
||||||
|
|
||||||
static const AVFilterPad deint_vaapi_outputs[] = {
|
static const AVFilterPad deint_vaapi_outputs[] = {
|
||||||
{
|
{
|
||||||
.name = "default",
|
.name = "default",
|
||||||
.type = AVMEDIA_TYPE_VIDEO,
|
.type = AVMEDIA_TYPE_VIDEO,
|
||||||
.config_props = &deint_vaapi_config_output,
|
.request_frame = &deint_vaapi_request_frame,
|
||||||
|
.config_props = &deint_vaapi_config_output,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue