mirror of https://git.ffmpeg.org/ffmpeg.git
avfilter/f_cue: use inlink fifo for queueing frames
Signed-off-by: Marton Balint <cus@passwd.hu>
parent 8f14170b9a
commit 7ca2ee059e
libavfilter/f_cue.c

@@ -22,7 +22,6 @@
 #include "libavutil/time.h"
 #include "avfilter.h"
 #include "filters.h"
-#include "framequeue.h"
 #include "internal.h"
 
 typedef struct CueContext {
@@ -32,57 +31,35 @@ typedef struct CueContext {
     int64_t preroll;
     int64_t buffer;
     int status;
-    FFFrameQueue queue;
 } CueContext;
 
-static av_cold int init(AVFilterContext *ctx)
-{
-    CueContext *s = ctx->priv;
-    ff_framequeue_init(&s->queue, &ctx->graph->internal->frame_queues);
-    return 0;
-}
-
-static av_cold void uninit(AVFilterContext *ctx)
-{
-    CueContext *s = ctx->priv;
-    ff_framequeue_free(&s->queue);
-}
-
 static int activate(AVFilterContext *ctx)
 {
     AVFilterLink *inlink = ctx->inputs[0];
     AVFilterLink *outlink = ctx->outputs[0];
     CueContext *s = ctx->priv;
-    int64_t pts;
-    AVFrame *frame = NULL;
 
     FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
 
-    if (s->status < 3 || s->status == 5) {
-        int ret = ff_inlink_consume_frame(inlink, &frame);
-        if (ret < 0)
-            return ret;
-        if (frame)
-            pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
-    }
+    if (ff_inlink_queued_frames(inlink)) {
+        AVFrame *frame = ff_inlink_peek_frame(inlink, 0);
+        int64_t pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
 
-    if (!s->status && frame) {
+        if (!s->status) {
             s->first_pts = pts;
             s->status++;
         }
-    if (s->status == 1 && frame) {
-        if (pts - s->first_pts < s->preroll)
+        if (s->status == 1) {
+            if (pts - s->first_pts < s->preroll) {
+                ff_inlink_consume_frame(inlink, &frame);
                 return ff_filter_frame(outlink, frame);
+            }
             s->first_pts = pts;
             s->status++;
         }
-    if (s->status == 2 && frame) {
-        int ret = ff_framequeue_add(&s->queue, frame);
-        if (ret < 0) {
-            av_frame_free(&frame);
-            return ret;
-        }
-        frame = NULL;
+        if (s->status == 2) {
+            frame = ff_inlink_peek_frame(inlink, ff_inlink_queued_frames(inlink) - 1);
+            pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
             if (!(pts - s->first_pts < s->buffer && (av_gettime() - s->cue) < 0))
                 s->status++;
         }
@@ -93,12 +70,10 @@ static int activate(AVFilterContext *ctx)
             s->status++;
         }
         if (s->status == 4) {
-        if (ff_framequeue_queued_frames(&s->queue))
-            return ff_filter_frame(outlink, ff_framequeue_take(&s->queue));
-        s->status++;
-    }
-    if (s->status == 5 && frame)
+            ff_inlink_consume_frame(inlink, &frame);
             return ff_filter_frame(outlink, frame);
+        }
+    }
 
     FF_FILTER_FORWARD_STATUS(inlink, outlink);
     FF_FILTER_FORWARD_WANTED(outlink, inlink);
@@ -140,8 +115,6 @@ AVFilter ff_vf_cue = {
     .description = NULL_IF_CONFIG_SMALL("Delay filtering to match a cue."),
     .priv_size = sizeof(CueContext),
     .priv_class = &cue_class,
-    .init = init,
-    .uninit = uninit,
     .inputs = cue_inputs,
     .outputs = cue_outputs,
     .activate = activate,
@@ -173,8 +146,6 @@ AVFilter ff_af_acue = {
     .description = NULL_IF_CONFIG_SMALL("Delay filtering to match a cue."),
     .priv_size = sizeof(CueContext),
     .priv_class = &acue_class,
-    .init = init,
-    .uninit = uninit,
    .inputs = acue_inputs,
    .outputs = acue_outputs,
    .activate = activate,
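Note on the pattern (illustrative, not part of the commit itself): instead of copying frames into a private FFFrameQueue, the filter now works directly on the frames the framework already queues on the input link. Frames are examined in place with ff_inlink_peek_frame() and only dequeued with ff_inlink_consume_frame() at the moment they are forwarded. A minimal activate() sketch of that approach, using only the libavfilter helpers that appear in the diff (the function name activate_sketch and the placeholder ready-condition are made up for illustration):

#include "avfilter.h"
#include "filters.h"

static int activate_sketch(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];

    /* Propagate EOF/error status from the output back to the input first. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (ff_inlink_queued_frames(inlink)) {
        /* Look at the oldest queued frame without removing it from the FIFO. */
        AVFrame *frame = ff_inlink_peek_frame(inlink, 0);

        if (frame) { /* placeholder for "the filter is ready to emit this frame" */
            int ret = ff_inlink_consume_frame(inlink, &frame);
            if (ret < 0)
                return ret;
            return ff_filter_frame(outlink, frame);
        }
    }

    /* Otherwise forward status changes and request more input when wanted. */
    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

Because the frames stay on the inlink until they are consumed, the filter no longer needs init()/uninit() to create and free its own queue, which is what the hunks above remove.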