diff --git a/ffmpeg.c b/ffmpeg.c
index 66c80396ce..c4a1a149a9 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -43,6 +43,13 @@
 #include "libavutil/libm.h"
 #include "libavformat/os_support.h"

+#if CONFIG_AVFILTER
+# include "libavfilter/avfilter.h"
+# include "libavfilter/avfiltergraph.h"
+# include "libavfilter/graphparser.h"
+# include "libavfilter/vsrc_buffer.h"
+#endif
+
 #if HAVE_SYS_RESOURCE_H
 #include <sys/types.h>
 #include <sys/resource.h>
@@ -147,6 +154,10 @@
 static int intra_dc_precision = 8;
 static int loop_input = 0;
 static int loop_output = AVFMT_NOOUTPUTLOOP;
 static int qp_hist = 0;
+#if CONFIG_AVFILTER
+static char *vfilters = NULL;
+AVFilterGraph *filt_graph_all = NULL;
+#endif
 static int intra_only = 0;
 static int audio_sample_rate = 44100;
@@ -305,6 +316,13 @@ typedef struct AVInputStream {
     int is_start;            /* is 1 at the start and after a discontinuity */
     int showed_multi_packet_warning;
     int is_past_recording_time;
+#if CONFIG_AVFILTER
+    AVFilterContext *out_video_filter;
+    AVFilterContext *input_video_filter;
+    AVFrame *filter_frame;
+    int has_filter_frame;
+    AVFilterPicRef *picref;
+#endif
 } AVInputStream;

 typedef struct AVInputFile {
@@ -320,6 +338,184 @@ typedef struct AVInputFile {
 static struct termios oldtty;
 #endif

+#if CONFIG_AVFILTER
+typedef struct {
+    int pix_fmt;
+} FilterOutPriv;
+
+
+static int output_init(AVFilterContext *ctx, const char *args, void *opaque)
+{
+    FilterOutPriv *priv = ctx->priv;
+
+    if(!opaque) return -1;
+
+    priv->pix_fmt = *((int *)opaque);
+
+    return 0;
+}
+
+static void output_end_frame(AVFilterLink *link)
+{
+}
+
+static int output_query_formats(AVFilterContext *ctx)
+{
+    FilterOutPriv *priv = ctx->priv;
+    enum PixelFormat pix_fmts[] = { priv->pix_fmt, PIX_FMT_NONE };
+
+    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
+    return 0;
+}
+
+static int get_filtered_video_pic(AVFilterContext *ctx,
+                                  AVFilterPicRef **picref, AVFrame *pic2,
+                                  uint64_t *pts)
+{
+    AVFilterPicRef *pic;
+
+    if(avfilter_request_frame(ctx->inputs[0]))
+        return -1;
+    if(!(pic = ctx->inputs[0]->cur_pic))
+        return -1;
+    *picref = pic;
+    ctx->inputs[0]->cur_pic = NULL;
+
+    *pts = pic->pts;
+
+    memcpy(pic2->data,     pic->data,     sizeof(pic->data));
+    memcpy(pic2->linesize, pic->linesize, sizeof(pic->linesize));
+
+    return 1;
+}
+
+static AVFilter output_filter =
+{
+    .name      = "ffmpeg_output",
+
+    .priv_size = sizeof(FilterOutPriv),
+    .init      = output_init,
+
+    .query_formats = output_query_formats,
+
+    .inputs    = (AVFilterPad[]) {{ .name      = "default",
+                                    .type      = AVMEDIA_TYPE_VIDEO,
+                                    .end_frame = output_end_frame,
+                                    .min_perms = AV_PERM_READ, },
+                                  { .name = NULL }},
+    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
+};
+
+static int configure_filters(AVInputStream *ist, AVOutputStream *ost)
+{
+    AVFilterContext *curr_filter;
+    /** filter graph containing all filters including input & output */
+    AVCodecContext *codec = ost->st->codec;
+    AVCodecContext *icodec = ist->st->codec;
+    char args[255];
+
+    filt_graph_all = av_mallocz(sizeof(AVFilterGraph));
+
+    if(!(ist->input_video_filter = avfilter_open(avfilter_get_by_name("buffer"), "src")))
+        return -1;
+    if(!(ist->out_video_filter = avfilter_open(&output_filter, "out")))
+        return -1;
+
+    snprintf(args, 255, "%d:%d:%d", ist->st->codec->width,
+             ist->st->codec->height, ist->st->codec->pix_fmt);
+    if(avfilter_init_filter(ist->input_video_filter, args, NULL))
+        return -1;
+    if(avfilter_init_filter(ist->out_video_filter, NULL, &codec->pix_fmt))
+        return -1;
+
+    /* add input and output filters to the overall graph */
+    avfilter_graph_add_filter(filt_graph_all, ist->input_video_filter);
+    avfilter_graph_add_filter(filt_graph_all, ist->out_video_filter);
+
+    curr_filter = ist->input_video_filter;
+
+    if(ost->video_crop) {
+        char crop_args[255];
+        AVFilterContext *filt_crop;
+        snprintf(crop_args, 255, "%d:%d:%d:%d", ost->leftBand, ost->topBand,
+                 codec->width - (frame_padleft + frame_padright),
+                 codec->height - (frame_padtop + frame_padbottom));
+        filt_crop = avfilter_open(avfilter_get_by_name("crop"), NULL);
+        if (!filt_crop)
+            return -1;
+        if (avfilter_init_filter(filt_crop, crop_args, NULL))
+            return -1;
+        if (avfilter_link(curr_filter, 0, filt_crop, 0))
+            return -1;
+        curr_filter = filt_crop;
+        avfilter_graph_add_filter(filt_graph_all, curr_filter);
+    }
+
+    if((codec->width !=
+        icodec->width - (frame_leftBand + frame_rightBand) +
+        (frame_padleft + frame_padright)) ||
+       (codec->height != icodec->height - (frame_topBand + frame_bottomBand) +
+        (frame_padtop + frame_padbottom))) {
+        char scale_args[255];
+        AVFilterContext *filt_scale;
+        snprintf(scale_args, 255, "%d:%d:flags=0x%X",
+                 codec->width - (frame_padleft + frame_padright),
+                 codec->height - (frame_padtop + frame_padbottom),
+                 (int)av_get_int(sws_opts, "sws_flags", NULL));
+        filt_scale = avfilter_open(avfilter_get_by_name("scale"), NULL);
+        if (!filt_scale)
+            return -1;
+        if (avfilter_init_filter(filt_scale, scale_args, NULL))
+            return -1;
+        if (avfilter_link(curr_filter, 0, filt_scale, 0))
+            return -1;
+        curr_filter = filt_scale;
+        avfilter_graph_add_filter(filt_graph_all, curr_filter);
+    }
+
+    if(vfilters) {
+        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
+        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
+
+        outputs->name    = av_strdup("in");
+        outputs->filter  = curr_filter;
+        outputs->pad_idx = 0;
+        outputs->next    = NULL;
+
+        inputs->name    = av_strdup("out");
+        inputs->filter  = ist->out_video_filter;
+        inputs->pad_idx = 0;
+        inputs->next    = NULL;
+
+        if (avfilter_graph_parse(filt_graph_all, vfilters, inputs, outputs, NULL) < 0)
+            return -1;
+        av_freep(&vfilters);
+    } else {
+        if(avfilter_link(curr_filter, 0, ist->out_video_filter, 0) < 0)
+            return -1;
+    }
+
+    {
+        char scale_sws_opts[128];
+        snprintf(scale_sws_opts, sizeof(scale_sws_opts), "flags=0x%X", (int)av_get_int(sws_opts, "sws_flags", NULL));
+        filt_graph_all->scale_sws_opts = av_strdup(scale_sws_opts);
+    }
+
+    /* configure all the filter links */
+    if(avfilter_graph_check_validity(filt_graph_all, NULL))
+        return -1;
+    if(avfilter_graph_config_formats(filt_graph_all, NULL))
+        return -1;
+    if(avfilter_graph_config_links(filt_graph_all, NULL))
+        return -1;
+
+    codec->width  = ist->out_video_filter->inputs[0]->w;
+    codec->height = ist->out_video_filter->inputs[0]->h;
+
+    return 0;
+}
+#endif /* CONFIG_AVFILTER */
+
 static void term_exit(void)
 {
 #if HAVE_TERMIOS_H
@@ -461,6 +657,10 @@ static int av_exit(int ret)
     allocated_audio_buf_size= allocated_audio_out_size= 0;
     av_free(samples);

+#if CONFIG_AVFILTER
+    avfilter_uninit();
+#endif
+
     if (received_sigterm) {
         fprintf(stderr,
             "Received signal %d: terminating.\n",
@@ -952,7 +1152,9 @@ static void do_video_out(AVFormatContext *s,
                          int *frame_size)
 {
     int nb_frames, i, ret;
+#if !CONFIG_AVFILTER
     int64_t topBand, bottomBand, leftBand, rightBand;
+#endif
     AVFrame *final_picture, *formatted_picture, *resampling_dst, *padding_src;
     AVFrame picture_crop_temp, picture_pad_temp;
     AVCodecContext *enc, *dec;
@@ -1000,6 +1202,9 @@ static void do_video_out(AVFormatContext *s,
     if (nb_frames <= 0)
         return;
+#if CONFIG_AVFILTER
+    formatted_picture = in_picture;
+#else
     if (ost->video_crop) {
         if (av_picture_crop((AVPicture *)&picture_crop_temp,
                             (AVPicture *)in_picture, dec->pix_fmt, ost->topBand, ost->leftBand) < 0) {
             fprintf(stderr, "error cropping picture\n");
@@ -1011,6 +1216,7 @@
     } else {
         formatted_picture = in_picture;
     }
+#endif

     final_picture = formatted_picture;
     padding_src = formatted_picture;
@@ -1037,6 +1243,7 @@
         av_exit(1);
     }

+#if !CONFIG_AVFILTER
     if (ost->video_resample) {
         padding_src = NULL;
         final_picture = &ost->pict_tmp;
@@ -1084,6 +1291,7 @@
         sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
                   0, ost->resample_height, resampling_dst->data, resampling_dst->linesize);
     }
+#endif

     if (ost->video_pad) {
         av_picture_pad((AVPicture*)final_picture, (AVPicture *)padding_src,
@@ -1349,6 +1557,10 @@ static int output_packet(AVInputStream *ist, int ist_index,
     static unsigned int samples_size= 0;
     AVSubtitle subtitle, *subtitle_to_free;
     int got_subtitle;
+#if CONFIG_AVFILTER
+    int loop;
+#endif
+
     AVPacket avpkt;
     int bps = av_get_bits_per_sample_format(ist->st->codec->sample_fmt)>>3;
@@ -1476,6 +1688,15 @@
                                     &buffer_to_free);
         }

+#if CONFIG_AVFILTER
+        if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ist->input_video_filter) {
+            // add it to be filtered
+            av_vsrc_buffer_add_frame(ist->input_video_filter, &picture,
+                                     ist->pts,
+                                     ist->st->codec->sample_aspect_ratio);
+        }
+#endif
+
         // preprocess audio (volume)
         if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
             if (audio_volume != 256) {
@@ -1497,10 +1718,18 @@
             if (pts > now)
                 usleep(pts - now);
         }
-
+#if CONFIG_AVFILTER
+        loop = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
+            !ist->out_video_filter || avfilter_poll_frame(ist->out_video_filter->inputs[0]);
+#endif
         /* if output time reached then transcode raw format, encode
            packets and output them */
         if (start_time == 0 || ist->pts >= start_time)
+#if CONFIG_AVFILTER
+        while(loop) {
+            if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ist->out_video_filter)
+                get_filtered_video_pic(ist->out_video_filter, &ist->picref, &picture, &ist->pts);
+#endif
             for(i=0;i<nb_ostreams;i++) {
@@ ... @@
                     case AVMEDIA_TYPE_VIDEO:
+#if CONFIG_AVFILTER
+                        ost->st->codec->sample_aspect_ratio = ist->picref->pixel_aspect;
+#endif
                         do_video_out(os, ost, ist, &picture, &frame_size);
                         if (vstats_filename && frame_size)
                             do_video_stats(os, ost, frame_size);
@@ -1586,7 +1818,17 @@
                         av_free_packet(&opkt);
                     }
                 }
+#if CONFIG_AVFILTER
+                loop = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
+                       ist->out_video_filter && avfilter_poll_frame(ist->out_video_filter->inputs[0]);
+#endif
             }
+
+#if CONFIG_AVFILTER
+        if(ist->picref)
+            avfilter_unref_pic(ist->picref);
+        }
+#endif
         av_free(buffer_to_free);
         /* XXX: allocate the subtitles in the codec ? */
         if (subtitle_to_free) {
@@ -2064,9 +2306,10 @@ static int av_transcode(AVFormatContext **output_files,
                     av_exit(1);
                 }

+#if !CONFIG_AVFILTER
                 ost->original_height = icodec->height;
                 ost->original_width = icodec->width;
-
+#endif
                 codec->bits_per_raw_sample= 0;
             }
             ost->resample_height = icodec->height - (frame_topBand + frame_bottomBand);
@@ -2074,6 +2317,13 @@
             ost->resample_pix_fmt= icodec->pix_fmt;
             ost->encoding_needed = 1;
             ist->decoding_needed = 1;
+
+#if CONFIG_AVFILTER
+            if (configure_filters(ist, ost)) {
+                fprintf(stderr, "Error opening filters!\n");
+                exit(1);
+            }
+#endif
             break;
         case AVMEDIA_TYPE_SUBTITLE:
             ost->encoding_needed = 1;
@@ -2459,6 +2709,12 @@
             avcodec_close(ist->st->codec);
         }
     }
+#if CONFIG_AVFILTER
+    if (filt_graph_all) {
+        avfilter_graph_destroy(filt_graph_all);
+        av_freep(&filt_graph_all);
+    }
+#endif

     /* finished ! */
     ret = 0;
@@ -4040,6 +4296,9 @@ static const OptionDef options[] = {
     { "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" },
     { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
     { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
+#if CONFIG_AVFILTER
+    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
+#endif
     { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_intra_matrix}, "specify intra matrix coeffs", "matrix" },
     { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_inter_matrix}, "specify inter matrix coeffs", "matrix" },
     { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_top_field_first}, "top=1/bottom=0/auto=-1 field first", "" },
@@ -4101,6 +4360,9 @@ int main(int argc, char **argv)
     avcodec_register_all();
 #if CONFIG_AVDEVICE
     avdevice_register_all();
+#endif
+#if CONFIG_AVFILTER
+    avfilter_register_all();
 #endif
     av_register_all();
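
For reference, the per-frame flow that the output_packet() changes above implement can be summarized as the sketch below. This is illustrative only and not part of the patch: the function name filter_and_encode_sketch is made up, error handling is omitted, and everything else it uses (the new AVInputStream fields, get_filtered_video_pic() and the libavfilter calls) is taken from the patch itself.

#if CONFIG_AVFILTER
/* Illustrative sketch only -- not part of the patch. */
static void filter_and_encode_sketch(AVInputStream *ist, AVFrame *decoded)
{
    AVFrame filtered;
    uint64_t pts;

    /* push the decoded picture into the graph's "buffer" source */
    av_vsrc_buffer_add_frame(ist->input_video_filter, decoded,
                             ist->pts, ist->st->codec->sample_aspect_ratio);

    /* drain every frame the chain can produce right now through the
       "ffmpeg_output" sink added by this patch */
    while (avfilter_poll_frame(ist->out_video_filter->inputs[0])) {
        if (get_filtered_video_pic(ist->out_video_filter, &ist->picref,
                                   &filtered, &pts) < 0)
            break;
        /* hand "filtered" and ist->picref->pixel_aspect to the encoder
           here (do_video_out() in the real code) */
        avfilter_unref_pic(ist->picref);
    }
}
#endif

With this wiring in place, the user-visible change is the new -vfilters option: its argument is handed to avfilter_graph_parse() and spliced between the implicit crop/scale stage and the output sink, so an invocation along the lines of "ffmpeg -i in.avi -vfilters vflip out.avi" (assuming that filter is compiled into libavfilter) runs every decoded frame through the chain before encoding.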