diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h
index b6302245c0..7d31291d5f 100644
--- a/fftools/ffmpeg.h
+++ b/fftools/ffmpeg.h
@@ -255,6 +255,9 @@ typedef struct InputFilterOptions {
     int64_t             trim_end_us;
 
     uint8_t            *name;
+
+    int                 sub2video_width;
+    int                 sub2video_height;
 } InputFilterOptions;
 
 typedef struct InputFilter {
@@ -366,10 +369,6 @@ typedef struct InputStream {
 
     int                   fix_sub_duration;
 
-    struct sub2video {
-        int w, h;
-    } sub2video;
-
     /* decoded data from this stream goes into all those filters
      * currently video and audio only */
     InputFilter         **filters;
diff --git a/fftools/ffmpeg_demux.c b/fftools/ffmpeg_demux.c
index 4cbad80e17..87ed8225c2 100644
--- a/fftools/ffmpeg_demux.c
+++ b/fftools/ffmpeg_demux.c
@@ -992,6 +992,26 @@ int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
         return ret;
 
     if (ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+        /* Compute the size of the canvas for the subtitles stream.
+           If the subtitles codecpar has set a size, use it. Otherwise use the
+           maximum dimensions of the video streams in the same file. */
+        opts->sub2video_width  = ist->par->width;
+        opts->sub2video_height = ist->par->height;
+        if (!(opts->sub2video_width && opts->sub2video_height)) {
+            for (int j = 0; j < d->f.nb_streams; j++) {
+                AVCodecParameters *par1 = d->f.streams[j]->par;
+                if (par1->codec_type == AVMEDIA_TYPE_VIDEO) {
+                    opts->sub2video_width  = FFMAX(opts->sub2video_width,  par1->width);
+                    opts->sub2video_height = FFMAX(opts->sub2video_height, par1->height);
+                }
+            }
+        }
+
+        if (!(opts->sub2video_width && opts->sub2video_height)) {
+            opts->sub2video_width  = FFMAX(opts->sub2video_width,  720);
+            opts->sub2video_height = FFMAX(opts->sub2video_height, 576);
+        }
+
         if (!d->pkt_heartbeat) {
             d->pkt_heartbeat = av_packet_alloc();
             if (!d->pkt_heartbeat)
@@ -1357,27 +1377,6 @@ static int ist_add(const OptionsContext *o, Demuxer *d, AVStream *st)
                 return ret;
             }
         }
-
-        /* Compute the size of the canvas for the subtitles stream.
-           If the subtitles codecpar has set a size, use it. Otherwise use the
-           maximum dimensions of the video streams in the same file. */
-        ist->sub2video.w = par->width;
-        ist->sub2video.h = par->height;
-        if (!(ist->sub2video.w && ist->sub2video.h)) {
-            for (int j = 0; j < ic->nb_streams; j++) {
-                AVCodecParameters *par1 = ic->streams[j]->codecpar;
-                if (par1->codec_type == AVMEDIA_TYPE_VIDEO) {
-                    ist->sub2video.w = FFMAX(ist->sub2video.w, par1->width);
-                    ist->sub2video.h = FFMAX(ist->sub2video.h, par1->height);
-                }
-            }
-        }
-
-        if (!(ist->sub2video.w && ist->sub2video.h)) {
-            ist->sub2video.w = FFMAX(ist->sub2video.w, 720);
-            ist->sub2video.h = FFMAX(ist->sub2video.h, 576);
-        }
-
         break;
     }
     case AVMEDIA_TYPE_ATTACHMENT:
diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c
index 0f57035104..d182f3ab2e 100644
--- a/fftools/ffmpeg_filter.c
+++ b/fftools/ffmpeg_filter.c
@@ -689,6 +689,16 @@ static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
         ifp->sub2video.frame = av_frame_alloc();
         if (!ifp->sub2video.frame)
             return AVERROR(ENOMEM);
+
+        ifp->width  = ifp->opts.sub2video_width;
+        ifp->height = ifp->opts.sub2video_height;
+
+        /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
+           palettes for all rectangles are identical or compatible */
+        ifp->format = AV_PIX_FMT_RGB32;
+
+        av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
+               ifp->width, ifp->height);
     }
 
     return 0;
@@ -1829,17 +1839,6 @@ int ifilter_parameters_from_dec(InputFilter *ifilter, const AVCodecContext *dec)
         ret = av_channel_layout_copy(&ifp->fallback.ch_layout, &dec->ch_layout);
         if (ret < 0)
             return ret;
-    } else {
-        // for subtitles (i.e. sub2video) we set the actual parameters,
-        // rather than just fallback
-        ifp->width  = ifp->ist->sub2video.w;
-        ifp->height = ifp->ist->sub2video.h;
-
-        /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
-           palettes for all rectangles are identical or compatible */
-        ifp->format = AV_PIX_FMT_RGB32;
-
-        av_log(NULL, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n", ifp->width, ifp->height);
     }
 
     return 0;
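
Note (not part of the patch): below is a minimal, standalone sketch of the canvas-size selection that ist_filter_add() now performs — prefer the subtitle stream's own dimensions, otherwise the largest video stream in the same file, otherwise a 720x576 default. The Stream struct, the local FFMAX macro and the pick_sub2video_canvas() helper are simplified stand-ins for illustration only, not FFmpeg API.

#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

/* simplified stand-in for one stream's codec parameters */
typedef struct Stream {
    int is_video;
    int width, height;
} Stream;

/* hypothetical helper mirroring the fallback chain in ist_filter_add() */
static void pick_sub2video_canvas(const Stream *streams, int nb_streams,
                                  int sub_w, int sub_h, int *out_w, int *out_h)
{
    int w = sub_w, h = sub_h;

    /* subtitle stream carries no usable size: take the maximum video dimensions */
    if (!(w && h)) {
        for (int i = 0; i < nb_streams; i++) {
            if (streams[i].is_video) {
                w = FFMAX(w, streams[i].width);
                h = FFMAX(h, streams[i].height);
            }
        }
    }

    /* still nothing usable (no sized video streams either): fall back to 720x576 */
    if (!(w && h)) {
        w = FFMAX(w, 720);
        h = FFMAX(h, 576);
    }

    *out_w = w;
    *out_h = h;
}

int main(void)
{
    Stream streams[] = { { 1, 1920, 1080 }, { 0, 0, 0 } };
    int w, h;

    pick_sub2video_canvas(streams, 2, 0, 0, &w, &h);
    printf("canvas: %dx%d\n", w, h); /* prints "canvas: 1920x1080" */
    return 0;
}

With no video streams in the file and no size set on the subtitle stream, the same call yields the 720x576 default.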