fftools/ffmpeg: merge DemuxPktData into FrameData

This way we can propagate arbitrary data from the demuxer all the way
into the muxer, using a single struct.
This commit is contained in:
Anton Khirnov 2023-11-24 09:49:37 +01:00
parent 5475f665f6
commit c9f38210fc
4 changed files with 33 additions and 21 deletions

View File

@@ -404,36 +404,48 @@ InputStream *ist_iter(InputStream *prev)
return NULL;
}
static int frame_data_ensure(AVFrame *frame, int writable)
static int frame_data_ensure(AVBufferRef **dst, int writable)
{
if (!frame->opaque_ref) {
if (!*dst) {
FrameData *fd;
frame->opaque_ref = av_buffer_allocz(sizeof(*fd));
if (!frame->opaque_ref)
*dst = av_buffer_allocz(sizeof(*fd));
if (!*dst)
return AVERROR(ENOMEM);
fd = (FrameData*)frame->opaque_ref->data;
fd = (FrameData*)((*dst)->data);
fd->dec.frame_num = UINT64_MAX;
fd->dec.pts = AV_NOPTS_VALUE;
} else if (writable)
return av_buffer_make_writable(&frame->opaque_ref);
return av_buffer_make_writable(dst);
return 0;
}
FrameData *frame_data(AVFrame *frame)
{
int ret = frame_data_ensure(frame, 1);
int ret = frame_data_ensure(&frame->opaque_ref, 1);
return ret < 0 ? NULL : (FrameData*)frame->opaque_ref->data;
}
const FrameData *frame_data_c(AVFrame *frame)
{
int ret = frame_data_ensure(frame, 0);
int ret = frame_data_ensure(&frame->opaque_ref, 0);
return ret < 0 ? NULL : (const FrameData*)frame->opaque_ref->data;
}
/* Return the FrameData attached to pkt via pkt->opaque_ref, allocating
 * and zero-initializing it on first use. The buffer is made writable
 * (frame_data_ensure(..., 1)), so the caller may modify the returned
 * struct. Returns NULL on allocation failure. */
FrameData *packet_data(AVPacket *pkt)
{
int ret = frame_data_ensure(&pkt->opaque_ref, 1);
return ret < 0 ? NULL : (FrameData*)pkt->opaque_ref->data;
}
/* Read-only counterpart of packet_data(): returns the FrameData attached
 * to pkt without forcing the underlying buffer writable (writable = 0).
 * Note it still allocates the opaque_ref buffer if none exists yet.
 * Returns NULL on allocation failure. */
const FrameData *packet_data_c(AVPacket *pkt)
{
int ret = frame_data_ensure(&pkt->opaque_ref, 0);
return ret < 0 ? NULL : (const FrameData*)pkt->opaque_ref->data;
}
void remove_avoptions(AVDictionary **a, AVDictionary *b)
{
const AVDictionaryEntry *t = NULL;

View File

@@ -115,12 +115,6 @@ typedef struct {
} AudioChannelMap;
#endif
/* Pre-change type shown on the deleted side of this hunk: per-packet data
 * attached by the demuxer. This commit removes it, merging its single
 * field (dts_est) into FrameData so one struct travels demuxer->muxer. */
typedef struct DemuxPktData {
// estimated dts in AV_TIME_BASE_Q,
// to be used when real dts is missing
int64_t dts_est;
} DemuxPktData;
typedef struct OptionsContext {
OptionGroup *g;
@@ -622,6 +616,10 @@ typedef struct OutputFile {
// optionally attached as opaque_ref to decoded AVFrames
typedef struct FrameData {
// demuxer-estimated dts in AV_TIME_BASE_Q,
// to be used when real dts is missing
int64_t dts_est;
// properties that come from the decoder
struct {
uint64_t frame_num;
@@ -723,6 +721,9 @@ FrameData *frame_data(AVFrame *frame);
const FrameData *frame_data_c(AVFrame *frame);
FrameData *packet_data (AVPacket *pkt);
const FrameData *packet_data_c(AVPacket *pkt);
/**
* Set up fallback filtering parameters from a decoder context. They will only
* be used if no frames are ever sent on this input, otherwise the actual

View File

@@ -328,14 +328,13 @@ static int ist_dts_update(DemuxStream *ds, AVPacket *pkt)
av_assert0(!pkt->opaque_ref);
if (ds->streamcopy_needed) {
DemuxPktData *pd;
FrameData *fd;
pkt->opaque_ref = av_buffer_allocz(sizeof(*pd));
if (!pkt->opaque_ref)
fd = packet_data(pkt);
if (!fd)
return AVERROR(ENOMEM);
pd = (DemuxPktData*)pkt->opaque_ref->data;
pd->dts_est = ds->dts;
fd->dts_est = ds->dts;
}
return 0;

View File

@@ -381,8 +381,8 @@ static int of_streamcopy(OutputStream *ost, AVPacket *pkt)
{
OutputFile *of = output_files[ost->file_index];
MuxStream *ms = ms_from_ost(ost);
DemuxPktData *pd = pkt->opaque_ref ? (DemuxPktData*)pkt->opaque_ref->data : NULL;
int64_t dts = pd ? pd->dts_est : AV_NOPTS_VALUE;
FrameData *fd = pkt->opaque_ref ? (FrameData*)pkt->opaque_ref->data : NULL;
int64_t dts = fd ? fd->dts_est : AV_NOPTS_VALUE;
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
int64_t ts_offset;