mirror of https://git.ffmpeg.org/ffmpeg.git
examples/muxing: Don't use stack packet
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
parent 53f374c08d
commit 0230d125cb
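The change replaces the stack packet AVPacket pkt = { 0 }; that write_frame() declared on every loop iteration with one heap packet per stream: it is allocated in add_stream() with av_packet_alloc(), passed into write_frame() as an extra argument, and released in close_stream() with av_packet_free(). Heap allocation also means the example no longer depends on sizeof(AVPacket), whose use as part of the public ABI is deprecated. The sketch below shows the pattern in isolation; it is not code from the example, and enc_ctx, fmt_ctx and st are placeholders for an encoder context, muxer context and stream assumed to be set up elsewhere.

/* Minimal sketch of the new packet handling, not the full example.
 * enc_ctx, fmt_ctx and st are placeholders for objects opened elsewhere. */
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static int drain_encoder(AVCodecContext *enc_ctx, AVFormatContext *fmt_ctx,
                         AVStream *st, AVFrame *frame, AVPacket *pkt)
{
    /* frame == NULL flushes the encoder at the end of the stream */
    int ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        /* the same heap packet is filled on every iteration */
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;
        if (ret < 0)
            return ret;

        av_packet_rescale_ts(pkt, enc_ctx->time_base, st->time_base);
        pkt->stream_index = st->index;
        /* takes ownership of pkt's contents and resets pkt for reuse */
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
    }
    return ret;
}

/* Intended use, mirroring add_stream()/close_stream():
 *     AVPacket *pkt = av_packet_alloc();
 *     if (!pkt)
 *         return AVERROR(ENOMEM);
 *     ...
 *     drain_encoder(enc_ctx, fmt_ctx, st, frame, pkt);
 *     ...
 *     av_packet_free(&pkt);
 */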
doc/examples/muxing.c

@@ -62,6 +62,8 @@ typedef struct OutputStream {
     AVFrame *frame;
     AVFrame *tmp_frame;
 
+    AVPacket *tmp_pkt;
+
     float t, tincr, tincr2;
 
     struct SwsContext *sws_ctx;
@@ -80,7 +82,7 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
 }
 
 static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
-                       AVStream *st, AVFrame *frame)
+                       AVStream *st, AVFrame *frame, AVPacket *pkt)
 {
     int ret;
 
@@ -93,9 +95,7 @@ static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
     }
 
     while (ret >= 0) {
-        AVPacket pkt = { 0 };
-
-        ret = avcodec_receive_packet(c, &pkt);
+        ret = avcodec_receive_packet(c, pkt);
         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
             break;
         else if (ret < 0) {
@@ -104,12 +104,12 @@ static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
         }
 
         /* rescale output packet timestamp values from codec to stream timebase */
-        av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
-        pkt.stream_index = st->index;
+        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
+        pkt->stream_index = st->index;
 
         /* Write the compressed frame to the media file. */
-        log_packet(fmt_ctx, &pkt);
-        ret = av_interleaved_write_frame(fmt_ctx, &pkt);
+        log_packet(fmt_ctx, pkt);
+        ret = av_interleaved_write_frame(fmt_ctx, pkt);
+        /* pkt is now blank (av_interleaved_write_frame() takes ownership of
+         * its contents and resets pkt), so that no unreferencing is necessary.
+         * This would be different if one used av_write_frame(). */
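The comment added above documents the ownership rule that makes the reuse safe: av_interleaved_write_frame() takes ownership of the packet's contents and resets the packet, while av_write_frame() does not. A rough sketch of the difference, assuming fmt_ctx and pkt come from the surrounding example:

#include <libavcodec/packet.h>
#include <libavformat/avformat.h>

/* Sketch only: write one already-filled packet with either muxing call. */
static int write_one(AVFormatContext *fmt_ctx, AVPacket *pkt, int interleaved)
{
    int ret;

    if (interleaved) {
        /* consumes and resets pkt; it can be reused right away */
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
    } else {
        /* leaves ownership with the caller, so unreference before reuse */
        ret = av_write_frame(fmt_ctx, pkt);
        av_packet_unref(pkt);
    }
    return ret;
}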
@@ -138,6 +138,12 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
         exit(1);
     }
 
+    ost->tmp_pkt = av_packet_alloc();
+    if (!ost->tmp_pkt) {
+        fprintf(stderr, "Could not allocate AVPacket\n");
+        exit(1);
+    }
+
     ost->st = avformat_new_stream(oc, NULL);
     if (!ost->st) {
         fprintf(stderr, "Could not allocate stream\n");
@@ -380,7 +386,7 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
         ost->samples_count += dst_nb_samples;
     }
 
-    return write_frame(oc, c, ost->st, frame);
+    return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
 }
 
 /**************************************************************/
@@ -523,7 +529,7 @@ static AVFrame *get_video_frame(OutputStream *ost)
  */
 static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
 {
-    return write_frame(oc, ost->enc, ost->st, get_video_frame(ost));
+    return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
 }
 
 static void close_stream(AVFormatContext *oc, OutputStream *ost)
@@ -531,6 +537,7 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
     avcodec_free_context(&ost->enc);
     av_frame_free(&ost->frame);
     av_frame_free(&ost->tmp_frame);
+    av_packet_free(&ost->tmp_pkt);
     sws_freeContext(ost->sws_ctx);
     swr_free(&ost->swr_ctx);
 }
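On the cleanup side, the single av_packet_free() call in close_stream() is sufficient: it unreferences any data the packet still holds, frees the AVPacket structure, and sets the caller's pointer to NULL. A trivial sketch of that teardown, with tmp_pkt standing in for ost->tmp_pkt:

#include <libavcodec/packet.h>

static void free_stream_packet(AVPacket **tmp_pkt)
{
    /* unreferences remaining data, frees the packet and NULLs *tmp_pkt;
     * calling it again on the now-NULL pointer is a no-op */
    av_packet_free(tmp_pkt);
}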