examples/muxing: factorize write_interleave code

Also log output packet information.
Stefano Sabatini 2014-01-19 17:17:14 +01:00
parent 5e2b8e4934
commit d72c742d47


@@ -36,6 +36,7 @@
 #include <libavutil/opt.h>
 #include <libavutil/mathematics.h>
+#include <libavutil/timestamp.h>
 #include <libavformat/avformat.h>
 #include <libswscale/swscale.h>
 #include <libswresample/swresample.h>
@@ -48,6 +49,30 @@
 static int sws_flags = SWS_BICUBIC;
 
+static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
+{
+    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
+
+    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
+           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
+           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
+           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
+           pkt->stream_index);
+}
+
+static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
+{
+    /* rescale output packet timestamp values from codec to stream timebase */
+    pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+    pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+    pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);
+    pkt->stream_index = st->index;
+
+    /* Write the compressed frame to the media file. */
+    log_packet(fmt_ctx, pkt);
+    return av_interleaved_write_frame(fmt_ctx, pkt);
+}
+
 /* Add an output stream. */
 static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                             enum AVCodecID codec_id)
@@ -284,14 +309,7 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
     if (!got_packet)
         return;
 
-    /* rescale output packet timestamp values from codec to stream timebase */
-    pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
-    pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
-    pkt.duration = av_rescale_q(pkt.duration, c->time_base, st->time_base);
-    pkt.stream_index = st->index;
-
-    /* Write the compressed frame to the media file. */
-    ret = av_interleaved_write_frame(oc, &pkt);
+    ret = write_frame(oc, &c->time_base, st, &pkt);
     if (ret != 0) {
         fprintf(stderr, "Error while writing audio frame: %s\n",
                 av_err2str(ret));
@@ -443,15 +461,8 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
     }
 
     /* If size is zero, it means the image was buffered. */
-    if (!ret && got_packet && pkt.size) {
-        /* rescale output packet timestamp values from codec to stream timebase */
-        pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
-        pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
-        pkt.duration = av_rescale_q(pkt.duration, c->time_base, st->time_base);
-        pkt.stream_index = st->index;
-
-        /* Write the compressed frame to the media file. */
-        ret = av_interleaved_write_frame(oc, &pkt);
+    if (got_packet) {
+        ret = write_frame(oc, &c->time_base, st, &pkt);
     } else {
         ret = 0;
     }
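After this change, both write_audio_frame() and write_video_frame() follow the same pattern: encode a frame, then hand the resulting packet to the shared write_frame() helper, which rescales the packet timestamps from the codec time base to the stream time base, logs the packet and passes it to av_interleaved_write_frame(). A minimal sketch of that call pattern (not part of the commit; it reuses the oc, c, st, pkt and ret names from the example above, with the error message generalized):

    /* After avcodec_encode_audio2()/avcodec_encode_video2() has filled pkt
     * and set got_packet: */
    if (got_packet) {
        /* write_frame() rescales pkt from c->time_base to st->time_base,
         * calls log_packet() and av_interleaved_write_frame(). */
        ret = write_frame(oc, &c->time_base, st, &pkt);
        if (ret != 0) {
            fprintf(stderr, "Error while writing frame: %s\n", av_err2str(ret));
            exit(1);
        }
    }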