diff --git a/avconv.c b/avconv.c
index b29f2006a7..13968da4f9 100644
--- a/avconv.c
+++ b/avconv.c
@@ -428,13 +428,7 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     }
 
     if (got_packet) {
-        if (pkt.pts != AV_NOPTS_VALUE)
-            pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
-        if (pkt.dts != AV_NOPTS_VALUE)
-            pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
-        if (pkt.duration > 0)
-            pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
-
+        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
         write_frame(s, &pkt, ost);
     }
 }
@@ -587,11 +581,7 @@ static void do_video_out(AVFormatContext *s,
         }
 
         if (got_packet) {
-            if (pkt.pts != AV_NOPTS_VALUE)
-                pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
-            if (pkt.dts != AV_NOPTS_VALUE)
-                pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
-
+            av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
             write_frame(s, &pkt, ost);
             *frame_size = pkt.size;
 
@@ -1044,12 +1034,7 @@ static void flush_encoders(void)
                     stop_encoding = 1;
                     break;
                 }
-                if (pkt.pts != AV_NOPTS_VALUE)
-                    pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
-                if (pkt.dts != AV_NOPTS_VALUE)
-                    pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
-                if (pkt.duration > 0)
-                    pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
+                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                 write_frame(os, &pkt, ost);
             }
 
diff --git a/doc/examples/output.c b/doc/examples/output.c
index dd0e6a224e..0239791e3d 100644
--- a/doc/examples/output.c
+++ b/doc/examples/output.c
@@ -362,14 +362,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
         ret = avcodec_encode_video2(c, &pkt, picture, &got_packet);
         /* If size is zero, it means the image was buffered. */
         if (!ret && got_packet && pkt.size) {
-            if (pkt.pts != AV_NOPTS_VALUE) {
-                pkt.pts = av_rescale_q(pkt.pts,
-                                       c->time_base, st->time_base);
-            }
-            if (pkt.dts != AV_NOPTS_VALUE) {
-                pkt.dts = av_rescale_q(pkt.dts,
-                                       c->time_base, st->time_base);
-            }
+            av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
             pkt.stream_index = st->index;
 
             /* Write the compressed frame to the media file. */
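
Note (not part of the patch): av_packet_rescale_ts() converts a packet's valid timing fields (pts, dts, duration) from the encoder timebase to the stream timebase, leaving AV_NOPTS_VALUE timestamps untouched, which is why each hunk can collapse the hand-rolled rescaling into one call. The sketch below only illustrates that behaviour; rescale_ts_sketch is a hypothetical helper mirroring the deleted code, not the library implementation.

/* Hypothetical helper, for illustration only: mirrors the manual
 * rescaling removed above, i.e. the effect of
 * av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base). */
#include <libavcodec/avcodec.h>
#include <libavutil/mathematics.h>

static void rescale_ts_sketch(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
{
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts = av_rescale_q(pkt->pts, src_tb, dst_tb);
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts = av_rescale_q(pkt->dts, src_tb, dst_tb);
    if (pkt->duration > 0)
        pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
}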