/*
 * muxing functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "libavcodec/internal.h"
#include "libavcodec/bytestream.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "metadata.h"
#include "id3v2.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include "riff.h"
#include "audiointerleave.h"
#include "url.h"
#include <stdarg.h>
#if CONFIG_NETWORK
#include "network.h"
#endif

/**
 * @file
 * muxing functions for use within libavformat
 */

/* fraction handling */

/**
 * f = val + (num / den) + 0.5.
 *
 * 'num' is normalized so that 0 <= num < den.
 *
 * @param f fractional number
 * @param val integer value
 * @param num must be >= 0
 * @param den must be >= 1
 */
static void frac_init(FFFrac *f, int64_t val, int64_t num, int64_t den)
{
    num += (den >> 1);
    if (num >= den) {
        val += num / den;
        num  = num % den;
    }
    f->val = val;
    f->num = num;
    f->den = den;
}

/**
 * Fractional addition to f: f = f + (incr / f->den).
 *
 * @param f fractional number
 * @param incr increment, can be positive or negative
 */
static void frac_add(FFFrac *f, int64_t incr)
{
    int64_t num, den;

    num = f->num + incr;
    den = f->den;
    if (num < 0) {
        f->val += num / den;
        num     = num % den;
        if (num < 0) {
            num += den;
            f->val--;
        }
    } else if (num >= den) {
        f->val += num / den;
        num     = num % den;
    }
    f->num = num;
}
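
/**
 * Pick an output timebase for the stream, derived from st->time_base:
 * small factors (2, 3, 5, 7, 9, 11, 13) are divided out of the numerator
 * and the denominator is doubled (capped at 1<<24) until den/num reaches
 * at least min_precision.
 */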
AVRational ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precision)
{
    AVRational q;
    int j;

    q = st->time_base;

    for (j=2; j<14; j+= 1+(j>2))
        while (q.den / q.num < min_precision && q.num % j == 0)
            q.num /= j;
    while (q.den / q.num < min_precision && q.den < (1<<24))
        q.den <<= 1;

    return q;
}
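
/**
 * Guess a chroma sample location for the stream when none is signalled:
 * the value from st->codecpar is used if set, otherwise a default is
 * derived from the pixel format's chroma subsampling and, for 4:2:0,
 * from the codec id and field order.
 */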
enum AVChromaLocation ff_choose_chroma_location(AVFormatContext *s, AVStream *st)
{
    AVCodecParameters *par = st->codecpar;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(par->format);

    if (par->chroma_location != AVCHROMA_LOC_UNSPECIFIED)
        return par->chroma_location;

    if (pix_desc) {
        if (pix_desc->log2_chroma_h == 0) {
            return AVCHROMA_LOC_TOPLEFT;
        } else if (pix_desc->log2_chroma_w == 1 && pix_desc->log2_chroma_h == 1) {
            if (par->field_order == AV_FIELD_UNKNOWN || par->field_order == AV_FIELD_PROGRESSIVE) {
                switch (par->codec_id) {
                case AV_CODEC_ID_MJPEG:
                case AV_CODEC_ID_MPEG1VIDEO: return AVCHROMA_LOC_CENTER;
                }
            }
            if (par->field_order == AV_FIELD_UNKNOWN || par->field_order != AV_FIELD_PROGRESSIVE) {
                switch (par->codec_id) {
                case AV_CODEC_ID_MPEG2VIDEO: return AVCHROMA_LOC_LEFT;
                }
            }
        }
    }

    return AVCHROMA_LOC_UNSPECIFIED;

}
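
/*
 * Typical call sequence for muxing with this API (illustrative sketch only,
 * error handling and stream setup omitted):
 *
 *     AVFormatContext *oc = NULL;
 *     avformat_alloc_output_context2(&oc, NULL, NULL, "out.mkv");
 *     // add streams, open oc->pb with avio_open(), then:
 *     avformat_write_header(oc, NULL);
 *     // av_interleaved_write_frame() per packet, av_write_trailer(), cleanup
 */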
int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
                                   const char *format, const char *filename)
{
    AVFormatContext *s = avformat_alloc_context();
    int ret = 0;

    *avctx = NULL;
    if (!s)
        goto nomem;

    if (!oformat) {
        if (format) {
            oformat = av_guess_format(format, NULL, NULL);
            if (!oformat) {
                av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
                ret = AVERROR(EINVAL);
                goto error;
            }
        } else {
            oformat = av_guess_format(NULL, filename, NULL);
            if (!oformat) {
                ret = AVERROR(EINVAL);
                av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
                       filename);
                goto error;
            }
        }
    }

    s->oformat = oformat;
    if (s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            goto nomem;
        if (s->oformat->priv_class) {
            *(const AVClass**)s->priv_data= s->oformat->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    } else
        s->priv_data = NULL;

    if (filename)
        av_strlcpy(s->filename, filename, sizeof(s->filename));
    *avctx = s;
    return 0;
nomem:
    av_log(s, AV_LOG_ERROR, "Out of memory\n");
    ret = AVERROR(ENOMEM);
error:
    avformat_free_context(s);
    return ret;
}
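
/* Return 1 if the stream's codec_tag is acceptable for the output format
 * (or if neither the tag nor the codec id appears in the format's codec_tag
 * tables), 0 if it conflicts with those tables. */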
static int validate_codec_tag(AVFormatContext *s, AVStream *st)
{
    const AVCodecTag *avctag;
    int n;
    enum AVCodecID id = AV_CODEC_ID_NONE;
    int64_t tag  = -1;

    /**
     * Check that tag + id is in the table
     * If neither is in the table -> OK
     * If tag is in the table with another id -> FAIL
     * If id is in the table with another tag -> FAIL unless strict < normal
     */
    for (n = 0; s->oformat->codec_tag[n]; n++) {
        avctag = s->oformat->codec_tag[n];
        while (avctag->id != AV_CODEC_ID_NONE) {
            if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codecpar->codec_tag)) {
                id = avctag->id;
                if (id == st->codecpar->codec_id)
                    return 1;
            }
            if (avctag->id == st->codecpar->codec_id)
                tag = avctag->tag;
            avctag++;
        }
    }
    if (id != AV_CODEC_ID_NONE)
        return 0;
    if (tag >= 0 && (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
        return 0;
    return 1;
}
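
/* Validate and finalize the muxing setup: apply AVOptions, migrate legacy
 * AVStream.codec settings, pick default timebases, sanity-check codec
 * parameters and tags, and call the muxer's init() callback if present. */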
static int init_muxer(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0, i;
    AVStream *st;
    AVDictionary *tmp = NULL;
    AVCodecParameters *par = NULL;
    AVOutputFormat *of = s->oformat;
    const AVCodecDescriptor *desc;
    AVDictionaryEntry *e;

    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;
    if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
        (ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto fail;

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
    if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT) {
        if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
#if FF_API_LAVF_BITEXACT
            av_log(s, AV_LOG_WARNING,
                   "Setting the AVFormatContext to bitexact mode, because "
                   "the AVCodecContext is in that mode. This behavior will "
                   "change in the future. To keep the current behavior, set "
                   "AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\n");
            s->flags |= AVFMT_FLAG_BITEXACT;
#else
            av_log(s, AV_LOG_WARNING,
                   "The AVFormatContext is not set to bitexact mode, only "
                   "the AVCodecContext. If this is not intended, set "
                   "AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\n");
#endif
        }
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    // some sanity checks
    if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "No streams to mux were specified\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }
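
    /* per-stream setup: migrate deprecated AVStream.codec settings, pick
     * default timebases, and validate codec parameters and tags */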
    for (i = 0; i < s->nb_streams; i++) {
        st  = s->streams[i];
        par = st->codecpar;

#if FF_API_LAVF_CODEC_TB && FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
        if (!st->time_base.num && st->codec->time_base.num) {
            av_log(s, AV_LOG_WARNING, "Using AVStream.codec.time_base as a "
                   "timebase hint to the muxer is deprecated. Set "
                   "AVStream.time_base instead.\n");
            avpriv_set_pts_info(st, 64, st->codec->time_base.num, st->codec->time_base.den);
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
        if (st->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN &&
            st->codec->codec_type    != AVMEDIA_TYPE_UNKNOWN) {
            av_log(s, AV_LOG_WARNING, "Using AVStream.codec to pass codec "
                   "parameters to muxers is deprecated, use AVStream.codecpar "
                   "instead.\n");
            ret = avcodec_parameters_from_context(st->codecpar, st->codec);
            if (ret < 0)
                goto fail;
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        if (!st->time_base.num) {
            /* fall back on the default timebase values */
            if (par->codec_type == AVMEDIA_TYPE_AUDIO && par->sample_rate)
                avpriv_set_pts_info(st, 64, 1, par->sample_rate);
            else
                avpriv_set_pts_info(st, 33, 1, 90000);
        }
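
        /* codec-type specific sanity checks and defaults */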
        switch (par->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (par->sample_rate <= 0) {
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if (!par->block_align)
                par->block_align = par->channels *
                                   av_get_bits_per_sample(par->codec_id) >> 3;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if ((par->width <= 0 || par->height <= 0) &&
                !(of->flags & AVFMT_NODIMENSIONS)) {
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if (av_cmp_q(st->sample_aspect_ratio, par->sample_aspect_ratio)
                && fabs(av_q2d(st->sample_aspect_ratio) - av_q2d(par->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
            ) {
                if (st->sample_aspect_ratio.num != 0 &&
                    st->sample_aspect_ratio.den != 0 &&
                    par->sample_aspect_ratio.num != 0 &&
                    par->sample_aspect_ratio.den != 0) {
                    av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
                           "(%d/%d) and encoder layer (%d/%d)\n",
                           st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                           par->sample_aspect_ratio.num,
                           par->sample_aspect_ratio.den);
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
            }
            break;
        }
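
        /* mark streams whose codec is known to reorder frames (B-frames etc.) */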
        desc = avcodec_descriptor_get(par->codec_id);
        if (desc && desc->props & AV_CODEC_PROP_REORDER)
            st->internal->reorder = 1;

        if (of->codec_tag) {
            if (   par->codec_tag
                && par->codec_id == AV_CODEC_ID_RAWVIDEO
                && (   av_codec_get_tag(of->codec_tag, par->codec_id) == 0
                    || av_codec_get_tag(of->codec_tag, par->codec_id) == MKTAG('r', 'a', 'w', ' '))
                && !validate_codec_tag(s, st)) {
                // the current rawvideo encoding system ends up setting
                // the wrong codec_tag for avi/mov, we override it here
                par->codec_tag = 0;
            }
            if (par->codec_tag) {
                if (!validate_codec_tag(s, st)) {
                    const uint32_t otag = av_codec_get_tag(s->oformat->codec_tag, par->codec_id);
                    av_log(s, AV_LOG_ERROR,
                           "Tag %s incompatible with output codec id '%d' (%s)\n",
                           av_fourcc2str(par->codec_tag), par->codec_id, av_fourcc2str(otag));
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
            } else
                par->codec_tag = av_codec_get_tag(of->codec_tag, par->codec_id);
        }

        if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT)
            s->internal->nb_interleaved_streams++;
    }
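
    /* Allocate the muxer's private context and apply its private options if
     * avformat_alloc_output_context2() has not already done so. */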
    if (!s->priv_data && of->priv_data_size > 0) {
        s->priv_data = av_mallocz(of->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (of->priv_class) {
            *(const AVClass **)s->priv_data = of->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
                goto fail;
        }
    }

    /* set muxer identification string */
    if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
        av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
    } else {
        av_dict_set(&s->metadata, "encoder", NULL, 0);
    }

    for (e = NULL; e = av_dict_get(s->metadata, "encoder-", e, AV_DICT_IGNORE_SUFFIX); ) {
        av_dict_set(&s->metadata, e->key, NULL, 0);
    }

    if (options) {
        av_dict_free(options);
        *options = tmp;
    }

    if (s->oformat->init) {
        if ((ret = s->oformat->init(s)) < 0) {
            if (s->oformat->deinit)
                s->oformat->deinit(s);
            return ret;
        }
        return ret == 0;
    }

    return 0;

fail:
    av_dict_free(&tmp);
    return ret;
}
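
/* Initialize the fractional PTS generator (st->priv_pts) for each stream,
 * using the sample rate for audio and the stream timebase for video. */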
static int init_pts(AVFormatContext *s)
{
    int i;
    AVStream *st;

    /* init PTS generation */
    for (i = 0; i < s->nb_streams; i++) {
        int64_t den = AV_NOPTS_VALUE;
        st = s->streams[i];

        switch (st->codecpar->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            den = (int64_t)st->time_base.num * st->codecpar->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            den = (int64_t)st->time_base.num * st->time_base.den;
            break;
        default:
            break;
        }

        if (!st->priv_pts)
            st->priv_pts = av_mallocz(sizeof(*st->priv_pts));
        if (!st->priv_pts)
            return AVERROR(ENOMEM);

        if (den != AV_NOPTS_VALUE) {
            if (den <= 0)
                return AVERROR_INVALIDDATA;

            frac_init(st->priv_pts, 0, 0, den);
        }
    }

    return 0;
}
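
/* Call the muxer's write_header callback (if any), record the result in
 * s->internal->write_header_ret, mark the header as written, and bracket
 * the header bytes with avio data markers. */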
static int write_header_internal(AVFormatContext *s)
{
    if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_HEADER);
    if (s->oformat->write_header) {
        int ret = s->oformat->write_header(s);
        if (ret >= 0 && s->pb && s->pb->error < 0)
            ret = s->pb->error;
        s->internal->write_header_ret = ret;
        if (ret < 0)
            return ret;
        if (s->flush_packets && s->pb && s->pb->error >= 0 && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
            avio_flush(s->pb);
    }
    s->internal->header_written = 1;
    if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_UNKNOWN);
    return 0;
}
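
/* Run init_muxer() and, when the muxer's init() callback fully initialized
 * the streams, also set up PTS generation and the avoid_negative_ts default.
 * The return value tells the caller whether stream initialization happened
 * here (AVSTREAM_INIT_IN_INIT_OUTPUT) or is deferred to
 * avformat_write_header() (AVSTREAM_INIT_IN_WRITE_HEADER). */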
int avformat_init_output(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0;

    if ((ret = init_muxer(s, options)) < 0)
        return ret;

    s->internal->initialized = 1;
    s->internal->streams_initialized = ret;

    if (s->oformat->init && ret) {
        if ((ret = init_pts(s)) < 0)
            return ret;

        if (s->avoid_negative_ts < 0) {
            av_assert2(s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO);
            if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
                s->avoid_negative_ts = 0;
            } else
                s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE;
        }

        return AVSTREAM_INIT_IN_INIT_OUTPUT;
    }

    return AVSTREAM_INIT_IN_WRITE_HEADER;
}
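
/* Public header-writing entry point: runs avformat_init_output() if the
 * caller has not done so, writes the header now unless it must wait for
 * automatic bitstream filters (check_bitstream + AVFMT_FLAG_AUTO_BSF), and
 * finishes any stream initialization that was not done in init(). */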

int avformat_write_header(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0;
    int already_initialized = s->internal->initialized;
    int streams_already_initialized = s->internal->streams_initialized;

    if (!already_initialized)
        if ((ret = avformat_init_output(s, options)) < 0)
            return ret;

    if (!(s->oformat->check_bitstream && s->flags & AVFMT_FLAG_AUTO_BSF)) {
        ret = write_header_internal(s);
        if (ret < 0)
            goto fail;
    }

    if (!s->internal->streams_initialized) {
        if ((ret = init_pts(s)) < 0)
            goto fail;

        if (s->avoid_negative_ts < 0) {
            av_assert2(s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO);
            if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
                s->avoid_negative_ts = 0;
            } else
                s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE;
        }
    }

    return streams_already_initialized;

fail:
    if (s->oformat->deinit)
        s->oformat->deinit(s);
    return ret;
}
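
/*
 * Minimal caller-side setup sketch (illustrative, not part of the original
 * code), assuming an already-filled AVCodecParameters *par and the
 * hypothetical names oc, st, filename:
 *
 *     AVFormatContext *oc = NULL;
 *     AVStream *st;
 *     int ret = avformat_alloc_output_context2(&oc, NULL, NULL, filename);
 *     if (ret < 0)
 *         return ret;
 *     st = avformat_new_stream(oc, NULL);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     avcodec_parameters_copy(st->codecpar, par);
 *     st->time_base = (AVRational){ 1, 90000 };   // a hint; the muxer may override it
 *     if (!(oc->oformat->flags & AVFMT_NOFILE)) {
 *         ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
 *         if (ret < 0)
 *             return ret;
 *     }
 *     ret = avformat_write_header(oc, NULL);      // may rewrite st->time_base
 */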

#define AV_PKT_FLAG_UNCODED_FRAME 0x2000

/* Note: using sizeof(AVFrame) from outside lavu is unsafe in general, but
   it is only being used internally to this file as a consistency check.
   The value is chosen to be very unlikely to appear on its own and to cause
   immediate failure if used anywhere as a real size. */
#define UNCODED_FRAME_PACKET_SIZE (INT_MIN / 3 * 2 + (int)sizeof(AVFrame))

#if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
//FIXME merge with compute_pkt_fields
static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    int delay = FFMAX(st->codecpar->video_delay, st->internal->avctx->max_b_frames > 0);
    int num, den, i;
    int frame_size;

    if (!s->internal->missing_ts_warning &&
        !(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
        (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC) || (st->disposition & AV_DISPOSITION_TIMED_THUMBNAILS)) &&
        (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE)) {
        av_log(s, AV_LOG_WARNING,
               "Timestamps are unset in a packet for stream %d. "
               "This is deprecated and will stop working in the future. "
               "Fix your code to set the timestamps properly\n", st->index);
        s->internal->missing_ts_warning = 1;
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_TRACE, "compute_muxer_pkt_fields: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);

    if (pkt->duration < 0 && st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
        av_log(s, AV_LOG_WARNING, "Packet with invalid duration %"PRId64" in stream %d\n",
               pkt->duration, pkt->stream_index);
        pkt->duration = 0;
    }

    /* duration field */
    if (pkt->duration == 0) {
        ff_compute_frame_duration(s, &num, &den, st, NULL, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
        }
    }

    if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
        pkt->pts = pkt->dts;

    //XXX/FIXME this is a temporary hack until all encoders output pts
    if ((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay) {
        static int warned;
        if (!warned) {
            av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
            warned = 1;
        }
        pkt->dts =
//        pkt->pts = st->cur_dts;
            pkt->pts = st->priv_pts->val;
    }

    //calculate dts from pts
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        st->pts_buffer[0] = pkt->pts;
        for (i = 1; i < delay + 1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration;
        for (i = 0; i < delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);

        pkt->dts = st->pts_buffer[0];
    }

    if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
        ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
          st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE &&
          st->codecpar->codec_type != AVMEDIA_TYPE_DATA &&
          st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
        av_log(s, AV_LOG_ERROR,
               "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
               st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
        return AVERROR(EINVAL);
    }
    if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
        av_log(s, AV_LOG_ERROR,
               "pts (%s) < dts (%s) in stream %d\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts),
               st->index);
        return AVERROR(EINVAL);
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_TRACE, "av_write_frame: pts2:%s dts2:%s\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts));

    st->cur_dts = pkt->dts;
    st->priv_pts->val = pkt->dts;

    /* update pts */
    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        frame_size = (pkt->flags & AV_PKT_FLAG_UNCODED_FRAME) ?
                     ((AVFrame *)pkt->data)->nb_samples :
                     av_get_audio_frame_duration(st->codec, pkt->size);

        /* HACK/FIXME, we skip the initial 0 size packets as they are most
         * likely equal to the encoder delay, but it would be better if we
         * had the real timestamps from the encoder */
        if (frame_size >= 0 && (pkt->size || st->priv_pts->num != st->priv_pts->den >> 1 || st->priv_pts->val)) {
            frac_add(st->priv_pts, (int64_t)st->time_base.den * frame_size);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        frac_add(st->priv_pts, (int64_t)st->time_base.den * st->time_base.num);
        break;
    }
    return 0;
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
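
/*
 * Worked example of the frac-based pts fabrication above (illustrative):
 * for an audio stream with time_base = 1/44100 and sample_rate = 44100,
 * init_pts() sets den = time_base.num * sample_rate = 44100. For each
 * 1024-sample packet, frac_add() adds time_base.den * frame_size =
 * 44100 * 1024 to the fraction, so priv_pts->val advances by exactly
 * 44100 * 1024 / 44100 = 1024 ticks per packet (1024/44100 seconds), with
 * any rounding remainder carried exactly in priv_pts->num.
 */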

/**
 * Make timestamps non-negative, move side data from payload to internal
 * struct, call the muxer, and restore the side data.
 *
 * FIXME: this function should NEVER get undefined pts/dts except when
 * AVFMT_NOTIMESTAMPS is set.
 * Those additional safety checks should be dropped once the correct checks
 * are set in the callers.
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, did_split;
    int64_t pts_backup, dts_backup;

    pts_backup = pkt->pts;
    dts_backup = pkt->dts;

    // If the timestamp offsetting below is adjusted, adjust
    // ff_interleaved_peek similarly.
    if (s->output_ts_offset) {
        AVStream *st = s->streams[pkt->stream_index];
        int64_t offset = av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);

        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;
    }

    if (s->avoid_negative_ts > 0) {
        AVStream *st = s->streams[pkt->stream_index];
        int64_t offset = st->mux_ts_offset;
        int64_t ts = s->internal->avoid_negative_ts_use_pts ? pkt->pts : pkt->dts;

        if (s->internal->offset == AV_NOPTS_VALUE && ts != AV_NOPTS_VALUE &&
            (ts < 0 || s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO)) {
            s->internal->offset = -ts;
            s->internal->offset_timebase = st->time_base;
        }

        if (s->internal->offset != AV_NOPTS_VALUE && !offset) {
            offset = st->mux_ts_offset =
                av_rescale_q_rnd(s->internal->offset,
                                 s->internal->offset_timebase,
                                 st->time_base,
                                 AV_ROUND_UP);
        }

        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;

        if (s->internal->avoid_negative_ts_use_pts) {
            if (pkt->pts != AV_NOPTS_VALUE && pkt->pts < 0) {
                av_log(s, AV_LOG_WARNING, "failed to avoid negative "
                       "pts %s in stream %d.\n"
                       "Try -avoid_negative_ts 1 as a possible workaround.\n",
                       av_ts2str(pkt->pts),
                       pkt->stream_index
                );
            }
        } else {
            av_assert2(pkt->dts == AV_NOPTS_VALUE || pkt->dts >= 0 || s->max_interleave_delta > 0);
            if (pkt->dts != AV_NOPTS_VALUE && pkt->dts < 0) {
                av_log(s, AV_LOG_WARNING,
                       "Packets poorly interleaved, failed to avoid negative "
                       "timestamp %s in stream %d.\n"
                       "Try -max_interleave_delta 0 as a possible workaround.\n",
                       av_ts2str(pkt->dts),
                       pkt->stream_index
                );
            }
        }
    }

#if FF_API_LAVF_MERGE_SD
FF_DISABLE_DEPRECATION_WARNINGS
    did_split = av_packet_split_side_data(pkt);
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (!s->internal->header_written) {
        ret = s->internal->write_header_ret ? s->internal->write_header_ret : write_header_internal(s);
        if (ret < 0)
            goto fail;
    }

    if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
        AVFrame *frame = (AVFrame *)pkt->data;
        av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE);
        ret = s->oformat->write_uncoded_frame(s, pkt->stream_index, &frame, 0);
        av_frame_free(&frame);
    } else {
        ret = s->oformat->write_packet(s, pkt);
    }

    if (s->pb && ret >= 0) {
        if (s->flush_packets && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
            avio_flush(s->pb);
        if (s->pb->error < 0)
            ret = s->pb->error;
    }

fail:
#if FF_API_LAVF_MERGE_SD
FF_DISABLE_DEPRECATION_WARNINGS
    if (did_split)
        av_packet_merge_side_data(pkt);
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (ret < 0) {
        pkt->pts = pts_backup;
        pkt->dts = dts_backup;
    }

    return ret;
}
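
/*
 * Worked example of the avoid_negative_ts shifting above (illustrative):
 * with avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_ZERO and a first packet
 * whose dts is -1024 in a 1/48000 time base, s->internal->offset becomes
 * +1024 with offset_timebase = 1/48000. Each stream then gets
 * st->mux_ts_offset = av_rescale_q_rnd(1024, (AVRational){1, 48000},
 * st->time_base, AV_ROUND_UP), so every pts/dts is shifted up by roughly
 * 21.3 ms and the first dts handed to the muxer becomes 0. From user code
 * the mode can be selected via the AVOption, e.g.:
 *
 *     av_opt_set(oc, "avoid_negative_ts", "make_zero", 0);
 */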

static int check_packet(AVFormatContext *s, AVPacket *pkt)
{
    if (!pkt)
        return 0;

    if (pkt->stream_index < 0 || pkt->stream_index >= s->nb_streams) {
        av_log(s, AV_LOG_ERROR, "Invalid packet stream index: %d\n",
               pkt->stream_index);
        return AVERROR(EINVAL);
    }

    if (s->streams[pkt->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
        av_log(s, AV_LOG_ERROR, "Received a packet for an attachment stream.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static int prepare_input_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    ret = check_packet(s, pkt);
    if (ret < 0)
        return ret;

#if !FF_API_COMPUTE_PKT_FIELDS2 || !FF_API_LAVF_AVCTX
    /* sanitize the timestamps */
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        AVStream *st = s->streams[pkt->stream_index];

        /* when there is no reordering (so dts is equal to pts), but
         * only one of them is set, set the other as well */
        if (!st->internal->reorder) {
            if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            if (pkt->dts == AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE)
                pkt->dts = pkt->pts;
        }

        /* check that the timestamps are set */
        if (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE) {
            av_log(s, AV_LOG_ERROR,
                   "Timestamps are unset in a packet for stream %d\n", st->index);
            return AVERROR(EINVAL);
        }

        /* check that the dts are increasing (or at least non-decreasing,
         * if the format allows it) */
        if (st->cur_dts != AV_NOPTS_VALUE &&
            ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) ||
             st->cur_dts > pkt->dts)) {
            av_log(s, AV_LOG_ERROR,
                   "Application provided invalid, non monotonically increasing "
                   "dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
                   st->index, st->cur_dts, pkt->dts);
            return AVERROR(EINVAL);
        }

        if (pkt->pts < pkt->dts) {
            av_log(s, AV_LOG_ERROR, "pts %" PRId64 " < dts %" PRId64 " in stream %d\n",
                   pkt->pts, pkt->dts, st->index);
            return AVERROR(EINVAL);
        }
    }
#endif

    return 0;
}
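
/*
 * Caller-side sketch (illustrative): the timestamps handed to the muxer must
 * already be expressed in the output stream's time base. When packets come
 * from an encoder or another demuxer that uses a different time base,
 * rescale them before writing, e.g.:
 *
 *     // in_tb is the time base pkt's pts/dts/duration are currently in
 *     av_packet_rescale_ts(pkt, in_tb, oc->streams[pkt->stream_index]->time_base);
 *     ret = av_interleaved_write_frame(oc, pkt);
 */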

static int do_packet_auto_bsf(AVFormatContext *s, AVPacket *pkt) {
    AVStream *st = s->streams[pkt->stream_index];
    int i, ret;

    if (!(s->flags & AVFMT_FLAG_AUTO_BSF))
        return 1;

    if (s->oformat->check_bitstream) {
        if (!st->internal->bitstream_checked) {
            if ((ret = s->oformat->check_bitstream(s, pkt)) < 0)
                return ret;
            else if (ret == 1)
                st->internal->bitstream_checked = 1;
        }
    }

#if FF_API_LAVF_MERGE_SD
FF_DISABLE_DEPRECATION_WARNINGS
    if (st->internal->nb_bsfcs) {
        ret = av_packet_split_side_data(pkt);
        if (ret < 0)
            av_log(s, AV_LOG_WARNING, "Failed to split side data before bitstream filter\n");
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    for (i = 0; i < st->internal->nb_bsfcs; i++) {
        AVBSFContext *ctx = st->internal->bsfcs[i];
        // TODO: when any bitstream filter requires flushing at EOF, we'll need to
        // flush each stream's BSF chain on write_trailer.
        if ((ret = av_bsf_send_packet(ctx, pkt)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Failed to send packet to filter %s for stream %d\n",
                   ctx->filter->name, pkt->stream_index);
            return ret;
        }
        // TODO: when any automatically-added bitstream filter is generating multiple
        // output packets for a single input one, we'll need to call this in a loop
        // and write each output packet.
        if ((ret = av_bsf_receive_packet(ctx, pkt)) < 0) {
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;
            av_log(ctx, AV_LOG_ERROR,
                   "Failed to receive packet from filter %s for stream %d\n",
                   ctx->filter->name, pkt->stream_index);
            return ret;
        }
    }
    return 1;
}
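
/*
 * The send/receive pattern used above is the generic lavc bitstream filter
 * API. A standalone caller-side sketch (illustrative; assumes an H.264-in-MP4
 * input and the hypothetical names bsf, st, pkt):
 *
 *     const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
 *     AVBSFContext *bsf = NULL;
 *     int ret = av_bsf_alloc(f, &bsf);
 *     if (ret < 0)
 *         return ret;
 *     avcodec_parameters_copy(bsf->par_in, st->codecpar);
 *     bsf->time_base_in = st->time_base;
 *     ret = av_bsf_init(bsf);
 *     // per packet:
 *     ret = av_bsf_send_packet(bsf, pkt);
 *     while (ret >= 0) {
 *         ret = av_bsf_receive_packet(bsf, pkt);
 *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *             break;
 *         if (ret < 0)
 *             return ret;
 *         // write or forward the filtered packet here
 *     }
 *     av_bsf_free(&bsf);
 */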

int av_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    ret = prepare_input_packet(s, pkt);
    if (ret < 0)
        return ret;

    if (!pkt) {
        if (s->oformat->flags & AVFMT_ALLOW_FLUSH) {
            if (!s->internal->header_written) {
                ret = s->internal->write_header_ret ? s->internal->write_header_ret : write_header_internal(s);
                if (ret < 0)
                    return ret;
            }
            ret = s->oformat->write_packet(s, NULL);
            if (s->flush_packets && s->pb && s->pb->error >= 0 && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
                avio_flush(s->pb);
            if (ret >= 0 && s->pb && s->pb->error < 0)
                ret = s->pb->error;
            return ret;
        }
        return 1;
    }

    ret = do_packet_auto_bsf(s, pkt);
    if (ret <= 0)
        return ret;

#if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
    ret = compute_muxer_pkt_fields(s, s->streams[pkt->stream_index], pkt);

    if (ret < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return ret;
#endif

    ret = write_packet(s, pkt);
    if (ret >= 0 && s->pb && s->pb->error < 0)
        ret = s->pb->error;

    if (ret >= 0)
        s->streams[pkt->stream_index]->nb_frames++;
    return ret;
}
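
/*
 * Caller-side note (illustrative): muxers that set AVFMT_ALLOW_FLUSH can be
 * flushed explicitly by passing a NULL packet, which maps to the
 * write_packet(s, NULL) call above:
 *
 *     if (oc->oformat->flags & AVFMT_ALLOW_FLUSH)
 *         ret = av_write_frame(oc, NULL);   // 1 means fully flushed
 */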

#define CHUNK_START 0x1000

int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                             int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
{
    int ret;
    AVPacketList **next_point, *this_pktl;
    AVStream *st = s->streams[pkt->stream_index];
    int chunked  = s->max_chunk_size || s->max_chunk_duration;

    this_pktl = av_mallocz(sizeof(AVPacketList));
    if (!this_pktl)
        return AVERROR(ENOMEM);
    if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
        av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE);
        av_assert0(((AVFrame *)pkt->data)->buf);
        this_pktl->pkt = *pkt;
        pkt->buf = NULL;
        pkt->side_data = NULL;
        pkt->side_data_elems = 0;
    } else {
        if ((ret = av_packet_ref(&this_pktl->pkt, pkt)) < 0) {
            av_free(this_pktl);
            return ret;
        }
    }

    if (s->streams[pkt->stream_index]->last_in_packet_buffer) {
        next_point = &(st->last_in_packet_buffer->next);
    } else {
        next_point = &s->internal->packet_buffer;
    }

    if (chunked) {
        uint64_t max = av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP);
        st->interleaver_chunk_size     += pkt->size;
        st->interleaver_chunk_duration += pkt->duration;
        if (   (s->max_chunk_size && st->interleaver_chunk_size > s->max_chunk_size)
            || (max && st->interleaver_chunk_duration           > max)) {
            st->interleaver_chunk_size = 0;
            this_pktl->pkt.flags |= CHUNK_START;
            if (max && st->interleaver_chunk_duration > max) {
                int64_t syncoffset = (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)*max/2;
                int64_t syncto = av_rescale(pkt->dts + syncoffset, 1, max)*max - syncoffset;

                st->interleaver_chunk_duration += (pkt->dts - syncto)/8 - max;
            } else
                st->interleaver_chunk_duration = 0;
        }
    }
    if (*next_point) {
        if (chunked && !(this_pktl->pkt.flags & CHUNK_START))
            goto next_non_null;

        if (compare(s, &s->internal->packet_buffer_end->pkt, pkt)) {
            while (   *next_point
                   && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
                       || !compare(s, &(*next_point)->pkt, pkt)))
                next_point = &(*next_point)->next;
            if (*next_point)
                goto next_non_null;
        } else {
            next_point = &(s->internal->packet_buffer_end->next);
        }
    }
    av_assert1(!*next_point);

    s->internal->packet_buffer_end = this_pktl;
next_non_null:

    this_pktl->next = *next_point;

    s->streams[pkt->stream_index]->last_in_packet_buffer =
        *next_point                                      = this_pktl;

    av_packet_unref(pkt);

    return 0;
}

static int interleave_compare_dts(AVFormatContext *s, AVPacket *next,
                                  AVPacket *pkt)
{
    AVStream *st  = s->streams[pkt->stream_index];
    AVStream *st2 = s->streams[next->stream_index];
    int comp      = av_compare_ts(next->dts, st2->time_base, pkt->dts,
                                  st->time_base);
    if (s->audio_preload && ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))) {
        int64_t ts  = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codecpar->codec_type == AVMEDIA_TYPE_AUDIO);
        int64_t ts2 = av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO);
        if (ts == ts2) {
            ts  = ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
                 -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
            ts2 = 0;
        }
        comp = (ts > ts2) - (ts < ts2);
    }

    if (comp == 0)
        return pkt->stream_index < next->stream_index;
    return comp > 0;
}

int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
                                 AVPacket *pkt, int flush)
{
    AVPacketList *pktl;
    int stream_count = 0;
    int noninterleaved_count = 0;
    int i, ret;
    int eof = flush;

    if (pkt) {
        if ((ret = ff_interleave_add_packet(s, pkt, interleave_compare_dts)) < 0)
            return ret;
    }

    for (i = 0; i < s->nb_streams; i++) {
        if (s->streams[i]->last_in_packet_buffer) {
            ++stream_count;
        } else if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_ATTACHMENT &&
                   s->streams[i]->codecpar->codec_id != AV_CODEC_ID_VP8 &&
                   s->streams[i]->codecpar->codec_id != AV_CODEC_ID_VP9) {
            ++noninterleaved_count;
        }
    }

    if (s->internal->nb_interleaved_streams == stream_count)
        flush = 1;

    if (s->max_interleave_delta > 0 &&
        s->internal->packet_buffer &&
        !flush &&
        s->internal->nb_interleaved_streams == stream_count+noninterleaved_count
    ) {
        AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
        int64_t delta_dts = INT64_MIN;
        int64_t top_dts = av_rescale_q(top_pkt->dts,
                                       s->streams[top_pkt->stream_index]->time_base,
                                       AV_TIME_BASE_Q);

        for (i = 0; i < s->nb_streams; i++) {
            int64_t last_dts;
            const AVPacketList *last = s->streams[i]->last_in_packet_buffer;

            if (!last)
                continue;

            last_dts = av_rescale_q(last->pkt.dts,
                                    s->streams[i]->time_base,
                                    AV_TIME_BASE_Q);
            delta_dts = FFMAX(delta_dts, last_dts - top_dts);
        }

        if (delta_dts > s->max_interleave_delta) {
            av_log(s, AV_LOG_DEBUG,
                   "Delay between the first packet and last packet in the "
                   "muxing queue is %"PRId64" > %"PRId64": forcing output\n",
                   delta_dts, s->max_interleave_delta);
            flush = 1;
        }
    }

    if (s->internal->packet_buffer &&
        eof &&
        (s->flags & AVFMT_FLAG_SHORTEST) &&
        s->internal->shortest_end == AV_NOPTS_VALUE) {
        AVPacket *top_pkt = &s->internal->packet_buffer->pkt;

        s->internal->shortest_end = av_rescale_q(top_pkt->dts,
                                                 s->streams[top_pkt->stream_index]->time_base,
                                                 AV_TIME_BASE_Q);
    }

    if (s->internal->shortest_end != AV_NOPTS_VALUE) {
        while (s->internal->packet_buffer) {
            AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
            AVStream *st;
            int64_t top_dts = av_rescale_q(top_pkt->dts,
                                           s->streams[top_pkt->stream_index]->time_base,
                                           AV_TIME_BASE_Q);

            if (s->internal->shortest_end + 1 >= top_dts)
                break;

            pktl = s->internal->packet_buffer;
            st   = s->streams[pktl->pkt.stream_index];

            s->internal->packet_buffer = pktl->next;
            if (!s->internal->packet_buffer)
                s->internal->packet_buffer_end = NULL;

            if (st->last_in_packet_buffer == pktl)
                st->last_in_packet_buffer = NULL;

            av_packet_unref(&pktl->pkt);
            av_freep(&pktl);
            flush = 0;
        }
    }

    if (stream_count && flush) {
        AVStream *st;
        pktl = s->internal->packet_buffer;
        *out = pktl->pkt;
        st   = s->streams[out->stream_index];

        s->internal->packet_buffer = pktl->next;
        if (!s->internal->packet_buffer)
            s->internal->packet_buffer_end = NULL;

        if (st->last_in_packet_buffer == pktl)
            st->last_in_packet_buffer = NULL;
        av_freep(&pktl);

        return 1;
    } else {
        av_init_packet(out);
        return 0;
    }
}
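
/*
 * Caller-side sketch (illustrative): the buffering limit checked above is the
 * "max_interleave_delta" AVOption, in microseconds. It can be tuned or
 * disabled before avformat_write_header(), e.g.:
 *
 *     av_opt_set_int(oc, "max_interleave_delta", 0, 0);        // never force output on skew
 *     // or
 *     av_opt_set_int(oc, "max_interleave_delta", 1000000, 0);  // force output after 1 s of skew
 */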

int ff_interleaved_peek(AVFormatContext *s, int stream,
                        AVPacket *pkt, int add_offset)
{
    AVPacketList *pktl = s->internal->packet_buffer;
    while (pktl) {
        if (pktl->pkt.stream_index == stream) {
            *pkt = pktl->pkt;
            if (add_offset) {
                AVStream *st = s->streams[pkt->stream_index];
                int64_t offset = st->mux_ts_offset;

                if (s->output_ts_offset)
                    offset += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);

                if (pkt->dts != AV_NOPTS_VALUE)
                    pkt->dts += offset;
                if (pkt->pts != AV_NOPTS_VALUE)
                    pkt->pts += offset;
            }
            return 0;
        }
        pktl = pktl->next;
    }
    return AVERROR(ENOENT);
}

/**
 * Interleave an AVPacket correctly so it can be muxed.
 * @param out the interleaved packet will be output here
 * @param in the input packet
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
 */
static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush)
{
    if (s->oformat->interleave_packet) {
        int ret = s->oformat->interleave_packet(s, out, in, flush);
        if (in)
            av_packet_unref(in);
        return ret;
    } else
        return ff_interleave_packet_per_dts(s, out, in, flush);
}

int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
{
    int ret, flush = 0;

    ret = prepare_input_packet(s, pkt);
    if (ret < 0)
        goto fail;

    if (pkt) {
        AVStream *st = s->streams[pkt->stream_index];

        ret = do_packet_auto_bsf(s, pkt);
        if (ret == 0)
            return 0;
        else if (ret < 0)
            goto fail;

        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
                   pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));

#if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
        if ((ret = compute_muxer_pkt_fields(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
            goto fail;
#endif

        if (pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
    } else {
        av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame FLUSH\n");
        flush = 1;
    }

    for (;; ) {
        AVPacket opkt;
        int ret = interleave_packet(s, &opkt, pkt, flush);
        if (pkt) {
            memset(pkt, 0, sizeof(*pkt));
            av_init_packet(pkt);
            pkt = NULL;
        }
        if (ret <= 0) //FIXME cleanup needed for ret<0 ?
            return ret;

        ret = write_packet(s, &opkt);
        if (ret >= 0)
            s->streams[opkt.stream_index]->nb_frames++;

        av_packet_unref(&opkt);

        if (ret < 0)
            return ret;
        if (s->pb && s->pb->error)
            return s->pb->error;
    }
fail:
    av_packet_unref(pkt);
    return ret;
}
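
/*
 * Minimal remux loop sketch (illustrative; assumes matching output streams
 * were already created on oc and that ic is an open demuxer context):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         AVStream *ist = ic->streams[pkt.stream_index];
 *         AVStream *ost = oc->streams[pkt.stream_index];
 *         av_packet_rescale_ts(&pkt, ist->time_base, ost->time_base);
 *         pkt.pos = -1;
 *         ret = av_interleaved_write_frame(oc, &pkt);  // takes ownership of pkt
 *         if (ret < 0)
 *             break;
 *     }
 */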

int av_write_trailer(AVFormatContext *s)
{
    int ret, i;

    for (;; ) {
        AVPacket pkt;
        ret = interleave_packet(s, &pkt, NULL, 1);
        if (ret < 0)
            goto fail;
        if (!ret)
            break;

        ret = write_packet(s, &pkt);
        if (ret >= 0)
            s->streams[pkt.stream_index]->nb_frames++;

        av_packet_unref(&pkt);

        if (ret < 0)
            goto fail;
        if (s->pb && s->pb->error)
            goto fail;
    }

    if (!s->internal->header_written) {
        ret = s->internal->write_header_ret ? s->internal->write_header_ret : write_header_internal(s);
        if (ret < 0)
            goto fail;
    }

fail:
    if (s->internal->header_written && s->oformat->write_trailer) {
        if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
            avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_TRAILER);
        if (ret >= 0) {
            ret = s->oformat->write_trailer(s);
        } else {
            s->oformat->write_trailer(s);
        }
    }

    if (s->oformat->deinit)
        s->oformat->deinit(s);

    s->internal->header_written =
    s->internal->initialized =
    s->internal->streams_initialized = 0;

    if (s->pb)
        avio_flush(s->pb);
    if (ret == 0)
        ret = s->pb ? s->pb->error : 0;
    for (i = 0; i < s->nb_streams; i++) {
        av_freep(&s->streams[i]->priv_data);
        av_freep(&s->streams[i]->index_entries);
    }
    if (s->oformat->priv_class)
        av_opt_free(s->priv_data);
    av_freep(&s->priv_data);
    return ret;
}
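
/*
 * Teardown sketch (illustrative): av_write_trailer() must be the last muxing
 * call; closing the AVIO context and freeing the format context remain the
 * caller's responsibility:
 *
 *     ret = av_write_trailer(oc);
 *     if (!(oc->oformat->flags & AVFMT_NOFILE))
 *         avio_closep(&oc->pb);
 *     avformat_free_context(oc);
 */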

int av_get_output_timestamp(struct AVFormatContext *s, int stream,
                            int64_t *dts, int64_t *wall)
{
    if (!s->oformat || !s->oformat->get_output_timestamp)
        return AVERROR(ENOSYS);
    s->oformat->get_output_timestamp(s, stream, dts, wall);
    return 0;
}

int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src, int interleave)
{
    AVPacket local_pkt;
    int ret;

    local_pkt = *pkt;
    local_pkt.stream_index = dst_stream;
    if (pkt->pts != AV_NOPTS_VALUE)
        local_pkt.pts = av_rescale_q(pkt->pts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    if (pkt->dts != AV_NOPTS_VALUE)
        local_pkt.dts = av_rescale_q(pkt->dts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    if (pkt->duration)
        local_pkt.duration = av_rescale_q(pkt->duration,
                                          src->streams[pkt->stream_index]->time_base,
                                          dst->streams[dst_stream]->time_base);

    if (interleave) ret = av_interleaved_write_frame(dst, &local_pkt);
    else            ret = av_write_frame(dst, &local_pkt);
    pkt->buf             = local_pkt.buf;
    pkt->side_data       = local_pkt.side_data;
    pkt->side_data_elems = local_pkt.side_data_elems;
    return ret;
}

static int av_write_uncoded_frame_internal(AVFormatContext *s, int stream_index,
                                           AVFrame *frame, int interleaved)
{
    AVPacket pkt, *pktp;

    av_assert0(s->oformat);
    if (!s->oformat->write_uncoded_frame)
        return AVERROR(ENOSYS);

    if (!frame) {
        pktp = NULL;
    } else {
        pktp = &pkt;
        av_init_packet(&pkt);
        pkt.data = (void *)frame;
        pkt.size         = UNCODED_FRAME_PACKET_SIZE;
        pkt.pts          =
        pkt.dts          = frame->pts;
        pkt.duration     = frame->pkt_duration;
        pkt.stream_index = stream_index;
        pkt.flags |= AV_PKT_FLAG_UNCODED_FRAME;
    }

    return interleaved ? av_interleaved_write_frame(s, pktp) :
                         av_write_frame(s, pktp);
}

int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
                           AVFrame *frame)
{
    return av_write_uncoded_frame_internal(s, stream_index, frame, 0);
}

int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,
                                       AVFrame *frame)
{
    return av_write_uncoded_frame_internal(s, stream_index, frame, 1);
}

int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index)
{
    av_assert0(s->oformat);
    if (!s->oformat->write_uncoded_frame)
        return AVERROR(ENOSYS);
    return s->oformat->write_uncoded_frame(s, stream_index, NULL,
                                           AV_WRITE_UNCODED_FRAME_QUERY);
}
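
/*
 * Usage sketch for the uncoded-frame API above (illustrative; only a few
 * devices and raw-data muxers implement write_uncoded_frame):
 *
 *     if (av_write_uncoded_frame_query(oc, stream_index) >= 0) {
 *         // on success the caller gives up ownership of the frame
 *         ret = av_interleaved_write_uncoded_frame(oc, stream_index, frame);
 *     } else {
 *         // fall back to encoding the frame and writing an ordinary packet
 *     }
 */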