Merge remote-tracking branch 'qatar/master'

* qatar/master:
  avconv: use different variables for decoded and filtered frame.
  avconv: add support for copying attachments.
  matroskaenc: write attachments.
  matroskadec: export mimetype of attachments as metadata.
  avconv: factorize common code from new_*_stream()
  doc/avconv: expand documentation for some options.
  doc/avconv: document -timelimit.

Conflicts:
	avconv.c
	cmdutils.c
	tests/codec-regression.sh

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer, 2011-09-28 01:31:38 +02:00
commit 23c1db9b83
7 changed files with 172 additions and 76 deletions
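
As a quick orientation for the attachment support pulled in above, here is a minimal usage sketch. The command line is illustrative only (file names are placeholders and the flags are an assumption based on the summaries and the stream-mapping changes below, not taken from this commit):

    avconv -i input.mkv -map 0 -c copy output.mkv    # illustrative; copies every stream, attachments included

With -map 0 selecting all input streams, attached files (fonts, cover images) are mapped to attachment output streams, which are always stream-copied, and the Matroska muxer writes them back out together with the filename and mimetype metadata exported by the demuxer.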

@@ -1588,7 +1588,6 @@ static int output_packet(InputStream *ist, int ist_index,
OutputStream *ost;
int ret, i;
int got_output;
AVFrame picture;
void *buffer_to_free = NULL;
static unsigned int samples_size= 0;
AVSubtitle subtitle, *subtitle_to_free;
@@ -1623,6 +1622,7 @@ static int output_packet(InputStream *ist, int ist_index,
while (avpkt.size > 0 || (!pkt && got_output)) {
uint8_t *data_buf, *decoded_data_buf;
int data_size, decoded_data_size;
AVFrame *decoded_frame, *filtered_frame;
handle_eof:
ist->pts= ist->next_pts;
@@ -1632,6 +1632,7 @@ static int output_packet(InputStream *ist, int ist_index,
ist->showed_multi_packet_warning=1;
/* decode the packet if needed */
decoded_frame = filtered_frame = NULL;
decoded_data_buf = NULL; /* fail safe */
decoded_data_size= 0;
data_buf = avpkt.data;
@@ -1668,22 +1669,23 @@ static int output_packet(InputStream *ist, int ist_index,
break;}
case AVMEDIA_TYPE_VIDEO:
decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
/* XXX: allocate picture correctly */
avcodec_get_frame_defaults(&picture);
if (!(decoded_frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
avpkt.pts = pkt_pts;
avpkt.dts = ist->pts;
pkt_pts = AV_NOPTS_VALUE;
ret = avcodec_decode_video2(ist->st->codec,
&picture, &got_output, &avpkt);
quality = same_quant ? picture.quality : 0;
decoded_frame, &got_output, &avpkt);
quality = same_quant ? decoded_frame->quality : 0;
if (ret < 0)
return ret;
goto fail;
if (!got_output) {
/* no picture yet */
av_freep(&decoded_frame);
goto discard_packet;
}
ist->next_pts = ist->pts = picture.best_effort_timestamp;
ist->next_pts = ist->pts = decoded_frame->best_effort_timestamp;
if (ist->st->codec->time_base.num != 0) {
int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
ist->next_pts += ((int64_t)AV_TIME_BASE *
@@ -1692,7 +1694,7 @@ static int output_packet(InputStream *ist, int ist_index,
}
avpkt.size = 0;
buffer_to_free = NULL;
pre_process_video_frame(ist, (AVPicture *)&picture, &buffer_to_free);
pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
break;
case AVMEDIA_TYPE_SUBTITLE:
ret = avcodec_decode_subtitle2(ist->st->codec,
@@ -1768,11 +1770,15 @@ static int output_packet(InputStream *ist, int ist_index,
#if CONFIG_AVFILTER
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
ost->input_video_filter) {
if (!picture.sample_aspect_ratio.num)
picture.sample_aspect_ratio = ist->st->sample_aspect_ratio;
picture.pts = ist->pts;
if (!decoded_frame->sample_aspect_ratio.num)
decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
decoded_frame->pts = ist->pts;
av_vsrc_buffer_add_frame(ost->input_video_filter, &picture, AV_VSRC_BUF_FLAG_OVERWRITE);
av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
if (!(filtered_frame = avcodec_alloc_frame())) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
!ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
@@ -1782,10 +1788,12 @@ static int output_packet(InputStream *ist, int ist_index,
if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
goto cont;
if (ost->picref) {
avfilter_fill_frame_from_video_buffer_ref(&picture, ost->picref);
avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref);
ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
}
}
#else
filtered_frame = decoded_frame;
#endif
os = output_files[ost->file_index].ctx;
@@ -1803,8 +1811,8 @@ static int output_packet(InputStream *ist, int ist_index,
if (ost->picref->video && !ost->frame_aspect_ratio)
ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
#endif
do_video_out(os, ost, ist, &picture, &frame_size,
same_quant ? quality : ost->st->codec->global_quality);
do_video_out(os, ost, ist, filtered_frame, &frame_size,
same_quant ? quality : ost->st->codec->global_quality);
if (vstats_filename && frame_size)
do_video_stats(os, ost, frame_size);
break;
@@ -1884,15 +1892,20 @@ static int output_packet(InputStream *ist, int ist_index,
if (ost->picref)
avfilter_unref_buffer(ost->picref);
}
av_freep(&filtered_frame);
#endif
}
fail:
av_free(buffer_to_free);
/* XXX: allocate the subtitles in the codec ? */
if (subtitle_to_free) {
avsubtitle_free(subtitle_to_free);
subtitle_to_free = NULL;
}
av_freep(&decoded_frame);
if (ret < 0)
return ret;
}
discard_packet:
@@ -2068,6 +2081,7 @@ static int transcode_init(OutputFile *output_files,
codec->height = icodec->height;
break;
case AVMEDIA_TYPE_DATA:
case AVMEDIA_TYPE_ATTACHMENT:
break;
default:
abort();
@@ -3097,6 +3111,9 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
st->codec->global_quality = FF_QP2LAMBDA * qscale;
}
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
ost->sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
return ost;
}
@@ -3128,10 +3145,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
st = ost->st;
video_enc = st->codec;
if(oc->oformat->flags & AVFMT_GLOBALHEADER) {
video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if (!st->stream_copy) {
const char *p = NULL;
char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
@@ -3249,9 +3262,6 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
audio_enc = st->codec;
audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if (!st->stream_copy) {
char *sample_fmt = NULL;
@@ -3274,20 +3284,21 @@ static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
{
AVStream *st;
OutputStream *ost;
AVCodecContext *data_enc;
ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
st = ost->st;
data_enc = st->codec;
if (!st->stream_copy) {
av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
exit_program(1);
}
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
data_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
return ost;
}
static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
{
OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
ost->st->stream_copy = 1;
return ost;
}
@@ -3303,10 +3314,6 @@ static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
subtitle_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
return ost;
}
@@ -3495,6 +3502,7 @@ static void opt_output_file(void *optctx, const char *filename)
case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
default:
av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d.%d - unsupported type.\n",
map->file_index, map->stream_index);

@@ -864,7 +864,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
{
if (*spec <= '9' && *spec >= '0') /* opt:index */
return strtol(spec, NULL, 0) == st->index;
else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd') { /* opt:[vasd] */
else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' || *spec == 't') { /* opt:[vasdt] */
enum AVMediaType type;
switch (*spec++) {
@@ -872,6 +872,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
case 'a': type = AVMEDIA_TYPE_AUDIO; break;
case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
case 'd': type = AVMEDIA_TYPE_DATA; break;
case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
default: abort(); // never reached, silence warning
}
if (type != st->codec->codec_type)

@@ -82,13 +82,15 @@ described.
@table @option
@item -f @var{fmt} (@emph{input/output})
Force format.
Force input or output file format. The format is normally autodetected for input
files and guessed from file extension for output files, so this option is not
needed in most cases.
@item -i @var{filename} (@emph{input})
input file name
@item -y (@emph{global})
Overwrite output files.
Overwrite output files without asking.
@item -c[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream})
@itemx -codec[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream})
@@ -111,9 +113,8 @@ will copy all the streams except the second video, which will be encoded with
libx264, and the 138th audio, which will be encoded with libvorbis.
@item -t @var{duration} (@emph{output})
Restrict the transcoded/captured video sequence
to the duration specified in seconds.
@code{hh:mm:ss[.xxx]} syntax is also supported.
Stop writing the output after its duration reaches @var{duration}.
@var{duration} may be a number in seconds, or in @code{hh:mm:ss[.xxx]} form.
@item -fs @var{limit_size} (@emph{output})
Set the file size limit.
@@ -131,7 +132,7 @@ Set the input time offset in seconds.
@code{[-]hh:mm:ss[.xxx]} syntax is also supported.
The offset is added to the timestamps of the input files.
Specifying a positive offset means that the corresponding
streams are delayed by 'offset' seconds.
streams are delayed by @var{offset} seconds.
@item -metadata[:metadata_specifier] @var{key}=@var{value} (@emph{output,per-metadata})
Set a metadata key/value pair.
@@ -158,9 +159,10 @@ This option is deprecated and has no effect, use -loglevel
to set verbosity level.
@item -target @var{type} (@emph{output})
Specify target file type ("vcd", "svcd", "dvd", "dv", "dv50", "pal-vcd",
"ntsc-svcd", ... ). All the format options (bitrate, codecs,
buffer sizes) are then set automatically. You can just type:
Specify target file type (@code{vcd}, @code{svcd}, @code{dvd}, @code{dv},
@code{dv50}). @var{type} may be prefixed with @code{pal-}, @code{ntsc-} or
@code{film-} to use the corresponding standard. All the format options
(bitrate, codecs, buffer sizes) are then set automatically. You can just type:
@example
avconv -i myfile.avi -target vcd /tmp/vcd.mpg
@@ -647,8 +649,10 @@ Show benchmarking information at the end of an encode.
Shows CPU time used and maximum memory consumption.
Maximum memory consumption is not supported on all systems,
it will usually display as 0 if not supported.
@item -timelimit @var{duration} (@emph{global})
Exit after avconv has been running for @var{duration} seconds.
@item -dump (@emph{global})
Dump each input packet.
Dump each input packet to stderr.
@item -hex (@emph{global})
When dumping packets, also dump the payload.
@item -ps @var{size}

@@ -33,9 +33,10 @@ Possible forms of stream specifiers are:
Matches the stream with this index. E.g. @code{-threads:1 4} would set the
thread count for the second stream to 4.
@item @var{stream_type}[:@var{stream_index}]
@var{stream_type} is one of: 'v' for video, 'a' for audio, 's' for subtitle and
'd' for data. If @var{stream_index} is given, then matches stream number
@var{stream_index} of this type. Otherwise matches all streams of this type.
@var{stream_type} is one of: 'v' for video, 'a' for audio, 's' for subtitle,
'd' for data and 't' for attachments. If @var{stream_index} is given, then
matches stream number @var{stream_index} of this type. Otherwise matches all
streams of this type.
@item @var{program_id}[:@var{stream_index}]
If @var{stream_index} is given, then matches stream number @var{stream_index} in
program with id @var{program_id}. Otherwise matches all streams in this program.
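
As an illustration of the 't' stream type documented above, per-stream options can now address attachment streams as well. A hypothetical command line (codec and file names are placeholders, not taken from this commit):

    avconv -i input.mkv -map 0 -c:v libx264 -c:a copy -c:t copy output.mkv    # 't' selects attachment streams

Here -c:t copy requests stream copy for every attachment stream, in the same way that -c:a copy does for every audio stream.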

@@ -1598,7 +1598,6 @@ static int output_packet(InputStream *ist, int ist_index,
OutputStream *ost;
int ret, i;
int got_output;
AVFrame picture;
void *buffer_to_free = NULL;
static unsigned int samples_size= 0;
AVSubtitle subtitle, *subtitle_to_free;
@@ -1633,6 +1632,7 @@ static int output_packet(InputStream *ist, int ist_index,
while (avpkt.size > 0 || (!pkt && got_output)) {
uint8_t *data_buf, *decoded_data_buf;
int data_size, decoded_data_size;
AVFrame *decoded_frame, *filtered_frame;
handle_eof:
ist->pts= ist->next_pts;
@@ -1642,6 +1642,7 @@ static int output_packet(InputStream *ist, int ist_index,
ist->showed_multi_packet_warning=1;
/* decode the packet if needed */
decoded_frame = filtered_frame = NULL;
decoded_data_buf = NULL; /* fail safe */
decoded_data_size= 0;
data_buf = avpkt.data;
@@ -1678,22 +1679,23 @@ static int output_packet(InputStream *ist, int ist_index,
break;}
case AVMEDIA_TYPE_VIDEO:
decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
/* XXX: allocate picture correctly */
avcodec_get_frame_defaults(&picture);
if (!(decoded_frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
avpkt.pts = pkt_pts;
avpkt.dts = ist->pts;
pkt_pts = AV_NOPTS_VALUE;
ret = avcodec_decode_video2(ist->st->codec,
&picture, &got_output, &avpkt);
quality = same_quant ? picture.quality : 0;
decoded_frame, &got_output, &avpkt);
quality = same_quant ? decoded_frame->quality : 0;
if (ret < 0)
return ret;
goto fail;
if (!got_output) {
/* no picture yet */
av_freep(&decoded_frame);
goto discard_packet;
}
ist->next_pts = ist->pts = picture.best_effort_timestamp;
ist->next_pts = ist->pts = decoded_frame->best_effort_timestamp;
if (ist->st->codec->time_base.num != 0) {
int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
ist->next_pts += ((int64_t)AV_TIME_BASE *
@@ -1702,7 +1704,7 @@ static int output_packet(InputStream *ist, int ist_index,
}
avpkt.size = 0;
buffer_to_free = NULL;
pre_process_video_frame(ist, (AVPicture *)&picture, &buffer_to_free);
pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
break;
case AVMEDIA_TYPE_SUBTITLE:
ret = avcodec_decode_subtitle2(ist->st->codec,
@@ -1743,11 +1745,11 @@ static int output_packet(InputStream *ist, int ist_index,
if (of->start_time == 0 || ist->pts >= of->start_time) {
ost = &ost_table[i];
if (ost->input_video_filter && ost->source_index == ist_index) {
if (!picture.sample_aspect_ratio.num)
picture.sample_aspect_ratio = ist->st->sample_aspect_ratio;
picture.pts = ist->pts;
if (!decoded_frame->sample_aspect_ratio.num)
decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
decoded_frame->pts = ist->pts;
av_vsrc_buffer_add_frame(ost->input_video_filter, &picture, AV_VSRC_BUF_FLAG_OVERWRITE);
av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
}
}
}
@@ -1800,11 +1802,18 @@ static int output_packet(InputStream *ist, int ist_index,
AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
goto cont;
if (!filtered_frame && !(filtered_frame = avcodec_alloc_frame())) {
ret = AVERROR(ENOMEM);
goto fail;
}
*filtered_frame= *decoded_frame; //for me_threshold
if (ost->picref) {
avfilter_fill_frame_from_video_buffer_ref(&picture, ost->picref);
avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref);
ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
}
}
#else
filtered_frame = decoded_frame;
#endif
os = output_files[ost->file_index].ctx;
@@ -1822,8 +1831,8 @@ static int output_packet(InputStream *ist, int ist_index,
if (ost->picref->video && !ost->frame_aspect_ratio)
ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
#endif
do_video_out(os, ost, ist, &picture, &frame_size,
same_quant ? quality : ost->st->codec->global_quality);
do_video_out(os, ost, ist, filtered_frame, &frame_size,
same_quant ? quality : ost->st->codec->global_quality);
if (vstats_filename && frame_size)
do_video_stats(os, ost, frame_size);
break;
@@ -1902,15 +1911,20 @@ static int output_packet(InputStream *ist, int ist_index,
ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
avfilter_unref_buffer(ost->picref);
}
av_freep(&filtered_frame);
#endif
}
fail:
av_free(buffer_to_free);
/* XXX: allocate the subtitles in the codec ? */
if (subtitle_to_free) {
avsubtitle_free(subtitle_to_free);
subtitle_to_free = NULL;
}
av_freep(&decoded_frame);
if (ret < 0)
return ret;
}
discard_packet:
@@ -2082,6 +2096,7 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
codec->height = icodec->height;
break;
case AVMEDIA_TYPE_DATA:
case AVMEDIA_TYPE_ATTACHMENT:
break;
default:
abort();
@@ -3196,6 +3211,9 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
st->codec->global_quality = FF_QP2LAMBDA * qscale;
}
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
ost->sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
return ost;
}
@@ -3227,10 +3245,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
st = ost->st;
video_enc = st->codec;
if(oc->oformat->flags & AVFMT_GLOBALHEADER) {
video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if (!st->stream_copy) {
const char *p = NULL;
char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
@@ -3351,9 +3365,6 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
audio_enc = st->codec;
audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
if (!st->stream_copy) {
char *sample_fmt = NULL;
@@ -3376,20 +3387,21 @@ static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
{
AVStream *st;
OutputStream *ost;
AVCodecContext *data_enc;
ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
st = ost->st;
data_enc = st->codec;
if (!st->stream_copy) {
av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
exit_program(1);
}
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
data_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
return ost;
}
static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
{
OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
ost->st->stream_copy = 1;
return ost;
}
@@ -3405,10 +3417,6 @@ static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
subtitle_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
return ost;
}
@@ -3596,6 +3604,7 @@ static void opt_output_file(void *optctx, const char *filename)
case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
default:
av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d.%d - unsupported type.\n",
map->file_index, map->stream_index);

@@ -1632,6 +1632,7 @@ static int matroska_read_header(AVFormatContext *s, AVFormatParameters *ap)
if (st == NULL)
break;
av_dict_set(&st->metadata, "filename",attachements[j].filename, 0);
av_dict_set(&st->metadata, "mimetype", attachements[j].mime, 0);
st->codec->codec_id = CODEC_ID_NONE;
st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
st->codec->extradata = av_malloc(attachements[j].bin.size);

@@ -91,6 +91,8 @@ typedef struct MatroskaMuxContext {
unsigned int audio_buffer_size;
AVPacket cur_audio_pkt;
int have_attachments;
} MatroskaMuxContext;
@@ -528,6 +530,11 @@ static int mkv_write_tracks(AVFormatContext *s)
int output_sample_rate = 0;
AVDictionaryEntry *tag;
if (codec->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
mkv->have_attachments = 1;
continue;
}
if (!bit_depth)
bit_depth = av_get_bytes_per_sample(codec->sample_fmt) << 3;
@@ -804,6 +811,68 @@ static int mkv_write_tags(AVFormatContext *s)
return 0;
}
static int mkv_write_attachments(AVFormatContext *s)
{
MatroskaMuxContext *mkv = s->priv_data;
AVIOContext *pb = s->pb;
ebml_master attachments;
AVLFG c;
int i, ret;
if (!mkv->have_attachments)
return 0;
av_lfg_init(&c, av_get_random_seed());
ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_ATTACHMENTS, avio_tell(pb));
if (ret < 0) return ret;
attachments = start_ebml_master(pb, MATROSKA_ID_ATTACHMENTS, 0);
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
ebml_master attached_file;
AVDictionaryEntry *t;
const char *mimetype = NULL;
if (st->codec->codec_type != AVMEDIA_TYPE_ATTACHMENT)
continue;
attached_file = start_ebml_master(pb, MATROSKA_ID_ATTACHEDFILE, 0);
if (t = av_dict_get(st->metadata, "title", NULL, 0))
put_ebml_string(pb, MATROSKA_ID_FILEDESC, t->value);
if (!(t = av_dict_get(st->metadata, "filename", NULL, 0))) {
av_log(s, AV_LOG_ERROR, "Attachment stream %d has no filename tag.\n", i);
return AVERROR(EINVAL);
}
put_ebml_string(pb, MATROSKA_ID_FILENAME, t->value);
if (t = av_dict_get(st->metadata, "mimetype", NULL, 0))
mimetype = t->value;
else if (st->codec->codec_id != CODEC_ID_NONE ) {
int i;
for (i = 0; ff_mkv_mime_tags[i].id != CODEC_ID_NONE; i++)
if (ff_mkv_mime_tags[i].id == st->codec->codec_id) {
mimetype = ff_mkv_mime_tags[i].str;
break;
}
}
if (!mimetype) {
av_log(s, AV_LOG_ERROR, "Attachment stream %d has no mimetype tag and "
"it cannot be deduced from the codec id.\n", i);
return AVERROR(EINVAL);
}
put_ebml_string(pb, MATROSKA_ID_FILEMIMETYPE, mimetype);
put_ebml_binary(pb, MATROSKA_ID_FILEDATA, st->codec->extradata, st->codec->extradata_size);
put_ebml_uint(pb, MATROSKA_ID_FILEUID, av_lfg_get(&c));
end_ebml_master(pb, attached_file);
}
end_ebml_master(pb, attachments);
return 0;
}
static int mkv_write_header(AVFormatContext *s)
{
MatroskaMuxContext *mkv = s->priv_data;
@@ -877,6 +946,9 @@ static int mkv_write_header(AVFormatContext *s)
ret = mkv_write_tags(s);
if (ret < 0) return ret;
ret = mkv_write_attachments(s);
if (ret < 0) return ret;
}
if (!s->pb->seekable)