ffprobe: add support for printing audio frame information

This commit is contained in:
Stefano Sabatini 2012-01-10 22:44:32 +01:00
parent 58e1de724d
commit b25c239c7a
3 changed files with 59 additions and 22 deletions

View File

@@ -114,7 +114,7 @@ The information for each single packet is printed within a dedicated
 section with name "PACKET".
 
 @item -show_frames
-Show information about each video frame contained in the input multimedia
+Show information about each frame contained in the input multimedia
 stream.
 
 The information for each single frame is printed within a dedicated

View File

@@ -55,6 +55,10 @@
       <xsd:attribute name="pkt_dts_time" type="xsd:float"/>
       <xsd:attribute name="pkt_pos"      type="xsd:long" />
 
+      <!-- audio attributes -->
+      <xsd:attribute name="sample_fmt" type="xsd:string"/>
+      <xsd:attribute name="nb_samples" type="xsd:long" />
+
       <!-- video attributes -->
       <xsd:attribute name="width"  type="xsd:long" />
       <xsd:attribute name="height" type="xsd:long" />

View File

@@ -1290,7 +1290,20 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream)
     const char *s;
 
     print_section_header("frame");
-    print_str("media_type", "video");
+
+    s = av_get_media_type_string(stream->codec->codec_type);
+    if (s) print_str    ("media_type", s);
+    else   print_str_opt("media_type", "unknown");
+    print_int("key_frame",           frame->key_frame);
+    print_ts  ("pkt_pts",      frame->pkt_pts);
+    print_time("pkt_pts_time", frame->pkt_pts, &stream->time_base);
+    print_ts  ("pkt_dts",      frame->pkt_dts);
+    print_time("pkt_dts_time", frame->pkt_dts, &stream->time_base);
+    if (frame->pkt_pos != -1) print_fmt    ("pkt_pos", "%"PRId64, frame->pkt_pos);
+    else                      print_str_opt("pkt_pos", "N/A");
+
+    switch (stream->codec->codec_type) {
+    case AVMEDIA_TYPE_VIDEO:
     print_int("width", frame->width);
     print_int("height", frame->height);
     s = av_get_pix_fmt_name(frame->format);
@@ -1310,45 +1323,65 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream)
     print_int("top_field_first",     frame->top_field_first);
     print_int("repeat_pict",         frame->repeat_pict);
     print_int("reference",           frame->reference);
-    print_int("key_frame",           frame->key_frame);
-    print_ts  ("pkt_pts",      frame->pkt_pts);
-    print_time("pkt_pts_time", frame->pkt_pts, &stream->time_base);
-    print_ts  ("pkt_dts",      frame->pkt_dts);
-    print_time("pkt_dts_time", frame->pkt_dts, &stream->time_base);
-    if (frame->pkt_pos != -1) print_fmt    ("pkt_pos", "%"PRId64, frame->pkt_pos);
-    else                      print_str_opt("pkt_pos", "N/A");
+        break;
+
+    case AVMEDIA_TYPE_AUDIO:
+        s = av_get_sample_fmt_name(frame->format);
+        if (s) print_str    ("sample_fmt", s);
+        else   print_str_opt("sample_fmt", "unknown");
+        print_int("nb_samples", frame->nb_samples);
+        break;
+    }
 
     print_section_footer("frame");
     av_free(pbuf.s);
     fflush(stdout);
 }
-static av_always_inline int get_video_frame(AVFormatContext *fmt_ctx,
-                                            AVFrame *frame, AVPacket *pkt)
+static av_always_inline int get_decoded_frame(AVFormatContext *fmt_ctx,
+                                              AVFrame *frame, int *got_frame,
+                                              AVPacket *pkt)
 {
     AVCodecContext *dec_ctx = fmt_ctx->streams[pkt->stream_index]->codec;
-    int got_picture = 0;
+    int ret = 0;
 
-    if (dec_ctx->codec_id != CODEC_ID_NONE &&
-        dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
-        avcodec_decode_video2(dec_ctx, frame, &got_picture, pkt);
-    return got_picture;
+    *got_frame = 0;
+    switch (dec_ctx->codec_type) {
+    case AVMEDIA_TYPE_VIDEO:
+        ret = avcodec_decode_video2(dec_ctx, frame, got_frame, pkt);
+        break;
+
+    case AVMEDIA_TYPE_AUDIO:
+        ret = avcodec_decode_audio4(dec_ctx, frame, got_frame, pkt);
+        break;
+    }
+
+    return ret;
 }
 static void show_packets(WriterContext *w, AVFormatContext *fmt_ctx)
 {
-    AVPacket pkt;
+    AVPacket pkt, pkt1;
     AVFrame frame;
-    int i = 0;
+    int i = 0, ret, got_frame;
 
     av_init_packet(&pkt);
 
     while (!av_read_frame(fmt_ctx, &pkt)) {
         if (do_show_packets)
             show_packet(w, fmt_ctx, &pkt, i++);
-        if (do_show_frames &&
-            get_video_frame(fmt_ctx, &frame, &pkt)) {
-            show_frame(w, &frame, fmt_ctx->streams[pkt.stream_index]);
+        if (do_show_frames) {
+            pkt1 = pkt;
+            while (1) {
+                avcodec_get_frame_defaults(&frame);
+                ret = get_decoded_frame(fmt_ctx, &frame, &got_frame, &pkt1);
+                if (ret < 0 || !got_frame)
+                    break;
+                show_frame(w, &frame, fmt_ctx->streams[pkt.stream_index]);
+                pkt1.data += ret;
+                pkt1.size -= ret;
+            }
         }
         av_free_packet(&pkt);
     }
@@ -1358,7 +1391,7 @@ static void show_packets(WriterContext *w, AVFormatContext *fmt_ctx)
     //Flush remaining frames that are cached in the decoder
     for (i = 0; i < fmt_ctx->nb_streams; i++) {
         pkt.stream_index = i;
-        while (get_video_frame(fmt_ctx, &frame, &pkt))
+        while (get_decoded_frame(fmt_ctx, &frame, &got_frame, &pkt) >= 0 && got_frame)
             show_frame(w, &frame, fmt_ctx->streams[pkt.stream_index]);
     }
 }