output-example: K&R formatting cosmetics, comment spelling fixes

Diego Biurrun 2012-03-30 02:27:05 +02:00
parent 93f6d0475f
commit afd8a3957b
1 changed file with 100 additions and 97 deletions

libavformat/output-example.c

@@ -44,7 +44,7 @@
-#define STREAM_DURATION 5.0
+#define STREAM_DURATION   5.0
 #define STREAM_FRAME_RATE 25 /* 25 images/s */
-#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
-#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
+#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
+#define STREAM_PIX_FMT    PIX_FMT_YUV420P /* default pix_fmt */
 
 static int sws_flags = SWS_BICUBIC;
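For orientation, the constants above fully determine the generated clip; STREAM_NB_FRAMES is just the duration expressed in frames:

    /* Worked out from the defines above:
     * STREAM_NB_FRAMES = (int)(STREAM_DURATION * STREAM_FRAME_RATE)
     *                  = (int)(5.0 * 25)
     *                  = 125 frames in total */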
@@ -80,13 +80,13 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
     c = st->codec;
 
     /* put sample parameters */
-    c->sample_fmt = AV_SAMPLE_FMT_S16;
-    c->bit_rate = 64000;
+    c->sample_fmt  = AV_SAMPLE_FMT_S16;
+    c->bit_rate    = 64000;
     c->sample_rate = 44100;
-    c->channels = 2;
+    c->channels    = 2;
 
     // some formats want stream headers to be separate
-    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
+    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
         c->flags |= CODEC_FLAG_GLOBAL_HEADER;
 
     return st;
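To put the sample parameters above in perspective, raw S16 stereo PCM at 44100 Hz runs at roughly 1411 kb/s, so the 64 kb/s target asks the codec for about 22x compression:

    /* Uncompressed rate implied by the parameters above:
     * 44100 samples/s * 2 bytes/sample * 2 channels = 176400 B/s
     *                                               = 1411.2 kb/s,
     * against a target bit_rate of 64 kb/s, i.e. roughly 22:1. */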
@@ -105,7 +105,7 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
     }
 
     /* init signal generator */
-    t = 0;
+    t     = 0;
     tincr = 2 * M_PI * 110.0 / c->sample_rate;
     /* increment frequency by 110 Hz per second */
     tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
@@ -114,23 +114,24 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
         audio_input_frame_size = 10000;
     else
         audio_input_frame_size = c->frame_size;
-    samples = av_malloc(audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
-                        * c->channels);
+    samples = av_malloc(audio_input_frame_size *
+                        av_get_bytes_per_sample(c->sample_fmt) *
+                        c->channels);
 }
 
-/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
-   'nb_channels' channels */
+/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
+ * 'nb_channels' channels. */
 static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
 {
     int j, i, v;
     int16_t *q;
 
     q = samples;
-    for(j=0;j<frame_size;j++) {
+    for (j = 0; j < frame_size; j++) {
         v = (int)(sin(t) * 10000);
-        for(i = 0; i < nb_channels; i++)
+        for (i = 0; i < nb_channels; i++)
             *q++ = v;
         t += tincr;
         tincr += tincr2;
     }
 }
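The generator reformatted above produces a rising sine sweep: t is the phase, tincr the per-sample phase step, and tincr2 the per-sample growth of that step, so the tone starts at 110 Hz and climbs by 110 Hz each second:

    /* Instantaneous frequency after n samples:
     *   f(n) = (tincr + n * tincr2) * sample_rate / (2 * M_PI)
     * n = 0:           f = 110 Hz
     * n = sample_rate: f = 220 Hz, i.e. +110 Hz after one second */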
@@ -147,17 +148,19 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
     get_audio_frame(samples, audio_input_frame_size, c->channels);
     frame->nb_samples = audio_input_frame_size;
-    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, (uint8_t *)samples,
-                             audio_input_frame_size * av_get_bytes_per_sample(c->sample_fmt)
-                             * c->channels, 1);
+    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
+                             (uint8_t *)samples,
+                             audio_input_frame_size *
+                             av_get_bytes_per_sample(c->sample_fmt) *
+                             c->channels, 1);
 
     avcodec_encode_audio2(c, &pkt, frame, &got_packet);
     if (!got_packet)
         return;
 
-    pkt.stream_index= st->index;
+    pkt.stream_index = st->index;
 
-    /* write the compressed frame in the media file */
+    /* Write the compressed frame to the media file. */
     if (av_interleaved_write_frame(oc, &pkt) != 0) {
         fprintf(stderr, "Error while writing audio frame\n");
         exit(1);
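The buffer size spelled out across several lines in the re-wrapped avcodec_fill_audio_frame() call is plain arithmetic, samples times bytes per sample times channels; with the S16 stereo defaults from add_audio_stream():

    /* For the 10000-sample fallback frame size used with PCM codecs:
     * 10000 samples * 2 bytes/sample (AV_SAMPLE_FMT_S16) * 2 channels
     *   = 40000 bytes passed as the buffer size. */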
@@ -178,7 +181,7 @@ static AVFrame *picture, *tmp_picture;
 static uint8_t *video_outbuf;
 static int frame_count, video_outbuf_size;
 
-/* add a video output stream */
+/* Add a video output stream. */
 static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
 {
     AVCodecContext *c;
@@ -200,31 +203,31 @@ static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
     c = st->codec;
 
-    /* put sample parameters */
+    /* Put sample parameters. */
     c->bit_rate = 400000;
-    /* resolution must be a multiple of two */
-    c->width = 352;
-    c->height = 288;
-    /* time base: this is the fundamental unit of time (in seconds) in terms
-       of which frame timestamps are represented. for fixed-fps content,
-       timebase should be 1/framerate and timestamp increments should be
-       identically 1. */
+    /* Resolution must be a multiple of two. */
+    c->width    = 352;
+    c->height   = 288;
+    /* timebase: This is the fundamental unit of time (in seconds) in terms
+     * of which frame timestamps are represented. For fixed-fps content,
+     * timebase should be 1/framerate and timestamp increments should be
+     * identical to 1. */
     c->time_base.den = STREAM_FRAME_RATE;
     c->time_base.num = 1;
-    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
-    c->pix_fmt = STREAM_PIX_FMT;
+    c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
+    c->pix_fmt       = STREAM_PIX_FMT;
     if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
         /* just for testing, we also add B frames */
         c->max_b_frames = 2;
     }
-    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
+    if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
         /* Needed to avoid using macroblocks in which some coeffs overflow.
-           This does not happen with normal video, it just happens here as
-           the motion of the chroma plane does not match the luma plane. */
-        c->mb_decision=2;
+         * This does not happen with normal video, it just happens here as
+         * the motion of the chroma plane does not match the luma plane. */
+        c->mb_decision = 2;
     }
-    // some formats want stream headers to be separate
-    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
+    /* Some formats want stream headers to be separate. */
+    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
         c->flags |= CODEC_FLAG_GLOBAL_HEADER;
 
     return st;
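A worked example for the timebase comment above: with time_base = 1/STREAM_FRAME_RATE, one pts tick is exactly one frame.

    /* time_base = 1/25, so pts n corresponds to n * (1/25) s of stream time:
     * pts 0 -> 0 ms, pts 1 -> 40 ms, pts 25 -> 1 s.
     * Fixed-fps content therefore increments pts by exactly 1 per frame. */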
@@ -239,7 +242,7 @@ static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
     picture = avcodec_alloc_frame();
     if (!picture)
         return NULL;
-    size = avpicture_get_size(pix_fmt, width, height);
+    size        = avpicture_get_size(pix_fmt, width, height);
     picture_buf = av_malloc(size);
     if (!picture_buf) {
         av_free(picture);
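For the 352x288 YUV420P frames this example encodes, the buffer size returned by avpicture_get_size() works out as follows (values taken from add_video_stream()):

    /* YUV420P plane sizes at 352x288:
     *   Y : 352 * 288 = 101376 bytes
     *   Cb: 176 * 144 =  25344 bytes
     *   Cr: 176 * 144 =  25344 bytes
     *   total         = 152064 bytes = width * height * 3 / 2 */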
@@ -264,26 +267,26 @@ static void open_video(AVFormatContext *oc, AVStream *st)
     video_outbuf = NULL;
     if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
-        /* allocate output buffer */
-        /* XXX: API change will be done */
-        /* buffers passed into lav* can be allocated any way you prefer,
-           as long as they're aligned enough for the architecture, and
-           they're freed appropriately (such as using av_free for buffers
-           allocated with av_malloc) */
+        /* Allocate output buffer. */
+        /* XXX: API change will be done. */
+        /* Buffers passed into lav* can be allocated any way you prefer,
+         * as long as they're aligned enough for the architecture, and
+         * they're freed appropriately (such as using av_free for buffers
+         * allocated with av_malloc). */
         video_outbuf_size = 200000;
         video_outbuf = av_malloc(video_outbuf_size);
     }
 
-    /* allocate the encoded raw picture */
+    /* Allocate the encoded raw picture. */
     picture = alloc_picture(c->pix_fmt, c->width, c->height);
     if (!picture) {
         fprintf(stderr, "Could not allocate picture\n");
         exit(1);
     }
 
-    /* if the output format is not YUV420P, then a temporary YUV420P
-       picture is needed too. It is then converted to the required
-       output format */
+    /* If the output format is not YUV420P, then a temporary YUV420P
+     * picture is needed too. It is then converted to the required
+     * output format. */
     tmp_picture = NULL;
     if (c->pix_fmt != PIX_FMT_YUV420P) {
         tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
@@ -294,23 +297,22 @@ static void open_video(AVFormatContext *oc, AVStream *st)
     }
 }
 
-/* prepare a dummy image */
-static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
+/* Prepare a dummy image. */
+static void fill_yuv_image(AVFrame *pict, int frame_index,
+                           int width, int height)
 {
     int x, y, i;
 
     i = frame_index;
 
     /* Y */
-    for(y=0;y<height;y++) {
-        for(x=0;x<width;x++) {
+    for (y = 0; y < height; y++)
+        for (x = 0; x < width; x++)
             pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
-        }
-    }
 
     /* Cb and Cr */
-    for(y=0;y<height/2;y++) {
-        for(x=0;x<width/2;x++) {
+    for (y = 0; y < height / 2; y++) {
+        for (x = 0; x < width / 2; x++) {
             pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
             pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
         }
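The halved loop bounds in the Cb/Cr part follow from YUV420P's 4:2:0 chroma subsampling, where one chroma sample covers a 2x2 block of luma samples:

    /* 4:2:0 mapping: chroma sample (x, y) in data[1] / data[2] covers luma
     * samples (2x, 2y), (2x+1, 2y), (2x, 2y+1), (2x+1, 2y+1) in data[0],
     * hence the width / 2 and height / 2 iteration bounds. */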
@@ -326,13 +328,13 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
     c = st->codec;
 
     if (frame_count >= STREAM_NB_FRAMES) {
-        /* no more frame to compress. The codec has a latency of a few
-           frames if using B frames, so we get the last frames by
-           passing the same picture again */
+        /* No more frames to compress. The codec has a latency of a few
+         * frames if using B-frames, so we get the last frames by
+         * passing the same picture again. */
     } else {
         if (c->pix_fmt != PIX_FMT_YUV420P) {
             /* as we only generate a YUV420P picture, we must convert it
-               to the codec pixel format if needed */
+             * to the codec pixel format if needed */
             if (img_convert_ctx == NULL) {
                 img_convert_ctx = sws_getContext(c->width, c->height,
                                                  PIX_FMT_YUV420P,
@@ -340,7 +342,8 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
                                                  c->pix_fmt,
                                                  sws_flags, NULL, NULL, NULL);
                 if (img_convert_ctx == NULL) {
-                    fprintf(stderr, "Cannot initialize the conversion context\n");
+                    fprintf(stderr,
+                            "Cannot initialize the conversion context\n");
                     exit(1);
                 }
             }
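Once the context exists, the conversion itself (just past this hunk in the file) is a single sws_scale() call from the YUV420P tmp_picture into the encoder's pixel format; a minimal sketch of that step:

    /* Generate the test image in YUV420P, then convert to c->pix_fmt. */
    fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
    sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
              0, c->height, picture->data, picture->linesize);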
@@ -352,36 +355,37 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
         }
     }
 
     if (oc->oformat->flags & AVFMT_RAWPICTURE) {
-        /* raw video case. The API will change slightly in the near
-           futur for that */
+        /* Raw video case - the API will change slightly in the near
+         * future for that. */
         AVPacket pkt;
         av_init_packet(&pkt);
 
         pkt.flags |= AV_PKT_FLAG_KEY;
-        pkt.stream_index= st->index;
-        pkt.data= (uint8_t *)picture;
-        pkt.size= sizeof(AVPicture);
+        pkt.stream_index = st->index;
+        pkt.data = (uint8_t *)picture;
+        pkt.size = sizeof(AVPicture);
 
         ret = av_interleaved_write_frame(oc, &pkt);
     } else {
         /* encode the image */
-        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
-        /* if zero size, it means the image was buffered */
+        out_size = avcodec_encode_video(c, video_outbuf,
+                                        video_outbuf_size, picture);
+        /* If size is zero, it means the image was buffered. */
         if (out_size > 0) {
             AVPacket pkt;
             av_init_packet(&pkt);
 
             if (c->coded_frame->pts != AV_NOPTS_VALUE)
-                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
-            if(c->coded_frame->key_frame)
+                pkt.pts = av_rescale_q(c->coded_frame->pts,
+                                       c->time_base, st->time_base);
+            if (c->coded_frame->key_frame)
                 pkt.flags |= AV_PKT_FLAG_KEY;
-            pkt.stream_index= st->index;
-            pkt.data= video_outbuf;
-            pkt.size= out_size;
+            pkt.stream_index = st->index;
+            pkt.data = video_outbuf;
+            pkt.size = out_size;
 
-            /* write the compressed frame in the media file */
+            /* Write the compressed frame to the media file. */
             ret = av_interleaved_write_frame(oc, &pkt);
         } else {
             ret = 0;
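The re-wrapped av_rescale_q() call converts the encoder's pts from the codec time base into the stream time base chosen by the muxer; a hedged example, assuming the 1/90000 base typical of MPEG streams:

    /* av_rescale_q(a, bq, cq) computes a * bq / cq without overflow:
     * av_rescale_q(10, (AVRational){ 1, 25 }, (AVRational){ 1, 90000 })
     *   = 10 * 90000 / 25 = 36000 stream ticks, i.e. 0.4 s in either base. */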
@@ -418,7 +422,7 @@ int main(int argc, char **argv)
     double audio_pts, video_pts;
     int i;
 
-    /* initialize libavcodec, and register all codecs and formats */
+    /* Initialize libavcodec, and register all codecs and formats. */
     av_register_all();
 
     if (argc != 2) {
@@ -432,8 +436,7 @@ int main(int argc, char **argv)
     filename = argv[1];
 
-    /* auto detect the output format from the name. default is
-       mpeg. */
+    /* Autodetect the output format from the name. default is MPEG. */
     fmt = av_guess_format(NULL, filename, NULL);
     if (!fmt) {
         printf("Could not deduce output format from file extension: using MPEG.\n");
@@ -444,7 +447,7 @@ int main(int argc, char **argv)
         return 1;
     }
 
-    /* allocate the output media context */
+    /* Allocate the output media context. */
     oc = avformat_alloc_context();
     if (!oc) {
         fprintf(stderr, "Memory error\n");
@@ -453,8 +456,8 @@ int main(int argc, char **argv)
     oc->oformat = fmt;
     snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
 
-    /* add the audio and video streams using the default format codecs
-       and initialize the codecs */
+    /* Add the audio and video streams using the default format codecs
+     * and initialize the codecs. */
     video_st = NULL;
     audio_st = NULL;
     if (fmt->video_codec != CODEC_ID_NONE) {
@@ -464,8 +467,8 @@ int main(int argc, char **argv)
         audio_st = add_audio_stream(oc, fmt->audio_codec);
     }
 
-    /* now that all the parameters are set, we can open the audio and
-       video codecs and allocate the necessary encode buffers */
+    /* Now that all the parameters are set, we can open the audio and
+     * video codecs and allocate the necessary encode buffers. */
     if (video_st)
         open_video(oc, video_st);
     if (audio_st)
@@ -481,18 +484,19 @@ int main(int argc, char **argv)
         }
     }
 
-    /* write the stream header, if any */
+    /* Write the stream header, if any. */
     avformat_write_header(oc, NULL);
 
-    for(;;) {
-        /* compute current audio and video time */
+    for (;;) {
+        /* Compute current audio and video time. */
         if (audio_st)
             audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
         else
             audio_pts = 0.0;
 
         if (video_st)
-            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
+            video_pts = (double)video_st->pts.val * video_st->time_base.num /
+                        video_st->time_base.den;
         else
             video_pts = 0.0;
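The audio_pts/video_pts values computed above drive the write decision that follows in the file: the stream that is further behind gets the next frame, so the output stays interleaved. A sketch of that logic (the exact condition in the source may be phrased slightly differently):

    /* Stop once both streams have covered the requested duration. */
    if ((!audio_st || audio_pts >= STREAM_DURATION) &&
        (!video_st || video_pts >= STREAM_DURATION))
        break;

    /* Otherwise write a frame for whichever stream lags behind. */
    if (!video_st || (audio_st && audio_pts < video_pts))
        write_audio_frame(oc, audio_st);
    else
        write_video_frame(oc, video_st);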
@@ -508,28 +512,27 @@ int main(int argc, char **argv)
         }
     }
 
-    /* write the trailer, if any. the trailer must be written
-     * before you close the CodecContexts open when you wrote the
-     * header; otherwise write_trailer may try to use memory that
-     * was freed on av_codec_close() */
+    /* Write the trailer, if any. The trailer must be written before you
+     * close the CodecContexts open when you wrote the header; otherwise
+     * av_write_trailer() may try to use memory that was freed on
+     * av_codec_close(). */
     av_write_trailer(oc);
 
-    /* close each codec */
+    /* Close each codec. */
     if (video_st)
         close_video(oc, video_st);
     if (audio_st)
         close_audio(oc, audio_st);
 
-    /* free the streams */
-    for(i = 0; i < oc->nb_streams; i++) {
+    /* Free the streams. */
+    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
     }
 
-    if (!(fmt->flags & AVFMT_NOFILE)) {
-        /* close the output file */
+    if (!(fmt->flags & AVFMT_NOFILE))
+        /* Close the output file. */
        avio_close(oc->pb);
-    }
 
     /* free the stream */
     av_free(oc);