dv: use AVFrame API properly

Author: Anton Khirnov
Date:   2013-11-29 08:58:10 +01:00
Parent: c9ca220ef2
Commit: d4f1188d1a
4 changed files with 38 additions and 39 deletions

View File

@ -313,8 +313,6 @@ av_cold int ff_dvvideo_init(AVCodecContext *avctx)
s->idct_put[1] = ff_simple_idct248_put; // FIXME: need to add it to DSP s->idct_put[1] = ff_simple_idct248_put; // FIXME: need to add it to DSP
memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64); memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64);
avcodec_get_frame_defaults(&s->picture);
avctx->coded_frame = &s->picture;
s->avctx = avctx; s->avctx = avctx;
avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;

View File

@ -34,7 +34,7 @@
typedef struct DVVideoContext { typedef struct DVVideoContext {
const DVprofile *sys; const DVprofile *sys;
AVFrame picture; AVFrame *frame;
AVCodecContext *avctx; AVCodecContext *avctx;
uint8_t *buf; uint8_t *buf;

View File

@ -258,12 +258,12 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) || if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
(s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) || (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
(s->sys->height >= 720 && mb_y != 134)) { (s->sys->height >= 720 && mb_y != 134)) {
y_stride = (s->picture.linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize)); y_stride = (s->frame->linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize));
} else { } else {
y_stride = (2 << log2_blocksize); y_stride = (2 << log2_blocksize);
} }
y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << log2_blocksize); y_ptr = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << log2_blocksize);
linesize = s->picture.linesize[0] << is_field_mode[mb_index]; linesize = s->frame->linesize[0] << is_field_mode[mb_index];
mb[0] .idct_put(y_ptr , linesize, block + 0*64); mb[0] .idct_put(y_ptr , linesize, block + 0*64);
if (s->sys->video_stype == 4) { /* SD 422 */ if (s->sys->video_stype == 4) { /* SD 422 */
mb[2].idct_put(y_ptr + (1 << log2_blocksize) , linesize, block + 2*64); mb[2].idct_put(y_ptr + (1 << log2_blocksize) , linesize, block + 2*64);
@ -276,19 +276,19 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
block += 4*64; block += 4*64;
/* idct_put'ting chrominance */ /* idct_put'ting chrominance */
c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->picture.linesize[1] + c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] +
(mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << log2_blocksize); (mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << log2_blocksize);
for (j = 2; j; j--) { for (j = 2; j; j--) {
uint8_t *c_ptr = s->picture.data[j] + c_offset; uint8_t *c_ptr = s->frame->data[j] + c_offset;
if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) { if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
uint64_t aligned_pixels[64/8]; uint64_t aligned_pixels[64/8];
uint8_t *pixels = (uint8_t*)aligned_pixels; uint8_t *pixels = (uint8_t*)aligned_pixels;
uint8_t *c_ptr1, *ptr1; uint8_t *c_ptr1, *ptr1;
int x, y; int x, y;
mb->idct_put(pixels, 8, block); mb->idct_put(pixels, 8, block);
for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->picture.linesize[j], pixels += 8) { for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->frame->linesize[j], pixels += 8) {
ptr1 = pixels + (1 << (log2_blocksize - 1)); ptr1 = pixels + (1 << (log2_blocksize - 1));
c_ptr1 = c_ptr + (s->picture.linesize[j] << log2_blocksize); c_ptr1 = c_ptr + (s->frame->linesize[j] << log2_blocksize);
for (x = 0; x < (1 << (log2_blocksize - 1)); x++) { for (x = 0; x < (1 << (log2_blocksize - 1)); x++) {
c_ptr[x] = pixels[x]; c_ptr[x] = pixels[x];
c_ptr1[x] = ptr1[x]; c_ptr1[x] = ptr1[x];
@ -297,8 +297,8 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
block += 64; mb++; block += 64; mb++;
} else { } else {
y_stride = (mb_y == 134) ? (1 << log2_blocksize) : y_stride = (mb_y == 134) ? (1 << log2_blocksize) :
s->picture.linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize); s->frame->linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize);
linesize = s->picture.linesize[j] << is_field_mode[mb_index]; linesize = s->frame->linesize[j] << is_field_mode[mb_index];
(mb++)-> idct_put(c_ptr , linesize, block); block += 64; (mb++)-> idct_put(c_ptr , linesize, block); block += 64;
if (s->sys->bpm == 8) { if (s->sys->bpm == 8) {
(mb++)->idct_put(c_ptr + y_stride, linesize, block); block += 64; (mb++)->idct_put(c_ptr + y_stride, linesize, block); block += 64;
@ -327,8 +327,9 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
return -1; /* NOTE: we only accept several full frames */ return -1; /* NOTE: we only accept several full frames */
} }
s->picture.key_frame = 1; s->frame = data;
s->picture.pict_type = AV_PICTURE_TYPE_I; s->frame->key_frame = 1;
s->frame->pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt; avctx->pix_fmt = s->sys->pix_fmt;
avctx->time_base = s->sys->time_base; avctx->time_base = s->sys->time_base;
@ -336,12 +337,12 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
if (ret < 0) if (ret < 0)
return ret; return ret;
if (ff_get_buffer(avctx, &s->picture, 0) < 0) { if (ff_get_buffer(avctx, s->frame, 0) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1; return -1;
} }
s->picture.interlaced_frame = 1; s->frame->interlaced_frame = 1;
s->picture.top_field_first = 0; s->frame->top_field_first = 0;
s->buf = buf; s->buf = buf;
avctx->execute(avctx, dv_decode_video_segment, s->sys->work_chunks, NULL, avctx->execute(avctx, dv_decode_video_segment, s->sys->work_chunks, NULL,
@ -351,7 +352,6 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
/* return image */ /* return image */
*got_frame = 1; *got_frame = 1;
av_frame_move_ref(data, &s->picture);
/* Determine the codec's sample_aspect ratio from the packet */ /* Determine the codec's sample_aspect ratio from the packet */
vsc_pack = buf + 80*5 + 48 + 5; vsc_pack = buf + 80*5 + 48 + 5;
@ -364,15 +364,6 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
return s->sys->frame_size; return s->sys->frame_size;
} }
static int dvvideo_close(AVCodecContext *c)
{
DVVideoContext *s = c->priv_data;
av_frame_unref(&s->picture);
return 0;
}
AVCodec ff_dvvideo_decoder = { AVCodec ff_dvvideo_decoder = {
.name = "dvvideo", .name = "dvvideo",
.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
@ -380,7 +371,6 @@ AVCodec ff_dvvideo_decoder = {
.id = AV_CODEC_ID_DVVIDEO, .id = AV_CODEC_ID_DVVIDEO,
.priv_data_size = sizeof(DVVideoContext), .priv_data_size = sizeof(DVVideoContext),
.init = ff_dvvideo_init, .init = ff_dvvideo_init,
.close = dvvideo_close,
.decode = dvvideo_decode_frame, .decode = dvvideo_decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS, .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
}; };

View File

@ -43,6 +43,10 @@ static av_cold int dvvideo_init_encoder(AVCodecContext *avctx)
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
dv_vlc_map_tableinit(); dv_vlc_map_tableinit();
return ff_dvvideo_init(avctx); return ff_dvvideo_init(avctx);
@ -388,12 +392,12 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) || if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
(s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) || (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
(s->sys->height >= 720 && mb_y != 134)) { (s->sys->height >= 720 && mb_y != 134)) {
y_stride = s->picture.linesize[0] << 3; y_stride = s->frame->linesize[0] << 3;
} else { } else {
y_stride = 16; y_stride = 16;
} }
y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << 3); y_ptr = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << 3);
linesize = s->picture.linesize[0]; linesize = s->frame->linesize[0];
if (s->sys->video_stype == 4) { /* SD 422 */ if (s->sys->video_stype == 4) { /* SD 422 */
vs_bit_size += vs_bit_size +=
@ -411,12 +415,12 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
enc_blk += 4; enc_blk += 4;
/* initializing chrominance blocks */ /* initializing chrominance blocks */
c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->picture.linesize[1] + c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] +
(mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << 3); (mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << 3);
for (j = 2; j; j--) { for (j = 2; j; j--) {
uint8_t *c_ptr = s->picture.data[j] + c_offset; uint8_t *c_ptr = s->frame->data[j] + c_offset;
linesize = s->picture.linesize[j]; linesize = s->frame->linesize[j];
y_stride = (mb_y == 134) ? 8 : (s->picture.linesize[j] << 3); y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << 3);
if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) { if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
uint8_t* d; uint8_t* d;
uint8_t* b = scratch; uint8_t* b = scratch;
@ -665,9 +669,9 @@ static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt,
} }
c->pix_fmt = s->sys->pix_fmt; c->pix_fmt = s->sys->pix_fmt;
s->picture = *frame; s->frame = frame;
s->picture.key_frame = 1; c->coded_frame->key_frame = 1;
s->picture.pict_type = AV_PICTURE_TYPE_I; c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
s->buf = pkt->data; s->buf = pkt->data;
c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL, c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL,
@ -683,6 +687,12 @@ static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt,
return 0; return 0;
} }
static int dvvideo_encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
AVCodec ff_dvvideo_encoder = { AVCodec ff_dvvideo_encoder = {
.name = "dvvideo", .name = "dvvideo",
.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"), .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
@ -691,6 +701,7 @@ AVCodec ff_dvvideo_encoder = {
.priv_data_size = sizeof(DVVideoContext), .priv_data_size = sizeof(DVVideoContext),
.init = dvvideo_init_encoder, .init = dvvideo_init_encoder,
.encode2 = dvvideo_encode_frame, .encode2 = dvvideo_encode_frame,
.close = dvvideo_encode_close,
.capabilities = CODEC_CAP_SLICE_THREADS, .capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts = (const enum AVPixelFormat[]) { .pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE