From acaffdca21f63b3b465892e8e9d85fd8ca0022bd Mon Sep 17 00:00:00 2001
From: Anton Khirnov
Date: Sat, 9 Nov 2013 10:14:46 +0100
Subject: [PATCH 1/4] mss1: use the AVFrame API properly.

---
 libavcodec/mss1.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/libavcodec/mss1.c b/libavcodec/mss1.c
index b5e016b378..a67a942ac2 100644
--- a/libavcodec/mss1.c
+++ b/libavcodec/mss1.c
@@ -30,7 +30,7 @@
 
 typedef struct MSS1Context {
     MSS12Context ctx;
-    AVFrame pic;
+    AVFrame *pic;
     SliceContext sc;
 } MSS1Context;
 
@@ -151,34 +151,34 @@ static int mss1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     init_get_bits(&gb, buf, buf_size * 8);
     arith_init(&acoder, &gb);
 
-    if ((ret = ff_reget_buffer(avctx, &ctx->pic)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, ctx->pic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
 
-    c->pal_pic = ctx->pic.data[0] + ctx->pic.linesize[0] * (avctx->height - 1);
-    c->pal_stride = -ctx->pic.linesize[0];
+    c->pal_pic = ctx->pic->data[0] + ctx->pic->linesize[0] * (avctx->height - 1);
+    c->pal_stride = -ctx->pic->linesize[0];
     c->keyframe = !arith_get_bit(&acoder);
     if (c->keyframe) {
         c->corrupted = 0;
         ff_mss12_slicecontext_reset(&ctx->sc);
         pal_changed = decode_pal(c, &acoder);
-        ctx->pic.key_frame = 1;
-        ctx->pic.pict_type = AV_PICTURE_TYPE_I;
+        ctx->pic->key_frame = 1;
+        ctx->pic->pict_type = AV_PICTURE_TYPE_I;
     } else {
         if (c->corrupted)
             return AVERROR_INVALIDDATA;
-        ctx->pic.key_frame = 0;
-        ctx->pic.pict_type = AV_PICTURE_TYPE_P;
+        ctx->pic->key_frame = 0;
+        ctx->pic->pict_type = AV_PICTURE_TYPE_P;
     }
     c->corrupted = ff_mss12_decode_rect(&ctx->sc, &acoder, 0, 0,
                                         avctx->width, avctx->height);
     if (c->corrupted)
         return AVERROR_INVALIDDATA;
-    memcpy(ctx->pic.data[1], c->pal, AVPALETTE_SIZE);
-    ctx->pic.palette_has_changed = pal_changed;
+    memcpy(ctx->pic->data[1], c->pal, AVPALETTE_SIZE);
+    ctx->pic->palette_has_changed = pal_changed;
 
-    if ((ret = av_frame_ref(data, &ctx->pic)) < 0)
+    if ((ret = av_frame_ref(data, ctx->pic)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -193,7 +193,10 @@ static av_cold int mss1_decode_init(AVCodecContext *avctx)
     int ret;
 
     c->ctx.avctx = avctx;
-    avctx->coded_frame = &c->pic;
+
+    c->pic = av_frame_alloc();
+    if (!c->pic)
+        return AVERROR(ENOMEM);
 
     ret = ff_mss12_decode_init(&c->ctx, 0, &c->sc, NULL);
 
@@ -206,7 +209,7 @@ static av_cold int mss1_decode_end(AVCodecContext *avctx)
 {
     MSS1Context * const ctx = avctx->priv_data;
 
-    av_frame_unref(&ctx->pic);
+    av_frame_free(&ctx->pic);
     ff_mss12_decode_end(&ctx->ctx);
 
     return 0;

From e9198f61db90ae07e649096bcc6991a09786f09c Mon Sep 17 00:00:00 2001
From: Anton Khirnov
Date: Sat, 9 Nov 2013 10:14:46 +0100
Subject: [PATCH 2/4] mss2: use the AVFrame API properly.

---
 libavcodec/mss2.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/libavcodec/mss2.c b/libavcodec/mss2.c
index d9c04bea95..0e5fd6df21 100644
--- a/libavcodec/mss2.c
+++ b/libavcodec/mss2.c
@@ -34,7 +34,7 @@
 typedef struct MSS2Context {
     VC1Context v;
     int split_position;
-    AVFrame last_pic;
+    AVFrame *last_pic;
     MSS12Context c;
     MSS2DSPContext dsp;
     SliceContext sc[2];
@@ -523,8 +523,8 @@ static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         return AVERROR_INVALIDDATA;
 
     avctx->pix_fmt = is_555 ? AV_PIX_FMT_RGB555 : AV_PIX_FMT_RGB24;
-    if (ctx->last_pic.format != avctx->pix_fmt)
-        av_frame_unref(&ctx->last_pic);
+    if (ctx->last_pic->format != avctx->pix_fmt)
+        av_frame_unref(ctx->last_pic);
 
     if (has_wmv9) {
         bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
@@ -603,20 +603,20 @@ static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             return ret;
         }
 
-        if (ctx->last_pic.data[0]) {
-            av_assert0(frame->linesize[0] == ctx->last_pic.linesize[0]);
-            c->last_rgb_pic = ctx->last_pic.data[0] +
-                              ctx->last_pic.linesize[0] * (avctx->height - 1);
+        if (ctx->last_pic->data[0]) {
+            av_assert0(frame->linesize[0] == ctx->last_pic->linesize[0]);
+            c->last_rgb_pic = ctx->last_pic->data[0] +
+                              ctx->last_pic->linesize[0] * (avctx->height - 1);
         } else {
             av_log(avctx, AV_LOG_ERROR, "Missing keyframe\n");
             return AVERROR_INVALIDDATA;
         }
     } else {
-        if ((ret = ff_reget_buffer(avctx, &ctx->last_pic)) < 0) {
+        if ((ret = ff_reget_buffer(avctx, ctx->last_pic)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return ret;
        }
-        if ((ret = av_frame_ref(frame, &ctx->last_pic)) < 0)
+        if ((ret = av_frame_ref(frame, ctx->last_pic)) < 0)
            return ret;
 
        c->last_rgb_pic = NULL;
@@ -730,8 +730,8 @@ static int mss2_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         av_log(avctx, AV_LOG_WARNING, "buffer not fully consumed\n");
 
     if (c->mvX < 0 || c->mvY < 0) {
-        av_frame_unref(&ctx->last_pic);
-        ret = av_frame_ref(&ctx->last_pic, frame);
+        av_frame_unref(ctx->last_pic);
+        ret = av_frame_ref(ctx->last_pic, frame);
         if (ret < 0)
             return ret;
     }
@@ -806,7 +806,7 @@ static av_cold int mss2_decode_end(AVCodecContext *avctx)
 {
     MSS2Context *const ctx = avctx->priv_data;
 
-    av_frame_unref(&ctx->last_pic);
+    av_frame_free(&ctx->last_pic);
 
     ff_mss12_decode_end(&ctx->c);
     av_freep(&ctx->c.pal_pic);
@@ -840,6 +840,12 @@ static av_cold int mss2_decode_init(AVCodecContext *avctx)
     avctx->pix_fmt = c->free_colours == 127 ? AV_PIX_FMT_RGB555
                                             : AV_PIX_FMT_RGB24;
 
+    ctx->last_pic = av_frame_alloc();
+    if (!ctx->last_pic) {
+        mss2_decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
+
     return 0;
 }

From 207909911d31612bb43f61117784fdf5d9f3aac6 Mon Sep 17 00:00:00 2001
From: Anton Khirnov
Date: Sat, 9 Nov 2013 10:14:46 +0100
Subject: [PATCH 3/4] mss3: use the AVFrame API properly.

---
 libavcodec/mss3.c | 62 ++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 34 insertions(+), 28 deletions(-)

diff --git a/libavcodec/mss3.c b/libavcodec/mss3.c
index 020a43a15b..c5e29a3205 100644
--- a/libavcodec/mss3.c
+++ b/libavcodec/mss3.c
@@ -108,7 +108,7 @@ typedef struct HaarBlockCoder {
 
 typedef struct MSS3Context {
     AVCodecContext *avctx;
-    AVFrame pic;
+    AVFrame *pic;
     int got_error;
     RangeCoder coder;
@@ -731,14 +731,14 @@ static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         return buf_size;
     c->got_error = 0;
 
-    if ((ret = ff_reget_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, c->pic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
-    c->pic.key_frame = keyframe;
-    c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+    c->pic->key_frame = keyframe;
+    c->pic->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
     if (!bytestream2_get_bytes_left(&gb)) {
-        if ((ret = av_frame_ref(data, &c->pic)) < 0)
+        if ((ret = av_frame_ref(data, c->pic)) < 0)
             return ret;
 
         *got_frame = 1;
@@ -751,9 +751,9 @@
     mb_width = dec_width >> 4;
     mb_height = dec_height >> 4;
-    dst[0] = c->pic.data[0] + dec_x + dec_y * c->pic.linesize[0];
-    dst[1] = c->pic.data[1] + dec_x / 2 + (dec_y / 2) * c->pic.linesize[1];
-    dst[2] = c->pic.data[2] + dec_x / 2 + (dec_y / 2) * c->pic.linesize[2];
+    dst[0] = c->pic->data[0] + dec_x + dec_y * c->pic->linesize[0];
+    dst[1] = c->pic->data[1] + dec_x / 2 + (dec_y / 2) * c->pic->linesize[1];
+    dst[2] = c->pic->data[2] + dec_x / 2 + (dec_y / 2) * c->pic->linesize[2];
 
     for (y = 0; y < mb_height; y++) {
         for (x = 0; x < mb_width; x++) {
             for (i = 0; i < 3; i++) {
@@ -764,23 +764,23 @@
                 case FILL_BLOCK:
                     decode_fill_block(acoder, c->fill_coder + i,
                                       dst[i] + x * blk_size,
-                                      c->pic.linesize[i], blk_size);
+                                      c->pic->linesize[i], blk_size);
                     break;
                 case IMAGE_BLOCK:
                     decode_image_block(acoder, c->image_coder + i,
                                        dst[i] + x * blk_size,
-                                       c->pic.linesize[i], blk_size);
+                                       c->pic->linesize[i], blk_size);
                     break;
                 case DCT_BLOCK:
                     decode_dct_block(acoder, c->dct_coder + i,
                                      dst[i] + x * blk_size,
-                                     c->pic.linesize[i], blk_size,
+                                     c->pic->linesize[i], blk_size,
                                      c->dctblock, x, y);
                     break;
                 case HAAR_BLOCK:
                     decode_haar_block(acoder, c->haar_coder + i,
                                       dst[i] + x * blk_size,
-                                      c->pic.linesize[i], blk_size,
+                                      c->pic->linesize[i], blk_size,
                                       c->hblock);
                     break;
                 }
@@ -792,12 +792,12 @@
                 }
             }
         }
-        dst[0] += c->pic.linesize[0] * 16;
-        dst[1] += c->pic.linesize[1] * 8;
-        dst[2] += c->pic.linesize[2] * 8;
+        dst[0] += c->pic->linesize[0] * 16;
+        dst[1] += c->pic->linesize[1] * 8;
+        dst[2] += c->pic->linesize[2] * 8;
     }
 
-    if ((ret = av_frame_ref(data, &c->pic)) < 0)
+    if ((ret = av_frame_ref(data, c->pic)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -805,6 +805,18 @@
     return buf_size;
 }
 
+static av_cold int mss3_decode_end(AVCodecContext *avctx)
+{
+    MSS3Context * const c = avctx->priv_data;
+    int i;
+
+    av_frame_free(&c->pic);
+    for (i = 0; i < 3; i++)
+        av_freep(&c->dct_coder[i].prev_dc);
+
+    return 0;
+}
+
 static av_cold int mss3_decode_init(AVCodecContext *avctx)
 {
     MSS3Context * const c = avctx->priv_data;
@@ -836,6 +848,12 @@ static av_cold int mss3_decode_init(AVCodecContext *avctx)
         }
     }
 
+    c->pic = av_frame_alloc();
+    if (!c->pic) {
+        mss3_decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
+
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
     init_coders(c);
@@ -843,18 +861,6 @@
     return 0;
 }
 
-static av_cold int mss3_decode_end(AVCodecContext *avctx)
-{
-    MSS3Context * const c = avctx->priv_data;
-    int i;
-
-    av_frame_unref(&c->pic);
-    for (i = 0; i < 3; i++)
-        av_freep(&c->dct_coder[i].prev_dc);
-
-    return 0;
-}
-
 AVCodec ff_msa1_decoder = {
     .name = "msa1",
     .long_name = NULL_IF_CONFIG_SMALL("MS ATC Screen"),

From 730bac7bab3c7dcd9fcb7c70f154e5f4cfaef9a7 Mon Sep 17 00:00:00 2001
From: Anton Khirnov
Date: Sat, 9 Nov 2013 10:14:46 +0100
Subject: [PATCH 4/4] mss4: use the AVFrame API properly.

---
 libavcodec/mss4.c | 64 +++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 35 insertions(+), 29 deletions(-)

diff --git a/libavcodec/mss4.c b/libavcodec/mss4.c
index 81de942f32..fa85258aa3 100644
--- a/libavcodec/mss4.c
+++ b/libavcodec/mss4.c
@@ -126,7 +126,7 @@ static const uint8_t mss4_vec_entry_vlc_syms[2][9] = {
 #define MAX_ENTRIES 162
 
 typedef struct MSS4Context {
-    AVFrame pic;
+    AVFrame *pic;
 
     VLC dc_vlc[2], ac_vlc[2];
     VLC vec_entry_vlc[2];
@@ -297,10 +297,10 @@ static int mss4_decode_dct_block(MSS4Context *c, GetBitContext *gb,
                 return ret;
             c->prev_dc[0][mb_x * 2 + i] = c->dc_cache[j][LEFT];
 
-            ff_mss34_dct_put(out + xpos * 8, c->pic.linesize[0],
+            ff_mss34_dct_put(out + xpos * 8, c->pic->linesize[0],
                              c->block);
         }
-        out += 8 * c->pic.linesize[0];
+        out += 8 * c->pic->linesize[0];
     }
 
     for (i = 1; i < 3; i++) {
@@ -320,7 +320,7 @@ static int mss4_decode_dct_block(MSS4Context *c, GetBitContext *gb,
         for (j = 0; j < 16; j++) {
             for (k = 0; k < 8; k++)
                 AV_WN16A(out + k * 2, c->imgbuf[i][k + (j & ~1) * 4] * 0x101);
-            out += c->pic.linesize[i];
+            out += c->pic->linesize[i];
         }
     }
 
@@ -481,7 +481,7 @@ static int mss4_decode_image_block(MSS4Context *ctx, GetBitContext *gb,
 
     for (i = 0; i < 3; i++)
         for (j = 0; j < 16; j++)
-            memcpy(picdst[i] + mb_x * 16 + j * ctx->pic.linesize[i],
+            memcpy(picdst[i] + mb_x * 16 + j * ctx->pic->linesize[i],
                    ctx->imgbuf[i] + j * 16, 16);
 
     return 0;
@@ -554,16 +554,16 @@ static int mss4_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
         return AVERROR_INVALIDDATA;
     }
 
-    if ((ret = ff_reget_buffer(avctx, &c->pic)) < 0) {
+    if ((ret = ff_reget_buffer(avctx, c->pic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
         return ret;
     }
-    c->pic.key_frame = (frame_type == INTRA_FRAME);
-    c->pic.pict_type = (frame_type == INTRA_FRAME) ? AV_PICTURE_TYPE_I
+    c->pic->key_frame = (frame_type == INTRA_FRAME);
+    c->pic->pict_type = (frame_type == INTRA_FRAME) ? AV_PICTURE_TYPE_I
                                                      : AV_PICTURE_TYPE_P;
     if (frame_type == SKIP_FRAME) {
         *got_frame = 1;
-        if ((ret = av_frame_ref(data, &c->pic)) < 0)
+        if ((ret = av_frame_ref(data, c->pic)) < 0)
             return ret;
 
         return buf_size;
@@ -579,9 +579,9 @@
     mb_width = FFALIGN(width, 16) >> 4;
     mb_height = FFALIGN(height, 16) >> 4;
 
-    dst[0] = c->pic.data[0];
-    dst[1] = c->pic.data[1];
-    dst[2] = c->pic.data[2];
+    dst[0] = c->pic->data[0];
+    dst[1] = c->pic->data[1];
+    dst[2] = c->pic->data[2];
 
     memset(c->prev_vec, 0, sizeof(c->prev_vec));
     for (y = 0; y < mb_height; y++) {
@@ -615,12 +615,12 @@
             if (blk_type != DCT_BLOCK)
                 mss4_update_dc_cache(c, x);
         }
-        dst[0] += c->pic.linesize[0] * 16;
-        dst[1] += c->pic.linesize[1] * 16;
-        dst[2] += c->pic.linesize[2] * 16;
+        dst[0] += c->pic->linesize[0] * 16;
+        dst[1] += c->pic->linesize[1] * 16;
+        dst[2] += c->pic->linesize[2] * 16;
     }
 
-    if ((ret = av_frame_ref(data, &c->pic)) < 0)
+    if ((ret = av_frame_ref(data, c->pic)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -628,6 +628,19 @@
     return buf_size;
 }
 
+static av_cold int mss4_decode_end(AVCodecContext *avctx)
+{
+    MSS4Context * const c = avctx->priv_data;
+    int i;
+
+    av_frame_free(&c->pic);
+    for (i = 0; i < 3; i++)
+        av_freep(&c->prev_dc[i]);
+    mss4_free_vlcs(c);
+
+    return 0;
+}
+
 static av_cold int mss4_decode_init(AVCodecContext *avctx)
 {
     MSS4Context * const c = avctx->priv_data;
@@ -648,24 +661,17 @@ static av_cold int mss4_decode_init(AVCodecContext *avctx)
         }
     }
 
+    c->pic = av_frame_alloc();
+    if (!c->pic) {
+        mss4_decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
+
     avctx->pix_fmt = AV_PIX_FMT_YUV444P;
 
     return 0;
 }
 
-static av_cold int mss4_decode_end(AVCodecContext *avctx)
-{
-    MSS4Context * const c = avctx->priv_data;
-    int i;
-
-    av_frame_unref(&c->pic);
-    for (i = 0; i < 3; i++)
-        av_freep(&c->prev_dc[i]);
-    mss4_free_vlcs(c);
-
-    return 0;
-}
-
 AVCodec ff_mts2_decoder = {
    .name = "mts2",
    .long_name = NULL_IF_CONFIG_SMALL("MS Expression Encoder Screen"),
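
All four patches above converge on the same AVFrame ownership pattern: allocate the frame once in init, reuse its buffers per packet with ff_reget_buffer(), hand the caller a fresh reference with av_frame_ref(), and release everything with av_frame_free() in close. A minimal sketch of that pattern follows; MyContext and the my_decode_*() names are hypothetical placeholders, not code from the patches, while the av_frame_*() and ff_reget_buffer() calls are the ones the patches themselves use.

    /*
     * Illustrative sketch only, not part of the patch series.
     * MyContext and my_decode_*() are hypothetical names.
     */
    #include "libavutil/frame.h"
    #include "avcodec.h"
    #include "internal.h"   /* ff_reget_buffer() */

    typedef struct MyContext {
        AVFrame *pic;       /* persistent, heap-allocated decoder frame */
    } MyContext;

    static av_cold int my_decode_init(AVCodecContext *avctx)
    {
        MyContext *c = avctx->priv_data;

        c->pic = av_frame_alloc();          /* allocate the struct once */
        if (!c->pic)
            return AVERROR(ENOMEM);
        return 0;
    }

    static int my_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
    {
        MyContext *c = avctx->priv_data;
        int ret;

        /* (re)acquire writable buffers for the persistent frame */
        if ((ret = ff_reget_buffer(avctx, c->pic)) < 0)
            return ret;

        /* ... decode the packet into c->pic->data[] ... */

        /* give the caller its own reference instead of the frame itself */
        if ((ret = av_frame_ref(data, c->pic)) < 0)
            return ret;
        *got_frame = 1;
        return avpkt->size;
    }

    static av_cold int my_decode_end(AVCodecContext *avctx)
    {
        MyContext *c = avctx->priv_data;

        av_frame_free(&c->pic);   /* drops the buffers and the AVFrame itself */
        return 0;
    }

This is also why each *_decode_end() in the patches switches from av_frame_unref(), which only releases the frame's buffers, to av_frame_free(), which additionally frees the AVFrame that av_frame_alloc() created.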