Merge commit 'e83c1e2d0bedb5d4fa9ab351126b2ecc552f1355'

* commit 'e83c1e2d0bedb5d4fa9ab351126b2ecc552f1355':
  avs: return meaningful error codes.
  aura: return meaningful error codes.
  asvdec: return meaningful error codes.
  ansi: return a meaningful error code
  anm: return meaningful error codes
  aasc: return meaningful error codes.
  8bps: return meaningful error codes.
  4xm: operate with pointers to AVFrames instead of whole structs.
  4xm: eliminate a pointless indirection

Conflicts:
	libavcodec/4xm.c
	libavcodec/aasc.c
	libavcodec/anm.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2013-01-06 23:48:48 +01:00
commit 6190446745
8 changed files with 66 additions and 64 deletions
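
The recurring change in the error-code commits above is to stop returning a bare -1 and instead either name the failure (AVERROR_INVALIDDATA, AVERROR(EINVAL), AVERROR(ENOMEM)) or propagate the code returned by the failing call. A minimal sketch of that pattern follows; the function and its checks are illustrative only and not taken from any of the patched decoders, while ff_get_buffer(), av_log() and the AVERROR macros are the real libavcodec/libavutil APIs of this period.

#include "avcodec.h"
#include "internal.h"              /* ff_get_buffer() */

/* Illustrative only -- not code from any of the patched decoders. */
static int sketch_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                               const AVPacket *pkt)
{
    int ret;

    if (pkt->size < 4)                /* bad bitstream: name the error ...   */
        return AVERROR_INVALIDDATA;   /* ... instead of the old "return -1"  */

    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;                   /* propagate the callee's error code   */
    }

    /* ... actual pixel decoding would go here ... */

    return pkt->size;                 /* bytes consumed on success           */
}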

libavcodec/4xm.c

@@ -130,7 +130,7 @@ typedef struct CFrameBuffer {
 typedef struct FourXContext {
     AVCodecContext *avctx;
     DSPContext dsp;
-    AVFrame current_picture, last_picture;
+    AVFrame *current_picture, *last_picture;
     GetBitContext pre_gb; ///< ac/dc prefix
     GetBitContext gb;
     GetByteContext g;
@@ -261,9 +261,9 @@ static void init_mv(FourXContext *f)
     for (i = 0; i < 256; i++) {
         if (f->version > 1)
-            f->mv[i] = mv[i][0] + mv[i][1] * f->current_picture.linesize[0] / 2;
+            f->mv[i] = mv[i][0] + mv[i][1] * f->current_picture->linesize[0] / 2;
         else
-            f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * f->current_picture.linesize[0] / 2;
+            f->mv[i] = (i & 15) - 8 + ((i >> 4) - 8) * f->current_picture->linesize[0] / 2;
     }
 }
@@ -340,7 +340,7 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
     int code = get_vlc2(&f->gb,
                         block_type_vlc[1 - (f->version > 1)][index].table,
                         BLOCK_TYPE_VLC_BITS, 1);
-    uint16_t *start = (uint16_t *)f->last_picture.data[0];
+    uint16_t *start = (uint16_t *)f->last_picture->data[0];
     uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
     av_assert2(code >= 0 && code <= 6);
@@ -409,9 +409,9 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
     int x, y;
     const int width = f->avctx->width;
     const int height = f->avctx->height;
-    uint16_t *src = (uint16_t *)f->last_picture.data[0];
-    uint16_t *dst = (uint16_t *)f->current_picture.data[0];
-    const int stride = f->current_picture.linesize[0] >> 1;
+    uint16_t *src = (uint16_t *)f->last_picture->data[0];
+    uint16_t *dst = (uint16_t *)f->current_picture->data[0];
+    const int stride = f->current_picture->linesize[0] >> 1;
     unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
                  bytestream_offset, wordstream_offset;
@@ -522,9 +522,9 @@ static int decode_i_block(FourXContext *f, DCTELEM *block)
 static inline void idct_put(FourXContext *f, int x, int y)
 {
     DCTELEM (*block)[64] = f->block;
-    int stride = f->current_picture.linesize[0] >> 1;
+    int stride = f->current_picture->linesize[0] >> 1;
     int i;
-    uint16_t *dst = ((uint16_t*)f->current_picture.data[0]) + y * stride + x;
+    uint16_t *dst = ((uint16_t*)f->current_picture->data[0]) + y * stride + x;
     for (i = 0; i < 4; i++) {
         block[i][0] += 0x80 * 8 * 8;
@@ -681,8 +681,8 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
     const int width = f->avctx->width;
     const int height = f->avctx->height;
     const int mbs = (FFALIGN(width, 16) >> 4) * (FFALIGN(height, 16) >> 4);
-    uint16_t *dst = (uint16_t*)f->current_picture.data[0];
-    const int stride = f->current_picture.linesize[0]>>1;
+    uint16_t *dst = (uint16_t*)f->current_picture->data[0];
+    const int stride = f->current_picture->linesize[0]>>1;
     const uint8_t *buf_end = buf + length;
     GetByteContext g3;
@@ -867,9 +867,9 @@ static int decode_frame(AVCodecContext *avctx, void *data,
         frame_size = buf_size - 12;
     }
-    FFSWAP(AVFrame, f->current_picture, f->last_picture);
+    FFSWAP(AVFrame*, f->current_picture, f->last_picture);
-    p = &f->current_picture;
+    p = f->current_picture;
     avctx->coded_frame = p;
     // alternatively we would have to use our own buffer management
@@ -894,14 +894,14 @@ static int decode_frame(AVCodecContext *avctx, void *data,
             return ret;
         }
     } else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
-        if (!f->last_picture.data[0]) {
-            f->last_picture.reference = 3;
-            if ((ret = ff_get_buffer(avctx, &f->last_picture)) < 0) {
+        if (!f->last_picture->data[0]) {
+            f->last_picture->reference = 3;
+            if ((ret = ff_get_buffer(avctx, f->last_picture)) < 0) {
                 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                 return ret;
             }
             for (i=0; i<avctx->height; i++)
-                memset(f->last_picture.data[0] + i*f->last_picture.linesize[0], 0, 2*avctx->width);
+                memset(f->last_picture->data[0] + i*f->last_picture->linesize[0], 0, 2*avctx->width);
         }
         p->pict_type = AV_PICTURE_TYPE_P;
@@ -927,16 +927,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     return buf_size;
 }
-static av_cold void common_init(AVCodecContext *avctx)
-{
-    FourXContext * const f = avctx->priv_data;
-    ff_dsputil_init(&f->dsp, avctx);
-    f->avctx = avctx;
-}
 static av_cold int decode_init(AVCodecContext *avctx)
 {
     FourXContext * const f = avctx->priv_data;
@@ -953,7 +943,8 @@ static av_cold int decode_init(AVCodecContext *avctx)
     avcodec_get_frame_defaults(&f->current_picture);
     avcodec_get_frame_defaults(&f->last_picture);
     f->version = AV_RL32(avctx->extradata) >> 16;
-    common_init(avctx);
+    ff_dsputil_init(&f->dsp, avctx);
+    f->avctx = avctx;
     init_vlcs(f);
     if (f->version > 2)
@@ -961,6 +952,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
     else
         avctx->pix_fmt = AV_PIX_FMT_BGR555;
+    f->current_picture = avcodec_alloc_frame();
+    f->last_picture = avcodec_alloc_frame();
+    if (!f->current_picture || !f->last_picture) {
+        avcodec_free_frame(&f->current_picture);
+        avcodec_free_frame(&f->last_picture);
+        return AVERROR(ENOMEM);
+    }
     return 0;
 }
@@ -977,10 +976,12 @@ static av_cold int decode_end(AVCodecContext *avctx)
         f->cfrm[i].allocated_size = 0;
     }
     ff_free_vlc(&f->pre_vlc);
-    if (f->current_picture.data[0])
-        avctx->release_buffer(avctx, &f->current_picture);
-    if (f->last_picture.data[0])
-        avctx->release_buffer(avctx, &f->last_picture);
+    if (f->current_picture->data[0])
+        avctx->release_buffer(avctx, f->current_picture);
+    if (f->last_picture->data[0])
+        avctx->release_buffer(avctx, f->last_picture);
+    avcodec_free_frame(&f->current_picture);
+    avcodec_free_frame(&f->last_picture);
     return 0;
 }
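
The 4xm.c hunks above switch FourXContext from embedding two AVFrame structs to holding two pointers that are allocated in init, swapped once per decoded frame, and freed in close. A minimal sketch of that lifecycle under the pre-refcounted-frame API used here; the SketchContext name and functions are illustrative, while avcodec_alloc_frame(), avcodec_free_frame() and FFSWAP() are the real APIs this diff relies on.

#include "libavutil/common.h"   /* FFSWAP() */
#include "avcodec.h"

typedef struct SketchContext {
    AVFrame *current_picture, *last_picture;   /* pointers, not whole structs */
} SketchContext;

static av_cold int sketch_init(AVCodecContext *avctx)
{
    SketchContext *s = avctx->priv_data;

    s->current_picture = avcodec_alloc_frame();
    s->last_picture    = avcodec_alloc_frame();
    if (!s->current_picture || !s->last_picture) {
        avcodec_free_frame(&s->current_picture);
        avcodec_free_frame(&s->last_picture);
        return AVERROR(ENOMEM);
    }
    return 0;
}

static void sketch_advance(SketchContext *s)
{
    /* Swapping two pointers is cheap; FFSWAP on whole AVFrame structs
     * copied every field of both frames for each decoded picture. */
    FFSWAP(AVFrame*, s->current_picture, s->last_picture);
}

static av_cold int sketch_close(AVCodecContext *avctx)
{
    SketchContext *s = avctx->priv_data;

    avcodec_free_frame(&s->current_picture);
    avcodec_free_frame(&s->last_picture);
    return 0;
}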

libavcodec/8bps.c

@@ -68,15 +68,16 @@ static int decode_frame(AVCodecContext *avctx, void *data,
     unsigned char count;
     unsigned int planes = c->planes;
     unsigned char *planemap = c->planemap;
+    int ret;
     if (c->pic.data[0])
         avctx->release_buffer(avctx, &c->pic);
     c->pic.reference = 0;
     c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
-    if (ff_get_buffer(avctx, &c->pic) < 0){
+    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
     /* Set data pointer after line lengths */
@@ -96,14 +97,14 @@ static int decode_frame(AVCodecContext *avctx, void *data,
             /* Decode a row of this plane */
             while (dlen > 0) {
                 if (dp + 1 >= buf + buf_size)
-                    return -1;
+                    return AVERROR_INVALIDDATA;
                 if ((count = *dp++) <= 127) {
                     count++;
                     dlen -= count + 1;
                     if (pixptr + count * planes > pixptr_end)
                         break;
                     if (dp + count > buf + buf_size)
-                        return -1;
+                        return AVERROR_INVALIDDATA;
                     while (count--) {
                         *pixptr = *dp++;
                         pixptr += planes;
@@ -181,7 +182,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
     default:
         av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n",
                avctx->bits_per_coded_sample);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     return 0;

libavcodec/aasc.c

@@ -81,7 +81,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     AascContext *s = avctx->priv_data;
-    int compr, i, stride, psize;
+    int compr, i, stride, psize, ret;
     if (buf_size < 4) {
         av_log(avctx, AV_LOG_ERROR, "frame too short\n");
@@ -90,9 +90,9 @@ static int aasc_decode_frame(AVCodecContext *avctx,
     s->frame.reference = 3;
     s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
-    if (avctx->reget_buffer(avctx, &s->frame)) {
+    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
-        return -1;
+        return ret;
     }
     compr = AV_RL32(buf);
@@ -124,7 +124,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
             break;
         default:
             av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
-            return -1;
+            return AVERROR_INVALIDDATA;
         }
         break;
     default:

libavcodec/anm.c

@@ -114,7 +114,7 @@ static int decode_frame(AVCodecContext *avctx,
     uint8_t *dst, *dst_end;
     int count, ret;
-    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0) {
+    if ((ret = avctx->reget_buffer(avctx, &s->frame)) < 0){
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }

libavcodec/ansi.c

@@ -434,8 +434,8 @@ static int decode_frame(AVCodecContext *avctx,
                     av_log(avctx, AV_LOG_WARNING, "args overflow (%i)\n", s->nb_args);
                 if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] >= 0)
                     s->nb_args++;
-                if (execute_code(avctx, buf[0]) < 0)
-                    return -1;
+                if ((ret = execute_code(avctx, buf[0])) < 0)
+                    return ret;
                 s->state = STATE_NORMAL;
             }
             break;

libavcodec/asvdec.c

@@ -108,7 +108,7 @@ static inline int asv1_decode_block(ASV1Context *a, DCTELEM block[64])
                 break;
             if (ccp < 0 || i >= 10) {
                 av_log(a->avctx, AV_LOG_ERROR, "coded coeff pattern damaged\n");
-                return -1;
+                return AVERROR_INVALIDDATA;
             }
             if (ccp & 8)
@@ -210,15 +210,15 @@ static int decode_frame(AVCodecContext *avctx,
     int buf_size = avpkt->size;
     AVFrame *picture = data;
     AVFrame * const p = &a->picture;
-    int mb_x, mb_y;
+    int mb_x, mb_y, ret;
     if (p->data[0])
         avctx->release_buffer(avctx, p);
     p->reference = 0;
-    if (ff_get_buffer(avctx, p) < 0) {
+    if ((ret = ff_get_buffer(avctx, p)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
     p->pict_type = AV_PICTURE_TYPE_I;
     p->key_frame = 1;
@@ -240,8 +240,8 @@ static int decode_frame(AVCodecContext *avctx,
     for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
         for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
-            if (decode_mb(a, a->block) < 0)
-                return -1;
+            if ((ret = decode_mb(a, a->block)) < 0)
+                return ret;
             idct_put(a, mb_x, mb_y);
         }
@@ -250,8 +250,8 @@ static int decode_frame(AVCodecContext *avctx,
     if (a->mb_width2 != a->mb_width) {
         mb_x = a->mb_width2;
         for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
-            if (decode_mb(a, a->block) < 0)
-                return -1;
+            if ((ret = decode_mb(a, a->block)) < 0)
+                return ret;
             idct_put(a, mb_x, mb_y);
         }
@@ -260,8 +260,8 @@ static int decode_frame(AVCodecContext *avctx,
     if (a->mb_height2 != a->mb_height) {
         mb_y = a->mb_height2;
         for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
-            if (decode_mb(a, a->block) < 0)
-                return -1;
+            if ((ret = decode_mb(a, a->block)) < 0)
+                return ret;
             idct_put(a, mb_x, mb_y);
         }

libavcodec/aura.c

@@ -39,7 +39,7 @@ static av_cold int aura_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
     /* width needs to be divisible by 4 for this codec to work */
     if (avctx->width & 0x3)
-        return -1;
+        return AVERROR(EINVAL);
     avctx->pix_fmt = AV_PIX_FMT_YUV422P;
     avcodec_get_frame_defaults(&s->frame);
@@ -53,7 +53,7 @@ static int aura_decode_frame(AVCodecContext *avctx,
     AuraDecodeContext *s = avctx->priv_data;
     uint8_t *Y, *U, *V;
     uint8_t val;
-    int x, y;
+    int x, y, ret;
     const uint8_t *buf = pkt->data;
     /* prediction error tables (make it clear that they are signed values) */
@@ -62,7 +62,7 @@ static int aura_decode_frame(AVCodecContext *avctx,
     if (pkt->size != 48 + avctx->height * avctx->width) {
         av_log(avctx, AV_LOG_ERROR, "got a buffer with %d bytes when %d were expected\n",
                pkt->size, 48 + avctx->height * avctx->width);
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     /* pixel data starts 48 bytes in, after 3x16-byte tables */
@@ -73,9 +73,9 @@ static int aura_decode_frame(AVCodecContext *avctx,
     s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
     s->frame.reference = 0;
-    if (ff_get_buffer(avctx, &s->frame) < 0) {
+    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
-        return -1;
+        return ret;
     }
     Y = s->frame.data[0];

libavcodec/avs.c

@@ -54,14 +54,14 @@ avs_decode_frame(AVCodecContext * avctx,
     AVFrame *const p = &avs->picture;
     const uint8_t *table, *vect;
     uint8_t *out;
-    int i, j, x, y, stride, vect_w = 3, vect_h = 3;
+    int i, j, x, y, stride, ret, vect_w = 3, vect_h = 3;
     AvsVideoSubType sub_type;
     AvsBlockType type;
     GetBitContext change_map = {0}; //init to silence warning
-    if (avctx->reget_buffer(avctx, p)) {
+    if ((ret = avctx->reget_buffer(avctx, p)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
-        return -1;
+        return ret;
     }
     p->reference = 3;
     p->pict_type = AV_PICTURE_TYPE_P;
@@ -96,7 +96,7 @@ avs_decode_frame(AVCodecContext * avctx,
     }
     if (type != AVS_VIDEO)
-        return -1;
+        return AVERROR_INVALIDDATA;
     switch (sub_type) {
     case AVS_I_FRAME:
@@ -118,7 +118,7 @@ avs_decode_frame(AVCodecContext * avctx,
         break;
     default:
-        return -1;
+        return AVERROR_INVALIDDATA;
     }
     if (buf_end - buf < 256 * vect_w * vect_h)