Merge remote-tracking branch 'qatar/master'

* qatar/master: (34 commits)
  mlp_parser: fix the channel mask value used for the top surround channel
  vorbisenc: check all allocations for failure
  roqaudioenc: return AVERROR codes instead of -1
  roqaudioenc: set correct bit rate
  roqaudioenc: use AVCodecContext.frame_size correctly.
  roqaudioenc: remove unneeded sample_fmt check
  ra144enc: use int16_t* for input samples rather than void*
  ra144enc: set AVCodecContext.coded_frame
  ra144enc: remove unneeded sample_fmt check
  nellymoserenc: set AVCodecContext.coded_frame
  nellymoserenc: improve error checking in encode_init()
  nellymoserenc: return AVERROR codes instead of -1
  libvorbis: improve error checking in oggvorbis_encode_init()
  mpegaudioenc: return AVERROR codes instead of -1
  libfaac: improve error checking and handling in Faac_encode_init()
  avutil: add AVERROR_UNKNOWN
  check for coded_frame allocation failure in several audio encoders
  audio encoders: do not set coded_frame->key_frame.
  g722enc: check for trellis data allocation error
  libspeexenc: export encoder delay through AVCodecContext.delay
  ...

Conflicts:
	doc/APIchanges
	libavcodec/avcodec.h
	libavcodec/fraps.c
	libavcodec/kgv1dec.c
	libavcodec/libfaac.c
	libavcodec/libgsm.c
	libavcodec/libvorbis.c
	libavcodec/mlp_parser.c
	libavcodec/roqaudioenc.c
	libavcodec/vorbisenc.c
	libavutil/avutil.h
	libavutil/error.c
	libavutil/error.h

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer 2012-02-26 04:47:56 +01:00
commit 305e4b35ea
30 changed files with 572 additions and 257 deletions

@@ -35,6 +35,9 @@ API changes, most recent first:
 2012-01-24 - xxxxxxx - lavfi 2.60.100
   Add avfilter_graph_dump.
 
+2012-xx-xx - xxxxxxx - lavu 51.24.0 - error.h
+  Add AVERROR_UNKNOWN
+
 2012-xx-xx - xxxxxxx - lavc 54.x.x
   Add duration field to AVCodecParserContext

@@ -1372,6 +1372,19 @@ typedef struct AVCodecContext {
      * the decoder output. (we assume the decoder matches the spec)
      * Decoding: Number of frames delay in addition to what a standard decoder
      * as specified in the spec would produce.
+     *
+     * Video:
+     *   Number of frames the decoded output will be delayed relative to the
+     *   encoded input.
+     *
+     * Audio:
+     *   Number of "priming" samples added to the beginning of the stream
+     *   during encoding. The decoded output will be delayed by this many
+     *   samples relative to the input to the encoder. Note that this field is
+     *   purely informational and does not directly affect the pts output by
+     *   the encoder, which should always be based on the actual presentation
+     *   time, including any delay.
+     *
      * - encoding: Set by libavcodec.
      * - decoding: Set by libavcodec.
      */
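For reference, the libspeexenc hunk later in this merge applies exactly this rule. A condensed sketch of the pattern follows; the function name and its parameters are placeholders, and ff_samples_to_time_base() is the internal helper used in that hunk (assumed visible via libavcodec's internal.h):

    #include "avcodec.h"
    #include "internal.h"   /* ff_samples_to_time_base(), as used by libspeexenc.c */

    /* Sketch only: export the priming-sample count for information and shift
     * the packet pts by it, as the libspeexenc change below does. */
    static void export_delay_and_pts(AVCodecContext *avctx, int lookahead,
                                     int64_t *next_pts, int samples_in_packet)
    {
        /* informational: does not by itself change any timestamp */
        avctx->delay = lookahead;

        /* the packet pts is moved back by the delay so the decoded audio
         * lines up with its actual presentation time */
        avctx->coded_frame->pts =
            ff_samples_to_time_base(avctx, *next_pts - avctx->delay);
        *next_pts += samples_in_packet;
    }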

@@ -122,7 +122,7 @@ static void cdxl_decode_ham6(CDXLVideoContext *c)
                 g = index * 0x11 << 8;
                 break;
             }
-            AV_WN32(out + x * 3, r | g | b);
+            AV_WL24(out + x * 3, r | g | b);
         }
         out += c->frame.linesize[0];
     }
@@ -165,7 +165,7 @@ static void cdxl_decode_ham8(CDXLVideoContext *c)
                 g = (index << 10) | (g & (3 << 8));
                 break;
             }
-            AV_WN32(out + x * 3, r | g | b);
+            AV_WL24(out + x * 3, r | g | b);
         }
         out += c->frame.linesize[0];
     }

@@ -142,7 +142,7 @@ static int decode_frame(AVCodecContext *avctx,
     int i, j, is_chroma;
     const int planes = 3;
     uint8_t *out;
+    enum PixelFormat pix_fmt;
 
     header = AV_RL32(buf);
     version = header & 0xff;
@@ -157,8 +157,6 @@ static int decode_frame(AVCodecContext *avctx,
     buf += header_size;
 
-    avctx->pix_fmt = version & 1 ? PIX_FMT_BGR24 : PIX_FMT_YUVJ420P;
-
     if (version < 2) {
         unsigned needed_size = avctx->width*avctx->height*3;
         if (version == 0) needed_size /= 2;
@@ -205,6 +203,13 @@ static int decode_frame(AVCodecContext *avctx,
     f->key_frame = 1;
     f->reference = 0;
     f->buffer_hints = FF_BUFFER_HINTS_VALID;
+
+    pix_fmt = version & 1 ? PIX_FMT_BGR24 : PIX_FMT_YUVJ420P;
+    if (avctx->pix_fmt != pix_fmt && f->data[0]) {
+        avctx->release_buffer(avctx, f);
+    }
+    avctx->pix_fmt = pix_fmt;
+
     if (ff_thread_get_buffer(avctx, f)) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;

@@ -41,9 +41,22 @@
 #define MIN_TRELLIS 0
 #define MAX_TRELLIS 16
 
+static av_cold int g722_encode_close(AVCodecContext *avctx)
+{
+    G722Context *c = avctx->priv_data;
+    int i;
+    for (i = 0; i < 2; i++) {
+        av_freep(&c->paths[i]);
+        av_freep(&c->node_buf[i]);
+        av_freep(&c->nodep_buf[i]);
+    }
+    return 0;
+}
+
 static av_cold int g722_encode_init(AVCodecContext * avctx)
 {
     G722Context *c = avctx->priv_data;
+    int ret;
 
     if (avctx->channels != 1) {
         av_log(avctx, AV_LOG_ERROR, "Only mono tracks are allowed.\n");
@@ -62,6 +75,10 @@ static av_cold int g722_encode_init(AVCodecContext * avctx)
             c->paths[i] = av_mallocz(max_paths * sizeof(**c->paths));
             c->node_buf[i] = av_mallocz(2 * frontier * sizeof(**c->node_buf));
             c->nodep_buf[i] = av_mallocz(2 * frontier * sizeof(**c->nodep_buf));
+            if (!c->paths[i] || !c->node_buf[i] || !c->nodep_buf[i]) {
+                ret = AVERROR(ENOMEM);
+                goto error;
+            }
         }
     }
@@ -100,18 +117,9 @@ static av_cold int g722_encode_init(AVCodecContext * avctx)
     }
     return 0;
-}
-
-static av_cold int g722_encode_close(AVCodecContext *avctx)
-{
-    G722Context *c = avctx->priv_data;
-    int i;
-    for (i = 0; i < 2; i++) {
-        av_freep(&c->paths[i]);
-        av_freep(&c->node_buf[i]);
-        av_freep(&c->nodep_buf[i]);
-    }
-    return 0;
+error:
+    g722_encode_close(avctx);
+    return ret;
 }
 
 static const int16_t low_quant[33] = {

@@ -30,10 +30,17 @@
 typedef struct {
     AVCodecContext *avctx;
-    AVFrame pic;
-    uint16_t *prev, *cur;
+    AVFrame prev, cur;
 } KgvContext;
 
+static void decode_flush(AVCodecContext *avctx)
+{
+    KgvContext * const c = avctx->priv_data;
+
+    if (c->prev.data[0])
+        avctx->release_buffer(avctx, &c->prev);
+}
+
 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
@@ -42,7 +49,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
     int offsets[8];
     uint16_t *out, *prev;
     int outcnt = 0, maxcnt;
-    int w, h, i;
+    int w, h, i, res;
 
     if (avpkt->size < 2)
         return -1;
@@ -54,20 +61,23 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
     if (av_image_check_size(w, h, 0, avctx))
         return -1;
 
-    if (w != avctx->width || h != avctx->height)
+    if (w != avctx->width || h != avctx->height) {
+        if (c->prev.data[0])
+            avctx->release_buffer(avctx, &c->prev);
         avcodec_set_dimensions(avctx, w, h);
+    }
 
     maxcnt = w * h;
 
-    out = av_realloc(c->cur, w * h * 2);
-    if (!out)
-        return -1;
-    c->cur = out;
-
-    prev = av_realloc(c->prev, w * h * 2);
-    if (!prev)
-        return -1;
-    c->prev = prev;
+    c->cur.reference = 3;
+    if ((res = avctx->get_buffer(avctx, &c->cur)) < 0)
+        return res;
+    out = (uint16_t *) c->cur.data[0];
+    if (c->prev.data[0]) {
+        prev = (uint16_t *) c->prev.data[0];
+    } else {
+        prev = NULL;
+    }
 
     for (i = 0; i < 8; i++)
         offsets[i] = -1;
@@ -80,6 +90,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
             out[outcnt++] = code; // rgb555 pixel coded directly
         } else {
             int count;
+            int inp_off;
             uint16_t *inp;
 
             if ((code & 0x6000) == 0x6000) {
@@ -101,7 +112,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
                 if (maxcnt - start < count)
                     break;
 
-                inp = prev + start;
+                if (!prev) {
+                    av_log(avctx, AV_LOG_ERROR,
+                           "Frame reference does not exist\n");
+                    break;
+                }
+                inp = prev;
+                inp_off = start;
             } else {
                 // copy from earlier in this frame
                 int offset = (code & 0x1FFF) + 1;
@@ -119,27 +137,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
                 if (outcnt < offset)
                     break;
 
-                inp = out + outcnt - offset;
+                inp = out;
+                inp_off = outcnt - offset;
             }
 
             if (maxcnt - outcnt < count)
                 break;
 
-            for (i = 0; i < count; i++)
+            for (i = inp_off; i < count + inp_off; i++) {
                 out[outcnt++] = inp[i];
+            }
         }
     }
 
     if (outcnt - maxcnt)
         av_log(avctx, AV_LOG_DEBUG, "frame finished with %d diff\n", outcnt - maxcnt);
 
-    c->pic.data[0] = (uint8_t *)c->cur;
-    c->pic.linesize[0] = w * 2;
-
     *data_size = sizeof(AVFrame);
-    *(AVFrame*)data = c->pic;
+    *(AVFrame*)data = c->cur;
 
-    FFSWAP(uint16_t *, c->cur, c->prev);
+    if (c->prev.data[0])
+        avctx->release_buffer(avctx, &c->prev);
+    FFSWAP(AVFrame, c->cur, c->prev);
 
     return avpkt->size;
 }
@@ -150,18 +169,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
     c->avctx = avctx;
     avctx->pix_fmt = PIX_FMT_RGB555;
-    avcodec_get_frame_defaults(&c->pic);
+    avctx->flags |= CODEC_FLAG_EMU_EDGE;
 
     return 0;
 }
 
 static av_cold int decode_end(AVCodecContext *avctx)
 {
-    KgvContext * const c = avctx->priv_data;
-
-    av_freep(&c->cur);
-    av_freep(&c->prev);
+    decode_flush(avctx);
 
     return 0;
 }
@@ -173,5 +188,6 @@ AVCodec ff_kgv1_decoder = {
     .init = decode_init,
     .close = decode_end,
     .decode = decode_frame,
+    .flush = decode_flush,
     .long_name = NULL_IF_CONFIG_SMALL("Kega Game Video"),
 };

@@ -38,28 +38,47 @@ static const int channel_maps[][6] = {
     { 2, 0, 1, 4, 5, 3 }, //< C L R Ls Rs LFE
 };
 
+static av_cold int Faac_encode_close(AVCodecContext *avctx)
+{
+    FaacAudioContext *s = avctx->priv_data;
+
+    av_freep(&avctx->coded_frame);
+    av_freep(&avctx->extradata);
+
+    if (s->faac_handle)
+        faacEncClose(s->faac_handle);
+
+    return 0;
+}
+
 static av_cold int Faac_encode_init(AVCodecContext *avctx)
 {
     FaacAudioContext *s = avctx->priv_data;
     faacEncConfigurationPtr faac_cfg;
     unsigned long samples_input, max_bytes_output;
+    int ret;
 
     /* number of channels */
     if (avctx->channels < 1 || avctx->channels > 6) {
         av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed\n", avctx->channels);
-        return -1;
+        ret = AVERROR(EINVAL);
+        goto error;
     }
 
     s->faac_handle = faacEncOpen(avctx->sample_rate,
                                  avctx->channels,
                                  &samples_input, &max_bytes_output);
+    if (!s->faac_handle) {
+        av_log(avctx, AV_LOG_ERROR, "error in faacEncOpen()\n");
+        ret = AVERROR_UNKNOWN;
+        goto error;
+    }
 
     /* check faac version */
     faac_cfg = faacEncGetCurrentConfiguration(s->faac_handle);
     if (faac_cfg->version != FAAC_CFG_VERSION) {
         av_log(avctx, AV_LOG_ERROR, "wrong libfaac version (compiled for: %d, using %d)\n", FAAC_CFG_VERSION, faac_cfg->version);
-        faacEncClose(s->faac_handle);
-        return -1;
+        ret = AVERROR(EINVAL);
+        goto error;
     }
 
     /* put the options in the configuration struct */
@@ -79,8 +98,8 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx)
         break;
     default:
         av_log(avctx, AV_LOG_ERROR, "invalid AAC profile\n");
-        faacEncClose(s->faac_handle);
-        return -1;
+        ret = AVERROR(EINVAL);
+        goto error;
     }
     faac_cfg->mpegVersion = MPEG4;
     faac_cfg->useTns = 0;
@@ -100,7 +119,10 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx)
     avctx->frame_size = samples_input / avctx->channels;
 
     avctx->coded_frame= avcodec_alloc_frame();
-    avctx->coded_frame->key_frame= 1;
+    if (!avctx->coded_frame) {
+        ret = AVERROR(ENOMEM);
+        goto error;
+    }
 
     /* Set decoder specific info */
     avctx->extradata_size = 0;
@@ -112,6 +134,10 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx)
         if (!faacEncGetDecoderSpecificInfo(s->faac_handle, &buffer,
                                            &decoder_specific_info_size)) {
             avctx->extradata = av_malloc(decoder_specific_info_size + FF_INPUT_BUFFER_PADDING_SIZE);
+            if (!avctx->extradata) {
+                ret = AVERROR(ENOMEM);
+                goto error;
+            }
             avctx->extradata_size = decoder_specific_info_size;
             memcpy(avctx->extradata, buffer, avctx->extradata_size);
             faac_cfg->outputFormat = 0;
@@ -123,10 +149,14 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx)
 
     if (!faacEncSetConfiguration(s->faac_handle, faac_cfg)) {
         av_log(avctx, AV_LOG_ERROR, "libfaac doesn't support this output format!\n");
-        return -1;
+        ret = AVERROR(EINVAL);
+        goto error;
     }
 
     return 0;
+error:
+    Faac_encode_close(avctx);
+    return ret;
 }
 
 static int Faac_encode_frame(AVCodecContext *avctx,
@@ -145,17 +175,6 @@ static int Faac_encode_frame(AVCodecContext *avctx,
     return bytes_written;
 }
 
-static av_cold int Faac_encode_close(AVCodecContext *avctx)
-{
-    FaacAudioContext *s = avctx->priv_data;
-
-    av_freep(&avctx->coded_frame);
-    av_freep(&avctx->extradata);
-
-    faacEncClose(s->faac_handle);
-
-    return 0;
-}
-
 static const AVProfile profiles[] = {
     { FF_PROFILE_AAC_MAIN, "Main" },
     { FF_PROFILE_AAC_LOW, "LC" },

@@ -81,7 +81,6 @@ typedef struct {
     int cbr_quality; ///< CBR quality 0 to 10
     int abr; ///< flag to enable ABR
     int pkt_frame_count; ///< frame count for the current packet
-    int lookahead; ///< encoder delay
     int64_t next_pts; ///< next pts, in sample_rate time base
     int pkt_sample_count; ///< sample count in the current packet
 } LibSpeexEncContext;
@@ -200,8 +199,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
     s->header.frames_per_packet = s->frames_per_packet;
 
     /* set encoding delay */
-    speex_encoder_ctl(s->enc_state, SPEEX_GET_LOOKAHEAD, &s->lookahead);
-    s->next_pts = -s->lookahead;
+    speex_encoder_ctl(s->enc_state, SPEEX_GET_LOOKAHEAD, &avctx->delay);
 
     /* create header packet bytes from header struct */
     /* note: libspeex allocates the memory for header_data, which is freed
@@ -257,7 +255,8 @@ static int encode_frame(AVCodecContext *avctx, uint8_t *frame, int buf_size,
     /* write output if all frames for the packet have been encoded */
     if (s->pkt_frame_count == s->frames_per_packet) {
         s->pkt_frame_count = 0;
-        avctx->coded_frame->pts = ff_samples_to_time_base(avctx, s->next_pts);
+        avctx->coded_frame->pts = ff_samples_to_time_base(avctx, s->next_pts -
+                                                          avctx->delay);
         s->next_pts += s->pkt_sample_count;
         s->pkt_sample_count = 0;
         if (buf_size > speex_bits_nbytes(&s->bits)) {

@@ -39,6 +39,8 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
     int index;
 
     avctx->coded_frame = avcodec_alloc_frame();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
     avctx->frame_size = 1024;
 
     voGetAACEncAPI(&s->codec_api);

@@ -87,6 +87,8 @@ static av_cold int amr_wb_encode_init(AVCodecContext *avctx)
 
     avctx->frame_size = 320;
     avctx->coded_frame = avcodec_alloc_frame();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
 
     s->state = E_IF_init();

@@ -61,13 +61,13 @@
 };
 static const AVClass class = { "libvorbis", av_default_item_name, options, LIBAVUTIL_VERSION_INT };
 
-static const char * error(int oggerr, int *averr)
+static int vorbis_error_to_averror(int ov_err)
 {
-    switch (oggerr) {
-    case OV_EFAULT: *averr = AVERROR(EFAULT); return "internal error";
-    case OV_EIMPL: *averr = AVERROR(EINVAL); return "not supported";
-    case OV_EINVAL: *averr = AVERROR(EINVAL); return "invalid request";
-    default: *averr = AVERROR(EINVAL); return "unknown error";
+    switch (ov_err) {
+    case OV_EFAULT: return AVERROR_BUG;
+    case OV_EINVAL: return AVERROR(EINVAL);
+    case OV_EIMPL: return AVERROR(EINVAL);
+    default: return AVERROR_UNKNOWN;
     }
 }
@@ -75,49 +75,41 @@ static av_cold int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avcco
 {
     OggVorbisContext *context = avccontext->priv_data;
     double cfreq;
-    int r;
+    int ret;
 
     if (avccontext->flags & CODEC_FLAG_QSCALE) {
         /* variable bitrate */
-        float quality = avccontext->global_quality / (float)FF_QP2LAMBDA;
-        r = vorbis_encode_setup_vbr(vi, avccontext->channels,
-                                    avccontext->sample_rate,
-                                    quality / 10.0);
-        if (r) {
-            av_log(avccontext, AV_LOG_ERROR,
-                   "Unable to set quality to %g: %s\n", quality, error(r, &r));
-            return r;
-        }
+        float q = avccontext->global_quality / (float)FF_QP2LAMBDA;
+        if ((ret = vorbis_encode_setup_vbr(vi, avccontext->channels,
+                                           avccontext->sample_rate,
+                                           q / 10.0)))
+            goto error;
     } else {
         int minrate = avccontext->rc_min_rate > 0 ? avccontext->rc_min_rate : -1;
         int maxrate = avccontext->rc_min_rate > 0 ? avccontext->rc_max_rate : -1;
 
         /* constant bitrate */
-        r = vorbis_encode_setup_managed(vi, avccontext->channels,
-                                        avccontext->sample_rate, minrate,
-                                        avccontext->bit_rate, maxrate);
-        if (r) {
-            av_log(avccontext, AV_LOG_ERROR,
-                   "Unable to set CBR to %d: %s\n", avccontext->bit_rate,
-                   error(r, &r));
-            return r;
-        }
+        if ((ret = vorbis_encode_setup_managed(vi, avccontext->channels,
+                                               avccontext->sample_rate, minrate,
+                                               avccontext->bit_rate, maxrate)))
+            goto error;
 
         /* variable bitrate by estimate, disable slow rate management */
         if (minrate == -1 && maxrate == -1)
-            if (vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL))
-                return AVERROR(EINVAL); /* should not happen */
+            if ((ret = vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE2_SET, NULL)))
+                goto error; /* should not happen */
     }
 
     /* cutoff frequency */
     if (avccontext->cutoff > 0) {
         cfreq = avccontext->cutoff / 1000.0;
-        if (vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq))
-            return AVERROR(EINVAL); /* should not happen */
+        if ((ret = vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq)))
+            goto error; /* should not happen */
     }
 
     if (context->iblock) {
-        vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &context->iblock);
+        if ((ret = vorbis_encode_ctl(vi, OV_ECTL_IBLOCK_SET, &context->iblock)))
+            goto error;
     }
 
     if (avccontext->channels == 3 &&
@@ -149,7 +141,12 @@
         }
     }
 
-    return vorbis_encode_setup_init(vi);
+    if ((ret = vorbis_encode_setup_init(vi)))
+        goto error;
+
+    return 0;
+error:
+    return vorbis_error_to_averror(ret);
 }
 
 /* How many bytes are needed for a buffer of length 'l' */
@@ -158,34 +155,63 @@
     return 1 + l / 255 + l;
 }
 
+static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext)
+{
+    OggVorbisContext *context = avccontext->priv_data;
+    /* ogg_packet op ; */
+
+    vorbis_analysis_wrote(&context->vd, 0); /* notify vorbisenc this is EOF */
+
+    vorbis_block_clear(&context->vb);
+    vorbis_dsp_clear(&context->vd);
+    vorbis_info_clear(&context->vi);
+
+    av_freep(&avccontext->coded_frame);
+    av_freep(&avccontext->extradata);
+
+    return 0;
+}
+
 static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext)
 {
     OggVorbisContext *context = avccontext->priv_data;
     ogg_packet header, header_comm, header_code;
     uint8_t *p;
     unsigned int offset;
-    int r;
+    int ret;
 
     vorbis_info_init(&context->vi);
-    r = oggvorbis_init_encoder(&context->vi, avccontext);
-    if (r < 0) {
-        av_log(avccontext, AV_LOG_ERROR, "oggvorbis_encode_init failed\n");
-        return r;
+    if ((ret = oggvorbis_init_encoder(&context->vi, avccontext))) {
+        av_log(avccontext, AV_LOG_ERROR, "oggvorbis_encode_init: init_encoder failed\n");
+        goto error;
+    }
+    if ((ret = vorbis_analysis_init(&context->vd, &context->vi))) {
+        ret = vorbis_error_to_averror(ret);
+        goto error;
+    }
+    if ((ret = vorbis_block_init(&context->vd, &context->vb))) {
+        ret = vorbis_error_to_averror(ret);
+        goto error;
     }
-    vorbis_analysis_init(&context->vd, &context->vi);
-    vorbis_block_init(&context->vd, &context->vb);
 
     vorbis_comment_init(&context->vc);
     vorbis_comment_add_tag(&context->vc, "encoder", LIBAVCODEC_IDENT);
 
-    vorbis_analysis_headerout(&context->vd, &context->vc, &header,
-                              &header_comm, &header_code);
+    if ((ret = vorbis_analysis_headerout(&context->vd, &context->vc, &header,
                                         &header_comm, &header_code))) {
+        ret = vorbis_error_to_averror(ret);
+        goto error;
+    }
 
     avccontext->extradata_size =
         1 + xiph_len(header.bytes) + xiph_len(header_comm.bytes) +
         header_code.bytes;
     p = avccontext->extradata =
         av_malloc(avccontext->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+    if (!p) {
+        ret = AVERROR(ENOMEM);
+        goto error;
+    }
     p[0] = 2;
     offset = 1;
     offset += av_xiphlacing(&p[offset], header.bytes);
@@ -208,9 +234,15 @@ static av_cold int oggvorbis_encode_init(AVCodecContext *avccontext)
     avccontext->frame_size = OGGVORBIS_FRAME_SIZE;
 
     avccontext->coded_frame = avcodec_alloc_frame();
-    avccontext->coded_frame->key_frame = 1;
+    if (!avccontext->coded_frame) {
+        ret = AVERROR(ENOMEM);
+        goto error;
+    }
 
     return 0;
+error:
+    oggvorbis_encode_close(avccontext);
+    return ret;
 }
 
 static int oggvorbis_encode_frame(AVCodecContext *avccontext,
@@ -286,23 +318,6 @@ static int oggvorbis_encode_frame(AVCodecContext *avccontext,
     return l;
 }
 
-static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext)
-{
-    OggVorbisContext *context = avccontext->priv_data;
-    /* ogg_packet op ; */
-
-    vorbis_analysis_wrote(&context->vd, 0); /* notify vorbisenc this is EOF */
-
-    vorbis_block_clear(&context->vb);
-    vorbis_dsp_clear(&context->vd);
-    vorbis_info_clear(&context->vi);
-
-    av_freep(&avccontext->coded_frame);
-    av_freep(&avccontext->extradata);
-
-    return 0;
-}
-
 AVCodec ff_libvorbis_encoder = {
     .name = "libvorbis",
     .type = AVMEDIA_TYPE_AUDIO,

@@ -75,7 +75,7 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx)
     if (channels <= 0 || channels > 2){
         av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed in mp2\n", channels);
-        return -1;
+        return AVERROR(EINVAL);
     }
     bitrate = bitrate / 1000;
     s->nb_channels = channels;
@@ -93,7 +93,7 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx)
     }
     if (i == 3){
         av_log(avctx, AV_LOG_ERROR, "Sampling rate %d is not allowed in mp2\n", freq);
-        return -1;
+        return AVERROR(EINVAL);
     }
     s->freq_index = i;
@@ -104,7 +104,7 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx)
     }
     if (i == 15){
         av_log(avctx, AV_LOG_ERROR, "bitrate %d is not allowed in mp2\n", bitrate);
-        return -1;
+        return AVERROR(EINVAL);
     }
     s->bitrate_index = i;
@@ -181,7 +181,8 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx)
     }
 
     avctx->coded_frame= avcodec_alloc_frame();
-    avctx->coded_frame->key_frame= 1;
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
 
     return 0;
 }

@@ -127,42 +127,6 @@ static void apply_mdct(NellyMoserEncodeContext *s)
     s->mdct_ctx.mdct_calc(&s->mdct_ctx, s->mdct_out + NELLY_BUF_LEN, s->buf[s->bufsel] + NELLY_BUF_LEN);
 }
 
-static av_cold int encode_init(AVCodecContext *avctx)
-{
-    NellyMoserEncodeContext *s = avctx->priv_data;
-    int i;
-
-    if (avctx->channels != 1) {
-        av_log(avctx, AV_LOG_ERROR, "Nellymoser supports only 1 channel\n");
-        return -1;
-    }
-
-    if (avctx->sample_rate != 8000 && avctx->sample_rate != 16000 &&
-        avctx->sample_rate != 11025 &&
-        avctx->sample_rate != 22050 && avctx->sample_rate != 44100 &&
-        avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
-        av_log(avctx, AV_LOG_ERROR, "Nellymoser works only with 8000, 16000, 11025, 22050 and 44100 sample rate\n");
-        return -1;
-    }
-
-    avctx->frame_size = NELLY_SAMPLES;
-    s->avctx = avctx;
-    ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0);
-    ff_dsputil_init(&s->dsp, avctx);
-
-    /* Generate overlap window */
-    ff_sine_window_init(ff_sine_128, 128);
-    for (i = 0; i < POW_TABLE_SIZE; i++)
-        pow_table[i] = -pow(2, -i / 2048.0 - 3.0 + POW_TABLE_OFFSET);
-
-    if (s->avctx->trellis) {
-        s->opt = av_malloc(NELLY_BANDS * OPT_SIZE * sizeof(float ));
-        s->path = av_malloc(NELLY_BANDS * OPT_SIZE * sizeof(uint8_t));
-    }
-
-    return 0;
-}
-
 static av_cold int encode_end(AVCodecContext *avctx)
 {
     NellyMoserEncodeContext *s = avctx->priv_data;
@@ -173,10 +137,61 @@ static av_cold int encode_end(AVCodecContext *avctx)
         av_free(s->opt);
         av_free(s->path);
     }
+    av_freep(&avctx->coded_frame);
 
     return 0;
 }
 
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+    NellyMoserEncodeContext *s = avctx->priv_data;
+    int i, ret;
+
+    if (avctx->channels != 1) {
+        av_log(avctx, AV_LOG_ERROR, "Nellymoser supports only 1 channel\n");
+        return AVERROR(EINVAL);
+    }
+
+    if (avctx->sample_rate != 8000 && avctx->sample_rate != 16000 &&
+        avctx->sample_rate != 11025 &&
+        avctx->sample_rate != 22050 && avctx->sample_rate != 44100 &&
+        avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
+        av_log(avctx, AV_LOG_ERROR, "Nellymoser works only with 8000, 16000, 11025, 22050 and 44100 sample rate\n");
+        return AVERROR(EINVAL);
+    }
+
+    avctx->frame_size = NELLY_SAMPLES;
+    s->avctx = avctx;
+    if ((ret = ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0)) < 0)
+        goto error;
+    ff_dsputil_init(&s->dsp, avctx);
+
+    /* Generate overlap window */
+    ff_sine_window_init(ff_sine_128, 128);
+    for (i = 0; i < POW_TABLE_SIZE; i++)
+        pow_table[i] = -pow(2, -i / 2048.0 - 3.0 + POW_TABLE_OFFSET);
+
+    if (s->avctx->trellis) {
+        s->opt = av_malloc(NELLY_BANDS * OPT_SIZE * sizeof(float ));
+        s->path = av_malloc(NELLY_BANDS * OPT_SIZE * sizeof(uint8_t));
+        if (!s->opt || !s->path) {
+            ret = AVERROR(ENOMEM);
+            goto error;
+        }
+    }
+
+    avctx->coded_frame = avcodec_alloc_frame();
+    if (!avctx->coded_frame) {
+        ret = AVERROR(ENOMEM);
+        goto error;
+    }
+
+    return 0;
+error:
+    encode_end(avctx);
+    return ret;
+}
+
 #define find_best(val, table, LUT, LUT_add, LUT_size) \
     best_idx = \
         LUT[av_clip ((lrintf(val) >> 8) + LUT_add, 0, LUT_size - 1)]; \

@@ -33,15 +33,20 @@
 #include "ra144.h"
 
+static av_cold int ra144_encode_close(AVCodecContext *avctx)
+{
+    RA144Context *ractx = avctx->priv_data;
+    ff_lpc_end(&ractx->lpc_ctx);
+    av_freep(&avctx->coded_frame);
+    return 0;
+}
+
 static av_cold int ra144_encode_init(AVCodecContext * avctx)
 {
     RA144Context *ractx;
     int ret;
 
-    if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) {
-        av_log(avctx, AV_LOG_ERROR, "invalid sample format\n");
-        return -1;
-    }
     if (avctx->channels != 1) {
         av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n",
                avctx->channels);
@@ -55,15 +60,19 @@ static av_cold int ra144_encode_init(AVCodecContext * avctx)
     ractx->avctx = avctx;
 
     ret = ff_lpc_init(&ractx->lpc_ctx, avctx->frame_size, LPC_ORDER,
                       FF_LPC_TYPE_LEVINSON);
-    return ret;
-}
+    if (ret < 0)
+        goto error;
 
-static av_cold int ra144_encode_close(AVCodecContext *avctx)
-{
-    RA144Context *ractx = avctx->priv_data;
-    ff_lpc_end(&ractx->lpc_ctx);
+    avctx->coded_frame = avcodec_alloc_frame();
+    if (!avctx->coded_frame) {
+        ret = AVERROR(ENOMEM);
+        goto error;
+    }
+
     return 0;
+error:
+    ra144_encode_close(avctx);
+    return ret;
 }
@@ -432,6 +441,7 @@ static int ra144_encode_frame(AVCodecContext *avctx, uint8_t *frame,
     int16_t block_coefs[NBLOCKS][LPC_ORDER];
     int lpc_refl[LPC_ORDER]; /**< reflection coefficients of the frame */
     unsigned int refl_rms[NBLOCKS]; /**< RMS of the reflection coefficients */
+    const int16_t *samples = data;
     int energy = 0;
     int i, idx;
@@ -506,7 +516,7 @@
     ractx->lpc_refl_rms[1] = ractx->lpc_refl_rms[0];
     FFSWAP(unsigned int *, ractx->lpc_coef[0], ractx->lpc_coef[1]);
     for (i = 0; i < NBLOCKS * BLOCKSIZE; i++)
-        ractx->curr_block[i] = *((int16_t *)data + i) >> 2;
+        ractx->curr_block[i] = samples[i] >> 2;
 
     return FRAMESIZE;
 }

@@ -25,9 +25,8 @@
 #include "avcodec.h"
 #include "bytestream.h"
 
-#define ROQ_FIRST_FRAME_SIZE (735*8)
 #define ROQ_FRAME_SIZE 735
+#define ROQ_HEADER_SIZE 8
 
 #define MAX_DPCM (127*127)
@@ -35,34 +34,59 @@
 typedef struct
 {
     short lastSample[2];
+    int input_frames;
+    int buffered_samples;
+    int16_t *frame_buffer;
 } ROQDPCMContext;
 
+static av_cold int roq_dpcm_encode_close(AVCodecContext *avctx)
+{
+    ROQDPCMContext *context = avctx->priv_data;
+
+    av_freep(&avctx->coded_frame);
+    av_freep(&context->frame_buffer);
+
+    return 0;
+}
+
 static av_cold int roq_dpcm_encode_init(AVCodecContext *avctx)
 {
     ROQDPCMContext *context = avctx->priv_data;
+    int ret;
 
     if (avctx->channels > 2) {
         av_log(avctx, AV_LOG_ERROR, "Audio must be mono or stereo\n");
-        return -1;
+        return AVERROR(EINVAL);
     }
     if (avctx->sample_rate != 22050) {
         av_log(avctx, AV_LOG_ERROR, "Audio must be 22050 Hz\n");
-        return -1;
-    }
-    if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) {
-        av_log(avctx, AV_LOG_ERROR, "Audio must be signed 16-bit\n");
-        return -1;
+        return AVERROR(EINVAL);
     }
 
-    avctx->frame_size = ROQ_FIRST_FRAME_SIZE;
+    avctx->frame_size = ROQ_FRAME_SIZE;
+    avctx->bit_rate = (ROQ_HEADER_SIZE + ROQ_FRAME_SIZE * avctx->channels) *
+                      (22050 / ROQ_FRAME_SIZE) * 8;
+
+    context->frame_buffer = av_malloc(8 * ROQ_FRAME_SIZE * avctx->channels *
+                                      sizeof(*context->frame_buffer));
+    if (!context->frame_buffer) {
+        ret = AVERROR(ENOMEM);
+        goto error;
+    }
 
     context->lastSample[0] = context->lastSample[1] = 0;
 
     avctx->coded_frame= avcodec_alloc_frame();
-    if (!avctx->coded_frame)
-        return AVERROR(ENOMEM);
+    if (!avctx->coded_frame) {
+        ret = AVERROR(ENOMEM);
+        goto error;
+    }
 
     return 0;
+error:
+    roq_dpcm_encode_close(avctx);
+    return ret;
 }
 
 static unsigned char dpcm_predict(short *previous, short current)
@@ -108,25 +132,45 @@ static unsigned char dpcm_predict(short *previous, short current)
 static int roq_dpcm_encode_frame(AVCodecContext *avctx,
                                  unsigned char *frame, int buf_size, void *data)
 {
-    int i, samples, stereo, ch;
-    const short *in;
-    unsigned char *out;
+    int i, stereo, data_size;
+    const int16_t *in = data;
+    uint8_t *out = frame;
 
     ROQDPCMContext *context = avctx->priv_data;
 
     stereo = (avctx->channels == 2);
 
+    if (!data && context->input_frames >= 8)
+        return 0;
+
+    if (data && context->input_frames < 8) {
+        memcpy(&context->frame_buffer[context->buffered_samples * avctx->channels],
+               in, avctx->frame_size * avctx->channels * sizeof(*in));
+        context->buffered_samples += avctx->frame_size;
+        if (context->input_frames < 7) {
+            context->input_frames++;
+            return 0;
+        }
+        in = context->frame_buffer;
+    }
+
     if (stereo) {
         context->lastSample[0] &= 0xFF00;
         context->lastSample[1] &= 0xFF00;
     }
 
-    out = frame;
-    in = data;
+    if (context->input_frames == 7 || !data)
+        data_size = avctx->channels * context->buffered_samples;
+    else
+        data_size = avctx->channels * avctx->frame_size;
+
+    if (buf_size < ROQ_HEADER_SIZE + data_size) {
+        av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
+        return AVERROR(EINVAL);
+    }
 
     bytestream_put_byte(&out, stereo ? 0x21 : 0x20);
     bytestream_put_byte(&out, 0x10);
-    bytestream_put_le32(&out, avctx->frame_size*avctx->channels);
+    bytestream_put_le32(&out, data_size);
 
     if (stereo) {
         bytestream_put_byte(&out, (context->lastSample[1])>>8);
@@ -135,23 +179,15 @@ static int roq_dpcm_encode_frame(AVCodecContext *avctx,
         bytestream_put_le16(&out, context->lastSample[0]);
 
     /* Write the actual samples */
-    samples = avctx->frame_size;
-    for (i=0; i<samples; i++)
-        for (ch=0; ch<avctx->channels; ch++)
-            *out++ = dpcm_predict(&context->lastSample[ch], *in++);
+    for (i = 0; i < data_size; i++)
+        *out++ = dpcm_predict(&context->lastSample[i & 1], *in++);
 
-    /* Use smaller frames from now on */
-    avctx->frame_size = ROQ_FRAME_SIZE;
+    context->input_frames++;
+    if (!data)
+        context->input_frames = FFMAX(context->input_frames, 8);
 
     /* Return the result size */
-    return out - frame;
-}
-
-static av_cold int roq_dpcm_encode_close(AVCodecContext *avctx)
-{
-    av_freep(&avctx->coded_frame);
-
-    return 0;
+    return ROQ_HEADER_SIZE + data_size;
 }
@@ -162,6 +198,7 @@ AVCodec ff_roq_dpcm_encoder = {
     .init = roq_dpcm_encode_init,
     .encode = roq_dpcm_encode_frame,
     .close = roq_dpcm_encode_close,
+    .capabilities = CODEC_CAP_DELAY,
     .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("id RoQ DPCM"),
 };
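As a quick check on the bit_rate formula introduced above: at 22050 Hz with 735-sample frames there are 22050 / 735 = 30 chunks per second, so mono comes out to (8 + 735) * 30 * 8 = 178320 bit/s and stereo to (8 + 2 * 735) * 30 * 8 = 354720 bit/s.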

@@ -603,6 +603,8 @@ int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
         return s->get_buffer(s, pic);
     }
 
+    assert(s->pix_fmt == pic->pix_fmt);
+
     /* If internal buffer type return the same buffer */
     if(pic->type == FF_BUFFER_TYPE_INTERNAL) {
         return 0;
@@ -967,6 +969,8 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
     int user_packet = !!avpkt->data;
     int nb_samples;
 
+    *got_packet_ptr = 0;
+
     if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
         av_init_packet(avpkt);
         avpkt->size = 0;
@@ -988,7 +992,6 @@
     }
 
     if (avctx->codec->encode2) {
-        *got_packet_ptr = 0;
         ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
         if (!ret && *got_packet_ptr) {
             if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
@@ -1196,10 +1199,11 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
     int ret;
     int user_packet = !!avpkt->data;
 
+    *got_packet_ptr = 0;
+
     if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
         av_init_packet(avpkt);
         avpkt->size = 0;
-        *got_packet_ptr = 0;
         return 0;
     }
@@ -1208,17 +1212,15 @@
     av_assert0(avctx->codec->encode2);
 
-    *got_packet_ptr = 0;
     ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
     if (!ret) {
         if (!*got_packet_ptr)
             avpkt->size = 0;
         else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY))
             avpkt->pts = avpkt->dts = frame->pts;
-    }
 
-    if (!ret)
         avctx->frame_number++;
+    }
 
     emms_c();
 
     return ret;

@@ -1710,6 +1710,17 @@ static av_cold int vorbis_decode_close(AVCodecContext *avccontext)
     return 0;
 }
 
+static av_cold void vorbis_decode_flush(AVCodecContext *avccontext)
+{
+    vorbis_context *vc = avccontext->priv_data;
+
+    if (vc->saved) {
+        memset(vc->saved, 0, (vc->blocksize[1] / 4) * vc->audio_channels *
+                             sizeof(*vc->saved));
+    }
+    vc->previous_window = 0;
+}
+
 AVCodec ff_vorbis_decoder = {
     .name = "vorbis",
     .type = AVMEDIA_TYPE_AUDIO,
@@ -1718,6 +1729,7 @@ AVCodec ff_vorbis_decoder = {
     .init = vorbis_decode_init,
     .close = vorbis_decode_close,
     .decode = vorbis_decode_frame,
+    .flush = vorbis_decode_flush,
     .capabilities = CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Vorbis"),
     .channel_layouts = ff_vorbis_channel_layouts,

@@ -155,7 +155,7 @@ static int cb_lookup_vals(int lookup, int dimentions, int entries)
     return 0;
 }
 
-static void ready_codebook(vorbis_enc_codebook *cb)
+static int ready_codebook(vorbis_enc_codebook *cb)
 {
     int i;
@@ -167,6 +167,8 @@ static void ready_codebook(vorbis_enc_codebook *cb)
         int vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries);
         cb->dimentions = av_malloc(sizeof(float) * cb->nentries * cb->ndimentions);
         cb->pow2 = av_mallocz(sizeof(float) * cb->nentries);
+        if (!cb->dimentions || !cb->pow2)
+            return AVERROR(ENOMEM);
         for (i = 0; i < cb->nentries; i++) {
             float last = 0;
             int j;
@@ -187,13 +189,16 @@ static void ready_codebook(vorbis_enc_codebook *cb)
                 cb->pow2[i] /= 2.;
             }
         }
     }
+    return 0;
 }
 
-static void ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc)
+static int ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc)
 {
     int i;
     assert(rc->type == 2);
     rc->maxes = av_mallocz(sizeof(float[2]) * rc->classifications);
+    if (!rc->maxes)
+        return AVERROR(ENOMEM);
     for (i = 0; i < rc->classifications; i++) {
         int j;
         vorbis_enc_codebook * cb;
@@ -223,15 +228,16 @@ static void ready_residue(vorbis_enc_residue *rc, vorbis_enc_context *venc)
         rc->maxes[i][0] += 0.8;
         rc->maxes[i][1] += 0.8;
     }
+    return 0;
 }
 
-static void create_vorbis_context(vorbis_enc_context *venc,
-                                  AVCodecContext *avccontext)
+static int create_vorbis_context(vorbis_enc_context *venc,
+                                 AVCodecContext *avccontext)
 {
     vorbis_enc_floor *fc;
     vorbis_enc_residue *rc;
     vorbis_enc_mapping *mc;
-    int i, book;
+    int i, book, ret;
 
     venc->channels = avccontext->channels;
     venc->sample_rate = avccontext->sample_rate;
@@ -239,6 +245,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
 
     venc->ncodebooks = FF_ARRAY_ELEMS(cvectors);
     venc->codebooks = av_malloc(sizeof(vorbis_enc_codebook) * venc->ncodebooks);
+    if (!venc->codebooks)
+        return AVERROR(ENOMEM);
 
     // codebook 0..14 - floor1 book, values 0..255
     // codebook 15 residue masterbook
@@ -255,27 +263,36 @@ static void create_vorbis_context(vorbis_enc_context *venc,
         cb->lens = av_malloc(sizeof(uint8_t) * cb->nentries);
         cb->codewords = av_malloc(sizeof(uint32_t) * cb->nentries);
+        if (!cb->lens || !cb->codewords)
+            return AVERROR(ENOMEM);
         memcpy(cb->lens, cvectors[book].clens, cvectors[book].len);
         memset(cb->lens + cvectors[book].len, 0, cb->nentries - cvectors[book].len);
 
         if (cb->lookup) {
             vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries);
             cb->quantlist = av_malloc(sizeof(int) * vals);
+            if (!cb->quantlist)
+                return AVERROR(ENOMEM);
             for (i = 0; i < vals; i++)
                 cb->quantlist[i] = cvectors[book].quant[i];
         } else {
             cb->quantlist = NULL;
         }
-        ready_codebook(cb);
+        if ((ret = ready_codebook(cb)) < 0)
+            return ret;
     }
 
     venc->nfloors = 1;
     venc->floors = av_malloc(sizeof(vorbis_enc_floor) * venc->nfloors);
+    if (!venc->floors)
+        return AVERROR(ENOMEM);
 
     // just 1 floor
     fc = &venc->floors[0];
     fc->partitions = NUM_FLOOR_PARTITIONS;
     fc->partition_to_class = av_malloc(sizeof(int) * fc->partitions);
+    if (!fc->partition_to_class)
+        return AVERROR(ENOMEM);
     fc->nclasses = 0;
     for (i = 0; i < fc->partitions; i++) {
         static const int a[] = {0, 1, 2, 2, 3, 3, 4, 4};
@@ -284,6 +301,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
     }
     fc->nclasses++;
     fc->classes = av_malloc(sizeof(vorbis_enc_floor_class) * fc->nclasses);
+    if (!fc->classes)
+        return AVERROR(ENOMEM);
     for (i = 0; i < fc->nclasses; i++) {
         vorbis_enc_floor_class * c = &fc->classes[i];
         int j, books;
@@ -292,6 +311,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
         c->masterbook = floor_classes[i].masterbook;
         books = (1 << c->subclass);
         c->books = av_malloc(sizeof(int) * books);
+        if (!c->books)
+            return AVERROR(ENOMEM);
         for (j = 0; j < books; j++)
            c->books[j] = floor_classes[i].nbooks[j];
     }
@@ -303,6 +324,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
         fc->values += fc->classes[fc->partition_to_class[i]].dim;
 
     fc->list = av_malloc(sizeof(vorbis_floor1_entry) * fc->values);
+    if (!fc->list)
+        return AVERROR(ENOMEM);
     fc->list[0].x = 0;
     fc->list[1].x = 1 << fc->rangebits;
     for (i = 2; i < fc->values; i++) {
@@ -317,6 +340,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
 
     venc->nresidues = 1;
     venc->residues = av_malloc(sizeof(vorbis_enc_residue) * venc->nresidues);
+    if (!venc->residues)
+        return AVERROR(ENOMEM);
 
     // single residue
     rc = &venc->residues[0];
@@ -327,6 +352,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
     rc->classifications = 10;
     rc->classbook = 15;
     rc->books = av_malloc(sizeof(*rc->books) * rc->classifications);
+    if (!rc->books)
+        return AVERROR(ENOMEM);
     {
         static const int8_t a[10][8] = {
             { -1, -1, -1, -1, -1, -1, -1, -1, },
@@ -342,19 +369,26 @@ static void create_vorbis_context(vorbis_enc_context *venc,
         };
         memcpy(rc->books, a, sizeof a);
     }
-    ready_residue(rc, venc);
+    if ((ret = ready_residue(rc, venc)) < 0)
+        return ret;
 
     venc->nmappings = 1;
     venc->mappings = av_malloc(sizeof(vorbis_enc_mapping) * venc->nmappings);
+    if (!venc->mappings)
+        return AVERROR(ENOMEM);
 
     // single mapping
     mc = &venc->mappings[0];
     mc->submaps = 1;
     mc->mux = av_malloc(sizeof(int) * venc->channels);
+    if (!mc->mux)
+        return AVERROR(ENOMEM);
     for (i = 0; i < venc->channels; i++)
         mc->mux[i] = 0;
     mc->floor = av_malloc(sizeof(int) * mc->submaps);
     mc->residue = av_malloc(sizeof(int) * mc->submaps);
+    if (!mc->floor || !mc->residue)
+        return AVERROR(ENOMEM);
     for (i = 0; i < mc->submaps; i++) {
         mc->floor[i] = 0;
         mc->residue[i] = 0;
@@ -362,6 +396,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
     mc->coupling_steps = venc->channels == 2 ? 1 : 0;
     mc->magnitude = av_malloc(sizeof(int) * mc->coupling_steps);
     mc->angle = av_malloc(sizeof(int) * mc->coupling_steps);
+    if (!mc->magnitude || !mc->angle)
+        return AVERROR(ENOMEM);
     if (mc->coupling_steps) {
         mc->magnitude[0] = 0;
         mc->angle[0] = 1;
@@ -369,6 +405,8 @@ static void create_vorbis_context(vorbis_enc_context *venc,
 
     venc->nmodes = 1;
     venc->modes = av_malloc(sizeof(vorbis_enc_mode) * venc->nmodes);
+    if (!venc->modes)
+        return AVERROR(ENOMEM);
 
     // single mode
     venc->modes[0].blockflag = 0;
@@ -379,12 +417,18 @@ static void create_vorbis_context(vorbis_enc_context *venc,
     venc->samples = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]));
     venc->floor = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2);
     venc->coeffs = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2);
+    if (!venc->saved || !venc->samples || !venc->floor || !venc->coeffs)
+        return AVERROR(ENOMEM);
 
     venc->win[0] = ff_vorbis_vwin[venc->log2_blocksize[0] - 6];
     venc->win[1] = ff_vorbis_vwin[venc->log2_blocksize[1] - 6];
 
-    ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0, 1.0);
-    ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0, 1.0);
+    if ((ret = ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0, 1.0)) < 0)
+        return ret;
+    if ((ret = ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0, 1.0)) < 0)
+        return ret;
+
+    return 0;
 }
 
 static void put_float(PutBitContext *pb, float f)
@@ -647,6 +691,8 @@ static int put_main_header(vorbis_enc_context *venc, uint8_t **out)
 
     len = hlens[0] + hlens[1] + hlens[2];
     p = *out = av_mallocz(64 + len + len/255);
+    if (!p)
+        return AVERROR(ENOMEM);
 
     *p++ = 2;
     p += av_xiphlacing(p, hlens[0]);
@@ -952,33 +998,6 @@ static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *a
     return 1;
 }
 
-static av_cold int vorbis_encode_init(AVCodecContext *avccontext)
-{
-    vorbis_enc_context *venc = avccontext->priv_data;
-
-    if (avccontext->channels != 2) {
-        av_log(avccontext, AV_LOG_ERROR, "Current FFmpeg Vorbis encoder only supports 2 channels.\n");
-        return -1;
-    }
-
-    create_vorbis_context(venc, avccontext);
-
-    if (avccontext->flags & CODEC_FLAG_QSCALE)
-        venc->quality = avccontext->global_quality / (float)FF_QP2LAMBDA / 10.;
-    else
-        venc->quality = 0.03;
-    venc->quality *= venc->quality;
-
-    avccontext->extradata_size = put_main_header(venc, (uint8_t**)&avccontext->extradata);
-
-    avccontext->frame_size = 1 << (venc->log2_blocksize[0] - 1);
-
-    avccontext->coded_frame = avcodec_alloc_frame();
-    avccontext->coded_frame->key_frame = 1;
-
-    return 0;
-}
-
 static int vorbis_encode_frame(AVCodecContext *avccontext,
                                unsigned char *packets,
                                int buf_size, void *data)
@@ -1102,6 +1121,43 @@ static av_cold int vorbis_encode_close(AVCodecContext *avccontext)
     return 0 ;
 }
 
+static av_cold int vorbis_encode_init(AVCodecContext *avccontext)
+{
+    vorbis_enc_context *venc = avccontext->priv_data;
+    int ret;
+
+    if (avccontext->channels != 2) {
+        av_log(avccontext, AV_LOG_ERROR, "Current FFmpeg Vorbis encoder only supports 2 channels.\n");
+        return -1;
+    }
+
+    if ((ret = create_vorbis_context(venc, avccontext)) < 0)
+        goto error;
+
+    if (avccontext->flags & CODEC_FLAG_QSCALE)
+        venc->quality = avccontext->global_quality / (float)FF_QP2LAMBDA / 10.;
+    else
+        venc->quality = 0.03;
+    venc->quality *= venc->quality;
+
+    if ((ret = put_main_header(venc, (uint8_t**)&avccontext->extradata)) < 0)
+        goto error;
+    avccontext->extradata_size = ret;
+
+    avccontext->frame_size = 1 << (venc->log2_blocksize[0] - 1);
+
+    avccontext->coded_frame = avcodec_alloc_frame();
+    if (!avccontext->coded_frame) {
+        ret = AVERROR(ENOMEM);
+        goto error;
+    }
+
+    return 0;
+error:
+    vorbis_encode_close(avccontext);
+    return ret;
+}
+
 AVCodec ff_vorbis_encoder = {
     .name = "vorbis",
     .type = AVMEDIA_TYPE_AUDIO,

@@ -44,7 +44,7 @@ static int xwd_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 {
     enum PixelFormat pix_fmt = avctx->pix_fmt;
     uint32_t pixdepth, bpp, bpad, ncolors = 0, lsize, vclass, be = 0;
-    uint32_t rgb[3] = { 0 };
+    uint32_t rgb[3] = { 0 }, bitorder = 0;
     uint32_t header_size;
     int i, out_size, ret;
     uint8_t *ptr, *buf;
@@ -133,6 +133,8 @@ static int xwd_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         ncolors = 256;
         break;
     case PIX_FMT_MONOWHITE:
+        be       = 1;
+        bitorder = 1;
         bpp      = 1;
         bpad     = 8;
         vclass   = XWD_STATIC_GRAY;
@@ -164,7 +166,7 @@ static int xwd_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     bytestream_put_be32(&buf, 0);        // bitmap x offset
     bytestream_put_be32(&buf, be);       // byte order
     bytestream_put_be32(&buf, 32);       // bitmap unit
-    bytestream_put_be32(&buf, be);       // bit-order of image data
+    bytestream_put_be32(&buf, bitorder); // bit-order of image data
     bytestream_put_be32(&buf, bpad);     // bitmap scan-line pad in bits
     bytestream_put_be32(&buf, bpp);      // bits per pixel
     bytestream_put_be32(&buf, lsize);    // bytes per scan-line
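The XWD header stores byte order and bit order as two separate fields, so the previously shared be flag is split: 1-bpp PIX_FMT_MONOWHITE data is packed most-significant-bit first, hence both fields are set to 1 (MSBFirst in X11 terms) for that format, while the other pixel formats keep the bit order at 0. A hedged sketch of what MSB-first 1-bpp packing means (pack_row_msb_first is an illustrative helper, not part of libavcodec):

    #include <stddef.h>
    #include <stdint.h>

    /* Pack one row of byte-per-pixel flags into 1 bpp, MSB first:
     * the leftmost pixel of each group of eight lands in bit 7. */
    static void pack_row_msb_first(uint8_t *dst, const uint8_t *src, size_t width)
    {
        size_t x;
        for (x = 0; x < width; x++) {
            if (x % 8 == 0)
                dst[x / 8] = 0;                  /* start a fresh output byte */
            if (src[x])
                dst[x / 8] |= 0x80 >> (x % 8);   /* leftmost pixel -> highest bit */
        }
    }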


@@ -697,16 +697,19 @@ static int ebml_read_float(AVIOContext *pb, int size, double *num)
  */
 static int ebml_read_ascii(AVIOContext *pb, int size, char **str)
 {
-    av_free(*str);
+    char *res;
+
     /* EBML strings are usually not 0-terminated, so we allocate one
      * byte more, read the string and NULL-terminate it ourselves. */
-    if (!(*str = av_malloc(size + 1)))
+    if (!(res = av_malloc(size + 1)))
         return AVERROR(ENOMEM);
-    if (avio_read(pb, (uint8_t *) *str, size) != size) {
-        av_freep(str);
+    if (avio_read(pb, (uint8_t *) res, size) != size) {
+        av_free(res);
         return AVERROR(EIO);
     }
-    (*str)[size] = '\0';
+    (res)[size] = '\0';
+    av_free(*str);
+    *str = res;
 
     return 0;
 }
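The old ebml_read_ascii() freed *str before knowing whether the replacement read would succeed, so on any failure the caller's previous string was destroyed; the new version reads into a temporary buffer and only swaps it into *str once the read has fully completed. A generic sketch of that pattern, assuming only the C standard library (replace_string and the read callback are illustrative names, not libavformat APIs):

    #include <errno.h>
    #include <stdlib.h>

    typedef int (*read_fn)(void *opaque, char *buf, size_t size);

    static int replace_string(char **str, size_t size, read_fn read_cb, void *opaque)
    {
        char *res = malloc(size + 1);
        if (!res)
            return -ENOMEM;

        if (read_cb(opaque, res, size) != (int)size) {
            free(res);          /* *str still holds the caller's old, valid value */
            return -EIO;
        }
        res[size] = '\0';

        free(*str);             /* only now is the previous string released */
        *str = res;
        return 0;
    }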


@@ -154,7 +154,7 @@
  */
 
 #define LIBAVUTIL_VERSION_MAJOR 51
-#define LIBAVUTIL_VERSION_MINOR 40
+#define LIBAVUTIL_VERSION_MINOR 41
 #define LIBAVUTIL_VERSION_MICRO 100
 
 #define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \


@@ -41,6 +41,7 @@ int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
     case AVERROR_PATCHWELCOME:      errstr = "Not yet implemented in FFmpeg, patches welcome"; break;
     case AVERROR_PROTOCOL_NOT_FOUND:errstr = "Protocol not found"; break;
     case AVERROR_STREAM_NOT_FOUND:  errstr = "Stream not found"; break;
+    case AVERROR_UNKNOWN:           errstr = "Unknown error occurred"; break;
     }
 
     if (errstr) {


@@ -64,6 +64,7 @@
  * it has been introduced in Libav after our AVERROR_BUG and with a modified value.
  */
 #define AVERROR_BUG2               (-MKTAG( 'B','U','G',' '))
+#define AVERROR_UNKNOWN            (-MKTAG( 'U','N','K','N')) ///< Unknown error, typically from an external library
 
 /**
  * Put a description of the AVERROR code errnum in errbuf.
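AVERROR_UNKNOWN gives wrappers around external libraries something to return when the library reports a failure with no usable detail; av_strerror() then produces the "Unknown error occurred" string registered in the error.c change above. A hedged usage sketch (external_lib_encode() is a hypothetical stand-in for such a third-party call):

    #include <stdio.h>
    #include <libavutil/error.h>

    static int external_lib_encode(void)
    {
        return 1;                     /* opaque, library-specific failure code */
    }

    static int wrap_encode(void)
    {
        if (external_lib_encode() != 0)
            return AVERROR_UNKNOWN;   /* nothing more specific can be reported */
        return 0;
    }

    int main(void)
    {
        char buf[256];
        int ret = wrap_encode();
        if (ret < 0 && av_strerror(ret, buf, sizeof(buf)) >= 0)
            printf("encode failed: %s\n", buf);
        return 0;
    }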


@@ -46,6 +46,7 @@ include $(SRC_PATH)/tests/fate/amrnb.mak
 include $(SRC_PATH)/tests/fate/amrwb.mak
 include $(SRC_PATH)/tests/fate/atrac.mak
 include $(SRC_PATH)/tests/fate/audio.mak
+include $(SRC_PATH)/tests/fate/cdxl.mak
 include $(SRC_PATH)/tests/fate/dct.mak
 include $(SRC_PATH)/tests/fate/demux.mak
 include $(SRC_PATH)/tests/fate/dfa.mak

tests/fate/cdxl.mak (new file)

@@ -0,0 +1,14 @@
FATE_CDXL += fate-cdxl-ham6
fate-cdxl-ham6: CMD = framecrc -i $(SAMPLES)/cdxl/cat.cdxl -an -frames:v 16
FATE_CDXL += fate-cdxl-ham8
fate-cdxl-ham8: CMD = framecrc -i $(SAMPLES)/cdxl/mirage.cdxl -an -frames:v 1
FATE_CDXL += fate-cdxl-pal8
fate-cdxl-pal8: CMD = framecrc -i $(SAMPLES)/cdxl/maku.cdxl -pix_fmt rgb24 -frames:v 11
FATE_CDXL += fate-cdxl-pal8-small
fate-cdxl-pal8-small: CMD = framecrc -i $(SAMPLES)/cdxl/fruit.cdxl -an -pix_fmt rgb24 -frames:v 46
FATE_TESTS += $(FATE_CDXL)
fate-cdxl: $(FATE_CDXL)

tests/ref/fate/cdxl-ham6 (new file)

@@ -0,0 +1,17 @@
#tb 0: 52/525
0, 0, 0, 1, 57600, 0x87887a7b
0, 1, 1, 1, 57600, 0x10c301d2
0, 2, 2, 1, 57600, 0xd1a6f910
0, 3, 3, 1, 57600, 0x20242bb9
0, 4, 4, 1, 57600, 0xae33cb7f
0, 5, 5, 1, 57600, 0x501b82c8
0, 6, 6, 1, 57600, 0x84199043
0, 7, 7, 1, 57600, 0x946a6dbb
0, 8, 8, 1, 57600, 0xeacea671
0, 9, 9, 1, 57600, 0x77b8723f
0, 10, 10, 1, 57600, 0x371cdb09
0, 11, 11, 1, 57600, 0xa16ef5ee
0, 12, 12, 1, 57600, 0xcb6abd9e
0, 13, 13, 1, 57600, 0xb73e800f
0, 14, 14, 1, 57600, 0x368bd93e
0, 15, 15, 1, 57600, 0xcde72dc5

tests/ref/fate/cdxl-ham8 (new file)

@@ -0,0 +1,2 @@
#tb 0: 3/158
0, 0, 0, 1, 67584, 0xce0cade5

tests/ref/fate/cdxl-pal8 (new file)

@@ -0,0 +1,12 @@
#tb 0: 12/601
0, 0, 0, 1, 67584, 0x5eae629b
0, 1, 1, 1, 67584, 0x32591227
0, 2, 2, 1, 67584, 0x4e4424c7
0, 3, 3, 1, 67584, 0x70db0134
0, 4, 4, 1, 67584, 0x3550ed0b
0, 5, 5, 1, 67584, 0x86fe3eef
0, 6, 6, 1, 67584, 0x3414bb33
0, 7, 7, 1, 67584, 0x667bfb91
0, 8, 8, 1, 67584, 0x6e1a4ccb
0, 9, 9, 1, 67584, 0xf723f9ae
0, 10, 10, 1, 67584, 0x88481d5d


@@ -0,0 +1,47 @@
#tb 0: 368/11025
0, 0, 0, 1, 30720, 0x0d552cfd
0, 1, 1, 1, 30720, 0x3cf93291
0, 2, 2, 1, 30720, 0xe45b2868
0, 3, 3, 1, 30720, 0xb5df289b
0, 4, 4, 1, 30720, 0x2562259e
0, 5, 5, 1, 30720, 0xbf171878
0, 6, 6, 1, 30720, 0x695b1d73
0, 7, 7, 1, 30720, 0x89ef1614
0, 8, 8, 1, 30720, 0xe12a1dd9
0, 9, 9, 1, 30720, 0x49622ffa
0, 10, 10, 1, 30720, 0xd6832703
0, 11, 11, 1, 30720, 0xec1d0cb7
0, 12, 12, 1, 30720, 0x8bee0525
0, 13, 13, 1, 30720, 0x1e0cf0c4
0, 14, 14, 1, 30720, 0xf83fd9db
0, 15, 15, 1, 30720, 0xffb0d6ab
0, 16, 16, 1, 30720, 0xe37fe239
0, 17, 17, 1, 30720, 0x74b0f856
0, 18, 18, 1, 30720, 0x9c88d3e1
0, 19, 19, 1, 30720, 0x714db368
0, 20, 20, 1, 30720, 0x6c8e8860
0, 21, 21, 1, 30720, 0x804968e6
0, 22, 22, 1, 30720, 0x7ac56ae4
0, 23, 23, 1, 30720, 0xffd85cbf
0, 24, 24, 1, 30720, 0x1f8455f9
0, 25, 25, 1, 30720, 0x3ae65296
0, 26, 26, 1, 30720, 0x9e544ecd
0, 27, 27, 1, 30720, 0x35678e5a
0, 28, 28, 1, 30720, 0x04bae866
0, 29, 29, 1, 30720, 0xb126ed94
0, 30, 30, 1, 30720, 0x1720efc5
0, 31, 31, 1, 30720, 0x4c1b01c2
0, 32, 32, 1, 30720, 0xd0a1e866
0, 33, 33, 1, 30720, 0x0d330789
0, 34, 34, 1, 30720, 0xf5ac08bb
0, 35, 35, 1, 30720, 0x9abe0d83
0, 36, 36, 1, 30720, 0xa44c02f4
0, 37, 37, 1, 30720, 0xdc4cc688
0, 38, 38, 1, 30720, 0x22eef3c1
0, 39, 39, 1, 30720, 0xcfbc0d1d
0, 40, 40, 1, 30720, 0x7104ea31
0, 41, 41, 1, 30720, 0x80daecfb
0, 42, 42, 1, 30720, 0xe1bab995
0, 43, 43, 1, 30720, 0x43f4b896
0, 44, 44, 1, 30720, 0xa0d2bf5c
0, 45, 45, 1, 30720, 0x3556a114


@@ -1,3 +1,3 @@
-0ab6cd4fe5fe85a3f3e87508c2eadfa0 *./tests/data/images/xwd/02.xwd
+50baa5560b7d1aa3188b19c1162bf7dc *./tests/data/images/xwd/02.xwd
 ./tests/data/images/xwd/%02d.xwd CRC=0x6da01946
 304239 ./tests/data/images/xwd/02.xwd