lavc: add a native Opus decoder.

Initial implementation by Andrew D'Addesio <modchipv12@gmail.com> during
GSoC 2012.

Completion by Anton Khirnov <anton@khirnov.net>, sponsored by the
Mozilla Corporation.

Further contributions by:
Christophe Gisquet <christophe.gisquet@gmail.com>
Janne Grunau <janne-libav@jannau.net>
Luca Barbato <lu_zero@gentoo.org>
Anton Khirnov, 2014-04-17 12:51:03 +02:00
parent 7e90133f64, commit b70d7a4ac7
14 changed files with 5743 additions and 2 deletions

Changelog

@@ -25,6 +25,7 @@ version <next>:
- On2 AVC (Audio for Video) decoder
- support for decoding through DXVA2 in avconv
- libbs2b-based stereo-to-binaural audio filter
- native Opus decoder
version 10:

configure

@@ -1822,6 +1822,7 @@ nellymoser_decoder_select="mdct sinewin"
nellymoser_encoder_select="audio_frame_queue mdct sinewin"
nuv_decoder_select="dsputil lzo"
on2avc_decoder_select="mdct"
opus_decoder_deps="avresample"
png_decoder_deps="zlib"
png_encoder_deps="zlib"
png_encoder_select="dsputil"
@@ -4387,6 +4388,8 @@ enabled movie_filter && prepend avfilter_deps "avformat avcodec"
enabled resample_filter && prepend avfilter_deps "avresample"
enabled scale_filter && prepend avfilter_deps "swscale"
enabled opus_decoder && prepend avcodec_deps "avresample"
expand_deps(){
lib_deps=${1}_deps
eval "deps=\$$lib_deps"

libavcodec/Makefile

@@ -279,6 +279,9 @@ OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
OBJS-$(CONFIG_ON2AVC_DECODER) += on2avc.o on2avcdata.o
OBJS-$(CONFIG_OPUS_DECODER) += opusdec.o opus.o opus_celt.o \
opus_imdct.o opus_silk.o \
vorbis_data.o
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += paf.o
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += paf.o
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
@@ -653,6 +656,7 @@ OBJS-$(CONFIG_MPEGAUDIO_PARSER) += mpegaudio_parser.o \
mpegaudiodecheader.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
mpeg12.o mpeg12data.o
OBJS-$(CONFIG_OPUS_PARSER) += opus_parser.o opus.o vorbis_data.o
OBJS-$(CONFIG_PNG_PARSER) += png_parser.o
OBJS-$(CONFIG_PNM_PARSER) += pnm_parser.o pnm.o
OBJS-$(CONFIG_RV30_PARSER) += rv34_parser.o

libavcodec/allcodecs.c

@@ -331,6 +331,7 @@ void avcodec_register_all(void)
REGISTER_DECODER(MPC8, mpc8);
REGISTER_ENCDEC (NELLYMOSER, nellymoser);
REGISTER_DECODER(ON2AVC, on2avc);
REGISTER_DECODER(OPUS, opus);
REGISTER_DECODER(PAF_AUDIO, paf_audio);
REGISTER_DECODER(QCELP, qcelp);
REGISTER_DECODER(QDM2, qdm2);
@@ -483,6 +484,7 @@ void avcodec_register_all(void)
REGISTER_PARSER(MPEG4VIDEO, mpeg4video);
REGISTER_PARSER(MPEGAUDIO, mpegaudio);
REGISTER_PARSER(MPEGVIDEO, mpegvideo);
REGISTER_PARSER(OPUS, opus);
REGISTER_PARSER(PNG, png);
REGISTER_PARSER(PNM, pnm);
REGISTER_PARSER(RV30, rv30);

libavcodec/opus.c (new file, 428 lines)

@@ -0,0 +1,428 @@
/*
* Copyright (c) 2012 Andrew D'Addesio
* Copyright (c) 2013-2014 Mozilla Corporation
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Opus decoder/parser shared code
*/
#include <stdint.h>
#include "libavutil/error.h"
#include "opus.h"
#include "vorbis.h"
static const uint16_t opus_frame_duration[32] = {
480, 960, 1920, 2880,
480, 960, 1920, 2880,
480, 960, 1920, 2880,
480, 960,
480, 960,
120, 240, 480, 960,
120, 240, 480, 960,
120, 240, 480, 960,
120, 240, 480, 960,
};
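For orientation, the table above (read together with the config-to-mode mapping at the end of ff_opus_parse_packet below) breaks down as:

    configs  0-11: SILK,   480/960/1920/2880 samples (10/20/40/60 ms at 48 kHz)
    configs 12-15: hybrid, 480/960 samples           (10/20 ms)
    configs 16-31: CELT,   120/240/480/960 samples   (2.5/5/10/20 ms)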
/**
* Read a 1- or 2-byte frame length
*/
static inline int xiph_lacing_16bit(const uint8_t **ptr, const uint8_t *end)
{
int val;
if (*ptr >= end)
return AVERROR_INVALIDDATA;
val = *(*ptr)++;
if (val >= 252) {
if (*ptr >= end)
return AVERROR_INVALIDDATA;
val += 4 * *(*ptr)++;
}
return val;
}
/**
* Read a multi-byte length (used for code 3 packet padding size)
*/
static inline int xiph_lacing_full(const uint8_t **ptr, const uint8_t *end)
{
int val = 0;
int next;
while (1) {
if (*ptr >= end || val > INT_MAX - 254)
return AVERROR_INVALIDDATA;
next = *(*ptr)++;
val += next;
if (next < 255)
break;
else
val--;
}
return val;
}
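For illustration, the two length codings above decode as follows (worked example, not taken from the diff itself):

    251           -> 251 bytes                  (1-/2-byte form, first byte < 252)
    253, 10       -> 253 + 4 * 10   = 293 bytes
    255, 255, 10  -> 254 + 254 + 10 = 518 bytes (multi-byte form, each 255 byte adds 254)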
/**
* Parse Opus packet info from raw packet data
*/
int ff_opus_parse_packet(OpusPacket *pkt, const uint8_t *buf, int buf_size,
int self_delimiting)
{
const uint8_t *ptr = buf;
const uint8_t *end = buf + buf_size;
int padding = 0;
int frame_bytes, i;
if (buf_size < 1)
goto fail;
/* TOC byte */
i = *ptr++;
pkt->code = (i ) & 0x3;
pkt->stereo = (i >> 2) & 0x1;
pkt->config = (i >> 3) & 0x1F;
/* code 2 and code 3 packets have at least 1 byte after the TOC */
if (pkt->code >= 2 && buf_size < 2)
goto fail;
switch (pkt->code) {
case 0:
/* 1 frame */
pkt->frame_count = 1;
pkt->vbr = 0;
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || len > end - ptr)
goto fail;
end = ptr + len;
buf_size = end - buf;
}
frame_bytes = end - ptr;
if (frame_bytes > MAX_FRAME_SIZE)
goto fail;
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes;
break;
case 1:
/* 2 frames, equal size */
pkt->frame_count = 2;
pkt->vbr = 0;
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || 2 * len > end - ptr)
goto fail;
end = ptr + 2 * len;
buf_size = end - buf;
}
frame_bytes = end - ptr;
if (frame_bytes & 1 || frame_bytes >> 1 > MAX_FRAME_SIZE)
goto fail;
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes >> 1;
pkt->frame_offset[1] = pkt->frame_offset[0] + pkt->frame_size[0];
pkt->frame_size[1] = frame_bytes >> 1;
break;
case 2:
/* 2 frames, different sizes */
pkt->frame_count = 2;
pkt->vbr = 1;
/* read 1st frame size */
frame_bytes = xiph_lacing_16bit(&ptr, end);
if (frame_bytes < 0)
goto fail;
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || len + frame_bytes > end - ptr)
goto fail;
end = ptr + frame_bytes + len;
buf_size = end - buf;
}
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes;
/* calculate 2nd frame size */
frame_bytes = end - ptr - pkt->frame_size[0];
if (frame_bytes < 0 || frame_bytes > MAX_FRAME_SIZE)
goto fail;
pkt->frame_offset[1] = pkt->frame_offset[0] + pkt->frame_size[0];
pkt->frame_size[1] = frame_bytes;
break;
case 3:
/* 1 to 48 frames, can be different sizes */
i = *ptr++;
pkt->frame_count = (i ) & 0x3F;
padding = (i >> 6) & 0x01;
pkt->vbr = (i >> 7) & 0x01;
if (pkt->frame_count == 0 || pkt->frame_count > MAX_FRAMES)
goto fail;
/* read padding size */
if (padding) {
padding = xiph_lacing_full(&ptr, end);
if (padding < 0)
goto fail;
}
/* read frame sizes */
if (pkt->vbr) {
/* for VBR, all frames except the final one have their size coded
in the bitstream. the last frame size is implicit. */
int total_bytes = 0;
for (i = 0; i < pkt->frame_count - 1; i++) {
frame_bytes = xiph_lacing_16bit(&ptr, end);
if (frame_bytes < 0)
goto fail;
pkt->frame_size[i] = frame_bytes;
total_bytes += frame_bytes;
}
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || len + total_bytes + padding > end - ptr)
goto fail;
end = ptr + total_bytes + len + padding;
buf_size = end - buf;
}
frame_bytes = end - ptr - padding;
if (total_bytes > frame_bytes)
goto fail;
pkt->frame_offset[0] = ptr - buf;
for (i = 1; i < pkt->frame_count; i++)
pkt->frame_offset[i] = pkt->frame_offset[i-1] + pkt->frame_size[i-1];
pkt->frame_size[pkt->frame_count-1] = frame_bytes - total_bytes;
} else {
/* for CBR, the remaining packet bytes are divided evenly between
the frames */
if (self_delimiting) {
frame_bytes = xiph_lacing_16bit(&ptr, end);
if (frame_bytes < 0 || pkt->frame_count * frame_bytes + padding > end - ptr)
goto fail;
end = ptr + pkt->frame_count * frame_bytes + padding;
buf_size = end - buf;
} else {
frame_bytes = end - ptr - padding;
if (frame_bytes % pkt->frame_count ||
frame_bytes / pkt->frame_count > MAX_FRAME_SIZE)
goto fail;
frame_bytes /= pkt->frame_count;
}
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes;
for (i = 1; i < pkt->frame_count; i++) {
pkt->frame_offset[i] = pkt->frame_offset[i-1] + pkt->frame_size[i-1];
pkt->frame_size[i] = frame_bytes;
}
}
}
pkt->packet_size = buf_size;
pkt->data_size = pkt->packet_size - padding;
/* total packet duration cannot be larger than 120ms */
pkt->frame_duration = opus_frame_duration[pkt->config];
if (pkt->frame_duration * pkt->frame_count > MAX_PACKET_DUR)
goto fail;
/* set mode and bandwidth */
if (pkt->config < 12) {
pkt->mode = OPUS_MODE_SILK;
pkt->bandwidth = pkt->config >> 2;
} else if (pkt->config < 16) {
pkt->mode = OPUS_MODE_HYBRID;
pkt->bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND + (pkt->config >= 14);
} else {
pkt->mode = OPUS_MODE_CELT;
pkt->bandwidth = (pkt->config - 16) >> 2;
/* skip mediumband */
if (pkt->bandwidth)
pkt->bandwidth++;
}
return 0;
fail:
memset(pkt, 0, sizeof(*pkt));
return AVERROR_INVALIDDATA;
}
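As a worked example of the TOC parsing above: a packet whose first byte is 0xFC splits into config 31, stereo 1 and code 0, so the parser yields a single stereo CELT fullband frame of 960 samples (20 ms) spanning the rest of the packet:

    0xFC = 11111 | 1 | 00
           config = 31 (>= 16: CELT, fullband, opus_frame_duration[31] = 960)
           stereo = 1
           code   = 0  (one frame, frame_size[0] = remaining packet bytes)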
static int channel_reorder_vorbis(int nb_channels, int channel_idx)
{
return ff_vorbis_channel_layout_offsets[nb_channels - 1][channel_idx];
}
static int channel_reorder_unknown(int nb_channels, int channel_idx)
{
return channel_idx;
}
av_cold int ff_opus_parse_extradata(AVCodecContext *avctx,
OpusContext *s)
{
static const uint8_t default_channel_map[2] = { 0, 1 };
uint8_t default_extradata[19] = {
'O', 'p', 'u', 's', 'H', 'e', 'a', 'd',
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
int (*channel_reorder)(int, int) = channel_reorder_unknown;
const uint8_t *extradata, *channel_map;
int extradata_size;
int version, channels, map_type, streams, stereo_streams, i, j;
uint64_t layout;
if (!avctx->extradata) {
if (avctx->channels > 2) {
av_log(avctx, AV_LOG_ERROR,
"Multichannel configuration without extradata.\n");
return AVERROR(EINVAL);
}
default_extradata[9] = (avctx->channels == 1) ? 1 : 2;
extradata = default_extradata;
extradata_size = sizeof(default_extradata);
} else {
extradata = avctx->extradata;
extradata_size = avctx->extradata_size;
}
if (extradata_size < 19) {
av_log(avctx, AV_LOG_ERROR, "Invalid extradata size: %d\n",
extradata_size);
return AVERROR_INVALIDDATA;
}
version = extradata[8];
if (version > 15) {
avpriv_request_sample(avctx, "Extradata version %d", version);
return AVERROR_PATCHWELCOME;
}
avctx->delay = AV_RL16(extradata + 10);
channels = extradata[9];
if (!channels) {
av_log(avctx, AV_LOG_ERROR, "Zero channel count specified in the extadata\n");
return AVERROR_INVALIDDATA;
}
s->gain_i = AV_RL16(extradata + 16);
if (s->gain_i)
s->gain = pow(10, s->gain_i / (20.0 * 256));
map_type = extradata[18];
if (!map_type) {
if (channels > 2) {
av_log(avctx, AV_LOG_ERROR,
"Channel mapping 0 is only specified for up to 2 channels\n");
return AVERROR_INVALIDDATA;
}
layout = (channels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
streams = 1;
stereo_streams = channels - 1;
channel_map = default_channel_map;
} else if (map_type == 1 || map_type == 255) {
if (extradata_size < 21 + channels) {
av_log(avctx, AV_LOG_ERROR, "Invalid extradata size: %d\n",
extradata_size);
return AVERROR_INVALIDDATA;
}
streams = extradata[19];
stereo_streams = extradata[20];
if (!streams || stereo_streams > streams ||
streams + stereo_streams > 255) {
av_log(avctx, AV_LOG_ERROR,
"Invalid stream/stereo stream count: %d/%d\n", streams, stereo_streams);
return AVERROR_INVALIDDATA;
}
if (map_type == 1) {
if (channels > 8) {
av_log(avctx, AV_LOG_ERROR,
"Channel mapping 1 is only specified for up to 8 channels\n");
return AVERROR_INVALIDDATA;
}
layout = ff_vorbis_channel_layouts[channels - 1];
channel_reorder = channel_reorder_vorbis;
} else
layout = 0;
channel_map = extradata + 21;
} else {
avpriv_request_sample(avctx, "Mapping type %d", map_type);
return AVERROR_PATCHWELCOME;
}
s->channel_maps = av_mallocz_array(channels, sizeof(*s->channel_maps));
if (!s->channel_maps)
return AVERROR(ENOMEM);
for (i = 0; i < channels; i++) {
ChannelMap *map = &s->channel_maps[i];
uint8_t idx = channel_map[channel_reorder(channels, i)];
if (idx == 255) {
map->silence = 1;
continue;
} else if (idx >= streams + stereo_streams) {
av_log(avctx, AV_LOG_ERROR,
"Invalid channel map for output channel %d: %d\n", i, idx);
return AVERROR_INVALIDDATA;
}
/* check that we didn't see this index yet */
map->copy = 0;
for (j = 0; j < i; j++)
if (channel_map[channel_reorder(channels, j)] == idx) {
map->copy = 1;
map->copy_idx = j;
break;
}
if (idx < 2 * stereo_streams) {
map->stream_idx = idx / 2;
map->channel_idx = idx & 1;
} else {
map->stream_idx = idx - stereo_streams;
map->channel_idx = 0;
}
}
avctx->channels = channels;
avctx->channel_layout = layout;
s->nb_streams = streams;
s->nb_stereo_streams = stereo_streams;
return 0;
}
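To make the channel-mapping arithmetic above concrete, take a hypothetical mapping-family-1 header with 6 output channels, streams = 4 and stereo_streams = 2 (illustrative values, not from the commit):

    idx = 3   -> stream 1, second channel   (3 / 2 = 1, 3 & 1 = 1)
    idx = 5   -> stream 3, mono             (5 - stereo_streams = 3)
    idx = 255 -> silent output channel
    gain_i = 256 -> gain = pow(10, 256 / (20.0 * 256)) ~= 1.12, i.e. +1 dB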

libavcodec/opus.h (new file, 429 lines)

@@ -0,0 +1,429 @@
/*
* Opus decoder/demuxer common functions
* Copyright (c) 2012 Andrew D'Addesio
* Copyright (c) 2013-2014 Mozilla Corporation
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_OPUS_H
#define AVCODEC_OPUS_H
#include <stdint.h>
#include "libavutil/audio_fifo.h"
#include "libavutil/float_dsp.h"
#include "libavutil/frame.h"
#include "libavresample/avresample.h"
#include "avcodec.h"
#include "get_bits.h"
#define MAX_FRAME_SIZE 1275
#define MAX_FRAMES 48
#define MAX_PACKET_DUR 5760
#define CELT_SHORT_BLOCKSIZE 120
#define CELT_OVERLAP CELT_SHORT_BLOCKSIZE
#define CELT_MAX_LOG_BLOCKS 3
#define CELT_MAX_FRAME_SIZE (CELT_SHORT_BLOCKSIZE * (1 << CELT_MAX_LOG_BLOCKS))
#define CELT_MAX_BANDS 21
#define CELT_VECTORS 11
#define CELT_ALLOC_STEPS 6
#define CELT_FINE_OFFSET 21
#define CELT_MAX_FINE_BITS 8
#define CELT_NORM_SCALE 16384
#define CELT_QTHETA_OFFSET 4
#define CELT_QTHETA_OFFSET_TWOPHASE 16
#define CELT_DEEMPH_COEFF 0.85000610f
#define CELT_POSTFILTER_MINPERIOD 15
#define CELT_ENERGY_SILENCE (-28.0f)
#define SILK_HISTORY 322
#define SILK_MAX_LPC 16
#define ROUND_MULL(a,b,s) (((MUL64(a, b) >> (s - 1)) + 1) >> 1)
#define ROUND_MUL16(a,b) ((MUL16(a, b) + 16384) >> 15)
#define opus_ilog(i) (av_log2(i) + !!(i))
enum OpusMode {
OPUS_MODE_SILK,
OPUS_MODE_HYBRID,
OPUS_MODE_CELT
};
enum OpusBandwidth {
OPUS_BANDWIDTH_NARROWBAND,
OPUS_BANDWIDTH_MEDIUMBAND,
OPUS_BANDWIDTH_WIDEBAND,
OPUS_BANDWIDTH_SUPERWIDEBAND,
OPUS_BANDWIDTH_FULLBAND
};
typedef struct RawBitsContext {
const uint8_t *position;
unsigned int bytes;
unsigned int cachelen;
unsigned int cacheval;
} RawBitsContext;
typedef struct OpusRangeCoder {
GetBitContext gb;
RawBitsContext rb;
unsigned int range;
unsigned int value;
unsigned int total_read_bits;
} OpusRangeCoder;
typedef struct SilkContext SilkContext;
typedef struct CeltIMDCTContext CeltIMDCTContext;
typedef struct CeltContext CeltContext;
typedef struct OpusPacket {
int packet_size; /** packet size */
int data_size; /** size of the useful data -- packet size - padding */
int code; /** packet code: specifies the frame layout */
int stereo; /** whether this packet is mono or stereo */
int vbr; /** vbr flag */
int config; /** configuration: tells the audio mode,
** bandwidth, and frame duration */
int frame_count; /** frame count */
int frame_offset[MAX_FRAMES]; /** frame offsets */
int frame_size[MAX_FRAMES]; /** frame sizes */
int frame_duration; /** frame duration, in samples @ 48kHz */
enum OpusMode mode; /** mode */
enum OpusBandwidth bandwidth; /** bandwidth */
} OpusPacket;
typedef struct OpusStreamContext {
AVCodecContext *avctx;
int output_channels;
OpusRangeCoder rc;
OpusRangeCoder redundancy_rc;
SilkContext *silk;
CeltContext *celt;
AVFloatDSPContext *fdsp;
float silk_buf[2][960];
float *silk_output[2];
DECLARE_ALIGNED(32, float, celt_buf)[2][960];
float *celt_output[2];
float redundancy_buf[2][960];
float *redundancy_output[2];
/* data buffers for the final output data */
float *out[2];
int out_size;
float *out_dummy;
int out_dummy_allocated_size;
AVAudioResampleContext *avr;
AVAudioFifo *celt_delay;
int silk_samplerate;
/* number of samples we still want to get from the resampler */
int delayed_samples;
OpusPacket packet;
int redundancy_idx;
} OpusStreamContext;
// a mapping between an opus stream and an output channel
typedef struct ChannelMap {
int stream_idx;
int channel_idx;
// when a single decoded channel is mapped to multiple output channels, we
// write to the first output directly and copy from it to the others
// this field is set to 1 for those copied output channels
int copy;
// this is the index of the output channel to copy from
int copy_idx;
// this channel is silent
int silence;
} ChannelMap;
typedef struct OpusContext {
OpusStreamContext *streams;
int nb_streams;
int nb_stereo_streams;
AVFloatDSPContext fdsp;
int16_t gain_i;
float gain;
ChannelMap *channel_maps;
} OpusContext;
static av_always_inline void opus_rc_normalize(OpusRangeCoder *rc)
{
while (rc->range <= 1<<23) {
rc->value = ((rc->value << 8) | (get_bits(&rc->gb, 8) ^ 0xFF)) & ((1u << 31) - 1);
rc->range <<= 8;
rc->total_read_bits += 8;
}
}
static av_always_inline void opus_rc_update(OpusRangeCoder *rc, unsigned int scale,
unsigned int low, unsigned int high,
unsigned int total)
{
rc->value -= scale * (total - high);
rc->range = low ? scale * (high - low)
: rc->range - scale * (total - high);
opus_rc_normalize(rc);
}
static av_always_inline unsigned int opus_rc_getsymbol(OpusRangeCoder *rc, const uint16_t *cdf)
{
unsigned int k, scale, total, symbol, low, high;
total = *cdf++;
scale = rc->range / total;
symbol = rc->value / scale + 1;
symbol = total - FFMIN(symbol, total);
for (k = 0; cdf[k] <= symbol; k++);
high = cdf[k];
low = k ? cdf[k-1] : 0;
opus_rc_update(rc, scale, low, high, total);
return k;
}
static av_always_inline unsigned int opus_rc_p2model(OpusRangeCoder *rc, unsigned int bits)
{
unsigned int k, scale;
scale = rc->range >> bits; // in this case, scale = symbol
if (rc->value >= scale) {
rc->value -= scale;
rc->range -= scale;
k = 0;
} else {
rc->range = scale;
k = 1;
}
opus_rc_normalize(rc);
return k;
}
/**
* CELT: estimate bits of entropy that have thus far been consumed for the
* current CELT frame, to integer and fractional (1/8th bit) precision
*/
static av_always_inline unsigned int opus_rc_tell(const OpusRangeCoder *rc)
{
return rc->total_read_bits - av_log2(rc->range) - 1;
}
static av_always_inline unsigned int opus_rc_tell_frac(const OpusRangeCoder *rc)
{
unsigned int i, total_bits, rcbuffer, range;
total_bits = rc->total_read_bits << 3;
rcbuffer = av_log2(rc->range) + 1;
range = rc->range >> (rcbuffer-16);
for (i = 0; i < 3; i++) {
int bit;
range = range * range >> 15;
bit = range >> 16;
rcbuffer = rcbuffer << 1 | bit;
range >>= bit;
}
return total_bits - rcbuffer;
}
/**
* CELT: read 1-25 raw bits at the end of the frame, backwards byte-wise
*/
static av_always_inline unsigned int opus_getrawbits(OpusRangeCoder *rc, unsigned int count)
{
unsigned int value = 0;
while (rc->rb.bytes && rc->rb.cachelen < count) {
rc->rb.cacheval |= *--rc->rb.position << rc->rb.cachelen;
rc->rb.cachelen += 8;
rc->rb.bytes--;
}
value = rc->rb.cacheval & ((1<<count)-1);
rc->rb.cacheval >>= count;
rc->rb.cachelen -= count;
rc->total_read_bits += count;
return value;
}
/**
* CELT: read a uniform distribution
*/
static av_always_inline unsigned int opus_rc_unimodel(OpusRangeCoder *rc, unsigned int size)
{
unsigned int bits, k, scale, total;
bits = opus_ilog(size - 1);
total = (bits > 8) ? ((size - 1) >> (bits - 8)) + 1 : size;
scale = rc->range / total;
k = rc->value / scale + 1;
k = total - FFMIN(k, total);
opus_rc_update(rc, scale, k, k + 1, total);
if (bits > 8) {
k = k << (bits - 8) | opus_getrawbits(rc, bits - 8);
return FFMIN(k, size - 1);
} else
return k;
}
static av_always_inline int opus_rc_laplace(OpusRangeCoder *rc, unsigned int symbol, int decay)
{
/* extends the range coder to model a Laplace distribution */
int value = 0;
unsigned int scale, low = 0, center;
scale = rc->range >> 15;
center = rc->value / scale + 1;
center = (1 << 15) - FFMIN(center, 1 << 15);
if (center >= symbol) {
value++;
low = symbol;
symbol = 1 + ((32768 - 32 - symbol) * (16384-decay) >> 15);
while (symbol > 1 && center >= low + 2 * symbol) {
value++;
symbol *= 2;
low += symbol;
symbol = (((symbol - 2) * decay) >> 15) + 1;
}
if (symbol <= 1) {
int distance = (center - low) >> 1;
value += distance;
low += 2 * distance;
}
if (center < low + symbol)
value *= -1;
else
low += symbol;
}
opus_rc_update(rc, scale, low, FFMIN(low + symbol, 32768), 32768);
return value;
}
static av_always_inline unsigned int opus_rc_stepmodel(OpusRangeCoder *rc, int k0)
{
/* Use a probability of 3 up to itheta=8192 and then use 1 after */
unsigned int k, scale, symbol, total = (k0+1)*3 + k0;
scale = rc->range / total;
symbol = rc->value / scale + 1;
symbol = total - FFMIN(symbol, total);
k = (symbol < (k0+1)*3) ? symbol/3 : symbol - (k0+1)*2;
opus_rc_update(rc, scale, (k <= k0) ? 3*(k+0) : (k-1-k0) + 3*(k0+1),
(k <= k0) ? 3*(k+1) : (k-0-k0) + 3*(k0+1), total);
return k;
}
static av_always_inline unsigned int opus_rc_trimodel(OpusRangeCoder *rc, int qn)
{
unsigned int k, scale, symbol, total, low, center;
total = ((qn>>1) + 1) * ((qn>>1) + 1);
scale = rc->range / total;
center = rc->value / scale + 1;
center = total - FFMIN(center, total);
if (center < total >> 1) {
k = (ff_sqrt(8 * center + 1) - 1) >> 1;
low = k * (k + 1) >> 1;
symbol = k + 1;
} else {
k = (2*(qn + 1) - ff_sqrt(8*(total - center - 1) + 1)) >> 1;
low = total - ((qn + 1 - k) * (qn + 2 - k) >> 1);
symbol = qn + 1 - k;
}
opus_rc_update(rc, scale, low, low + symbol, total);
return k;
}
int ff_opus_parse_packet(OpusPacket *pkt, const uint8_t *buf, int buf_size,
int self_delimited);
int ff_opus_parse_extradata(AVCodecContext *avctx, OpusContext *s);
int ff_silk_init(AVCodecContext *avctx, SilkContext **ps, int output_channels);
void ff_silk_free(SilkContext **ps);
void ff_silk_flush(SilkContext *s);
/**
* Decode the LP layer of one Opus frame (which may correspond to several SILK
* frames).
*/
int ff_silk_decode_superframe(SilkContext *s, OpusRangeCoder *rc,
float *output[2],
enum OpusBandwidth bandwidth, int coded_channels,
int duration_ms);
/**
* Init an iMDCT of the length 2 * 15 * (2^N)
*/
int ff_celt_imdct_init(CeltIMDCTContext **s, int N);
/**
* Free an iMDCT.
*/
void ff_celt_imdct_uninit(CeltIMDCTContext **s);
/**
* Calculate the middle half of the iMDCT
*/
void ff_celt_imdct_half(CeltIMDCTContext *s, float *dst, const float *src,
int src_stride, float scale);
int ff_celt_init(AVCodecContext *avctx, CeltContext **s, int output_channels);
void ff_celt_free(CeltContext **s);
void ff_celt_flush(CeltContext *s);
int ff_celt_decode_frame(CeltContext *s, OpusRangeCoder *rc,
float **output, int coded_channels, int frame_size,
int startband, int endband);
extern const float ff_celt_window2[120];
#endif /* AVCODEC_OPUS_H */

libavcodec/opus_celt.c (new file, 2220 lines)

File diff suppressed because it is too large.

libavcodec/opus_imdct.c (new file, 268 lines)

@@ -0,0 +1,268 @@
/*
* Copyright (c) 2013-2014 Mozilla Corporation
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Celt non-power of 2 iMDCT
*/
#include <float.h>
#include <math.h>
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "fft.h"
#include "opus.h"
// complex c = a * b
#define CMUL3(cre, cim, are, aim, bre, bim) \
do { \
cre = are * bre - aim * bim; \
cim = are * bim + aim * bre; \
} while (0)
#define CMUL(c, a, b) CMUL3((c).re, (c).im, (a).re, (a).im, (b).re, (b).im)
// complex c = a * b
// d = a * conjugate(b)
#define CMUL2(c, d, a, b) \
do { \
float are = (a).re; \
float aim = (a).im; \
float bre = (b).re; \
float bim = (b).im; \
float rr = are * bre; \
float ri = are * bim; \
float ir = aim * bre; \
float ii = aim * bim; \
(c).re = rr - ii; \
(c).im = ri + ir; \
(d).re = rr + ii; \
(d).im = -ri + ir; \
} while (0)
struct CeltIMDCTContext {
int fft_n;
int len2;
int len4;
FFTComplex *tmp;
FFTComplex *twiddle_exptab;
FFTComplex *exptab[6];
};
av_cold void ff_celt_imdct_uninit(CeltIMDCTContext **ps)
{
CeltIMDCTContext *s = *ps;
int i;
if (!s)
return;
for (i = 0; i < FF_ARRAY_ELEMS(s->exptab); i++)
av_freep(&s->exptab[i]);
av_freep(&s->twiddle_exptab);
av_freep(&s->tmp);
av_freep(ps);
}
av_cold int ff_celt_imdct_init(CeltIMDCTContext **ps, int N)
{
CeltIMDCTContext *s;
int len2 = 15 * (1 << N);
int len = 2 * len2;
int i, j;
if (len2 > CELT_MAX_FRAME_SIZE)
return AVERROR(EINVAL);
s = av_mallocz(sizeof(*s));
if (!s)
return AVERROR(ENOMEM);
s->fft_n = N - 1;
s->len4 = len2 / 2;
s->len2 = len2;
s->tmp = av_malloc(len * 2 * sizeof(*s->tmp));
if (!s->tmp)
goto fail;
s->twiddle_exptab = av_malloc(s->len4 * sizeof(*s->twiddle_exptab));
if (!s->twiddle_exptab)
goto fail;
for (i = 0; i < s->len4; i++) {
s->twiddle_exptab[i].re = cos(2 * M_PI * (i + 0.125 + s->len4) / len);
s->twiddle_exptab[i].im = sin(2 * M_PI * (i + 0.125 + s->len4) / len);
}
for (i = 0; i < FF_ARRAY_ELEMS(s->exptab); i++) {
int N = 15 * (1 << i);
s->exptab[i] = av_malloc(sizeof(*s->exptab[i]) * FFMAX(N, 19));
if (!s->exptab[i])
goto fail;
for (j = 0; j < N; j++) {
s->exptab[i][j].re = cos(2 * M_PI * j / N);
s->exptab[i][j].im = sin(2 * M_PI * j / N);
}
}
// wrap around to simplify fft15
for (j = 15; j < 19; j++)
s->exptab[0][j] = s->exptab[0][j - 15];
*ps = s;
return 0;
fail:
ff_celt_imdct_uninit(&s);
return AVERROR(ENOMEM);
}
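For reference, len2 = 15 * (1 << N) ties these iMDCT sizes to the CELT block sizes from opus.h:

    N = 3 -> len2 = 120 (CELT_SHORT_BLOCKSIZE, 2.5 ms at 48 kHz)
    N = 6 -> len2 = 960 (CELT_MAX_FRAME_SIZE, 20 ms, the largest size the check above accepts)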
static void fft5(FFTComplex *out, const FFTComplex *in, int stride)
{
// [0] = exp(2 * i * pi / 5), [1] = exp(2 * i * pi * 2 / 5)
static const FFTComplex fact[] = { { 0.30901699437494745, 0.95105651629515353 },
{ -0.80901699437494734, 0.58778525229247325 } };
FFTComplex z[4][4];
CMUL2(z[0][0], z[0][3], in[1 * stride], fact[0]);
CMUL2(z[0][1], z[0][2], in[1 * stride], fact[1]);
CMUL2(z[1][0], z[1][3], in[2 * stride], fact[0]);
CMUL2(z[1][1], z[1][2], in[2 * stride], fact[1]);
CMUL2(z[2][0], z[2][3], in[3 * stride], fact[0]);
CMUL2(z[2][1], z[2][2], in[3 * stride], fact[1]);
CMUL2(z[3][0], z[3][3], in[4 * stride], fact[0]);
CMUL2(z[3][1], z[3][2], in[4 * stride], fact[1]);
out[0].re = in[0].re + in[stride].re + in[2 * stride].re + in[3 * stride].re + in[4 * stride].re;
out[0].im = in[0].im + in[stride].im + in[2 * stride].im + in[3 * stride].im + in[4 * stride].im;
out[1].re = in[0].re + z[0][0].re + z[1][1].re + z[2][2].re + z[3][3].re;
out[1].im = in[0].im + z[0][0].im + z[1][1].im + z[2][2].im + z[3][3].im;
out[2].re = in[0].re + z[0][1].re + z[1][3].re + z[2][0].re + z[3][2].re;
out[2].im = in[0].im + z[0][1].im + z[1][3].im + z[2][0].im + z[3][2].im;
out[3].re = in[0].re + z[0][2].re + z[1][0].re + z[2][3].re + z[3][1].re;
out[3].im = in[0].im + z[0][2].im + z[1][0].im + z[2][3].im + z[3][1].im;
out[4].re = in[0].re + z[0][3].re + z[1][2].re + z[2][1].re + z[3][0].re;
out[4].im = in[0].im + z[0][3].im + z[1][2].im + z[2][1].im + z[3][0].im;
}
static void fft15(CeltIMDCTContext *s, FFTComplex *out, const FFTComplex *in, int stride)
{
const FFTComplex *exptab = s->exptab[0];
FFTComplex tmp[5];
FFTComplex tmp1[5];
FFTComplex tmp2[5];
int k;
fft5(tmp, in, stride * 3);
fft5(tmp1, in + stride, stride * 3);
fft5(tmp2, in + 2 * stride, stride * 3);
for (k = 0; k < 5; k++) {
FFTComplex t1, t2;
CMUL(t1, tmp1[k], exptab[k]);
CMUL(t2, tmp2[k], exptab[2 * k]);
out[k].re = tmp[k].re + t1.re + t2.re;
out[k].im = tmp[k].im + t1.im + t2.im;
CMUL(t1, tmp1[k], exptab[k + 5]);
CMUL(t2, tmp2[k], exptab[2 * (k + 5)]);
out[k + 5].re = tmp[k].re + t1.re + t2.re;
out[k + 5].im = tmp[k].im + t1.im + t2.im;
CMUL(t1, tmp1[k], exptab[k + 10]);
CMUL(t2, tmp2[k], exptab[2 * k + 5]);
out[k + 10].re = tmp[k].re + t1.re + t2.re;
out[k + 10].im = tmp[k].im + t1.im + t2.im;
}
}
/*
* FFT of the length 15 * (2^N)
*/
static void fft_calc(CeltIMDCTContext *s, FFTComplex *out, const FFTComplex *in, int N, int stride)
{
if (N) {
const FFTComplex *exptab = s->exptab[N];
const int len2 = 15 * (1 << (N - 1));
int k;
fft_calc(s, out, in, N - 1, stride * 2);
fft_calc(s, out + len2, in + stride, N - 1, stride * 2);
for (k = 0; k < len2; k++) {
FFTComplex t;
CMUL(t, out[len2 + k], exptab[k]);
out[len2 + k].re = out[k].re - t.re;
out[len2 + k].im = out[k].im - t.im;
out[k].re += t.re;
out[k].im += t.im;
}
} else
fft15(s, out, in, stride);
}
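The recursion above follows the 15 * 2^N factorization; for example a 60-point transform (N = 2) is evaluated as:

    fft_calc(N = 2)                    60 points
      fft_calc(N = 1, even inputs)     30 points
        fft15() -> 3 x fft5()          15 points each
        fft15() -> 3 x fft5()
      fft_calc(N = 1, odd inputs)      30 points
        fft15() -> 3 x fft5()
        fft15() -> 3 x fft5()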
void ff_celt_imdct_half(CeltIMDCTContext *s, float *dst, const float *src,
int stride, float scale)
{
FFTComplex *z = (FFTComplex *)dst;
const int len8 = s->len4 / 2;
const float *in1 = src;
const float *in2 = src + (s->len2 - 1) * stride;
int i;
for (i = 0; i < s->len4; i++) {
FFTComplex tmp = { *in2, *in1 };
CMUL(s->tmp[i], tmp, s->twiddle_exptab[i]);
in1 += 2 * stride;
in2 -= 2 * stride;
}
fft_calc(s, z, s->tmp, s->fft_n, 1);
for (i = 0; i < len8; i++) {
float r0, i0, r1, i1;
CMUL3(r0, i1, z[len8 - i - 1].im, z[len8 - i - 1].re, s->twiddle_exptab[len8 - i - 1].im, s->twiddle_exptab[len8 - i - 1].re);
CMUL3(r1, i0, z[len8 + i].im, z[len8 + i].re, s->twiddle_exptab[len8 + i].im, s->twiddle_exptab[len8 + i].re);
z[len8 - i - 1].re = scale * r0;
z[len8 - i - 1].im = scale * i0;
z[len8 + i].re = scale * r1;
z[len8 + i].im = scale * i1;
}
}

libavcodec/opus_parser.c (new file, 75 lines)

@@ -0,0 +1,75 @@
/*
* Copyright (c) 2013-2014 Mozilla Corporation
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Opus parser
*
* Determines the duration for each packet.
*/
#include "avcodec.h"
#include "opus.h"
typedef struct OpusParseContext {
OpusContext ctx;
OpusPacket pkt;
int extradata_parsed;
} OpusParseContext;
static int opus_parse(AVCodecParserContext *ctx, AVCodecContext *avctx,
const uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size)
{
OpusParseContext *s = ctx->priv_data;
int ret;
if (!buf_size)
return 0;
if (avctx->extradata && !s->extradata_parsed) {
ret = ff_opus_parse_extradata(avctx, &s->ctx);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error parsing Ogg extradata.\n");
goto fail;
}
av_freep(&s->ctx.channel_maps);
s->extradata_parsed = 1;
}
ret = ff_opus_parse_packet(&s->pkt, buf, buf_size, s->ctx.nb_streams > 1);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error parsing Opus packet header.\n");
goto fail;
}
ctx->duration = s->pkt.frame_count * s->pkt.frame_duration;
fail:
*poutbuf = buf;
*poutbuf_size = buf_size;
return buf_size;
}
AVCodecParser ff_opus_parser = {
.codec_ids = { AV_CODEC_ID_OPUS },
.priv_data_size = sizeof(OpusParseContext),
.parser_parse = opus_parse,
};

libavcodec/opus_silk.c (new file, 1597 lines)

File diff suppressed because it is too large.

libavcodec/opusdec.c (new file, 674 lines)

@@ -0,0 +1,674 @@
/*
* Opus decoder
* Copyright (c) 2012 Andrew D'Addesio
* Copyright (c) 2013-2014 Mozilla Corporation
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Opus decoder
* @author Andrew D'Addesio, Anton Khirnov
*
* Codec homepage: http://opus-codec.org/
* Specification: http://tools.ietf.org/html/rfc6716
* Ogg Opus specification: https://tools.ietf.org/html/draft-ietf-codec-oggopus-03
*
* Ogg-contained .opus files can be produced with opus-tools:
* http://git.xiph.org/?p=opus-tools.git
*/
#include <stdint.h>
#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavresample/avresample.h"
#include "avcodec.h"
#include "celp_filters.h"
#include "fft.h"
#include "get_bits.h"
#include "internal.h"
#include "mathops.h"
#include "opus.h"
static const uint16_t silk_frame_duration_ms[16] = {
10, 20, 40, 60,
10, 20, 40, 60,
10, 20, 40, 60,
10, 20,
10, 20,
};
/* number of samples of silence to feed to the resampler
* at the beginning */
static const int silk_resample_delay[] = {
4, 8, 11, 11, 11
};
static const uint8_t celt_band_end[] = { 13, 17, 17, 19, 21 };
static int get_silk_samplerate(int config)
{
if (config < 4)
return 8000;
else if (config < 8)
return 12000;
return 16000;
}
/**
* Range decoder
*/
static int opus_rc_init(OpusRangeCoder *rc, const uint8_t *data, int size)
{
int ret = init_get_bits8(&rc->gb, data, size);
if (ret < 0)
return ret;
rc->range = 128;
rc->value = 127 - get_bits(&rc->gb, 7);
rc->total_read_bits = 9;
opus_rc_normalize(rc);
return 0;
}
static void opus_raw_init(OpusRangeCoder *rc, const uint8_t *rightend,
unsigned int bytes)
{
rc->rb.position = rightend;
rc->rb.bytes = bytes;
rc->rb.cachelen = 0;
rc->rb.cacheval = 0;
}
static void opus_fade(float *out,
const float *in1, const float *in2,
const float *window, int len)
{
int i;
for (i = 0; i < len; i++)
out[i] = in2[i] * window[i] + in1[i] * (1.0 - window[i]);
}
static int opus_flush_resample(OpusStreamContext *s, int nb_samples)
{
int celt_size = av_audio_fifo_size(s->celt_delay);
int ret, i;
ret = avresample_convert(s->avr, (uint8_t**)s->out, s->out_size, nb_samples,
NULL, 0, 0);
if (ret < 0)
return ret;
else if (ret != nb_samples) {
av_log(s->avctx, AV_LOG_ERROR, "Wrong number of flushed samples: %d\n",
ret);
return AVERROR_BUG;
}
if (celt_size) {
if (celt_size != nb_samples) {
av_log(s->avctx, AV_LOG_ERROR, "Wrong number of CELT delay samples.\n");
return AVERROR_BUG;
}
av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, nb_samples);
for (i = 0; i < s->output_channels; i++) {
s->fdsp->vector_fmac_scalar(s->out[i],
s->celt_output[i], 1.0,
nb_samples);
}
}
if (s->redundancy_idx) {
for (i = 0; i < s->output_channels; i++)
opus_fade(s->out[i], s->out[i],
s->redundancy_output[i] + 120 + s->redundancy_idx,
ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
s->redundancy_idx = 0;
}
s->out[0] += nb_samples;
s->out[1] += nb_samples;
s->out_size -= nb_samples * sizeof(float);
return 0;
}
static int opus_init_resample(OpusStreamContext *s)
{
float delay[16] = { 0.0 };
uint8_t *delayptr[2] = { (uint8_t*)delay, (uint8_t*)delay };
int ret;
av_opt_set_int(s->avr, "in_sample_rate", s->silk_samplerate, 0);
ret = avresample_open(s->avr);
if (ret < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Error opening the resampler.\n");
return ret;
}
ret = avresample_convert(s->avr, NULL, 0, 0, delayptr, sizeof(delay),
silk_resample_delay[s->packet.bandwidth]);
if (ret < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"Error feeding initial silence to the resampler.\n");
return ret;
}
return 0;
}
static int opus_decode_redundancy(OpusStreamContext *s, const uint8_t *data, int size)
{
int ret;
enum OpusBandwidth bw = s->packet.bandwidth;
if (s->packet.mode == OPUS_MODE_SILK &&
bw == OPUS_BANDWIDTH_MEDIUMBAND)
bw = OPUS_BANDWIDTH_WIDEBAND;
ret = opus_rc_init(&s->redundancy_rc, data, size);
if (ret < 0)
goto fail;
opus_raw_init(&s->redundancy_rc, data + size, size);
ret = ff_celt_decode_frame(s->celt, &s->redundancy_rc,
s->redundancy_output,
s->packet.stereo + 1, 240,
0, celt_band_end[s->packet.bandwidth]);
if (ret < 0)
goto fail;
return 0;
fail:
av_log(s->avctx, AV_LOG_ERROR, "Error decoding the redundancy frame.\n");
return ret;
}
static int opus_decode_frame(OpusStreamContext *s, const uint8_t *data, int size)
{
int samples = s->packet.frame_duration;
int redundancy = 0;
int redundancy_size, redundancy_pos;
int ret, i, consumed;
int delayed_samples = s->delayed_samples;
ret = opus_rc_init(&s->rc, data, size);
if (ret < 0)
return ret;
/* decode the silk frame */
if (s->packet.mode == OPUS_MODE_SILK || s->packet.mode == OPUS_MODE_HYBRID) {
if (!avresample_is_open(s->avr)) {
ret = opus_init_resample(s);
if (ret < 0)
return ret;
}
samples = ff_silk_decode_superframe(s->silk, &s->rc, s->silk_output,
FFMIN(s->packet.bandwidth, OPUS_BANDWIDTH_WIDEBAND),
s->packet.stereo + 1,
silk_frame_duration_ms[s->packet.config]);
if (samples < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Error decoding a SILK frame.\n");
return samples;
}
samples = avresample_convert(s->avr, (uint8_t**)s->out, s->out_size,
s->packet.frame_duration,
(uint8_t**)s->silk_output,
sizeof(s->silk_buf[0]),
samples);
if (samples < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Error resampling SILK data.\n");
return samples;
}
s->delayed_samples += s->packet.frame_duration - samples;
} else
ff_silk_flush(s->silk);
// decode redundancy information
consumed = opus_rc_tell(&s->rc);
if (s->packet.mode == OPUS_MODE_HYBRID && consumed + 37 <= size * 8)
redundancy = opus_rc_p2model(&s->rc, 12);
else if (s->packet.mode == OPUS_MODE_SILK && consumed + 17 <= size * 8)
redundancy = 1;
if (redundancy) {
redundancy_pos = opus_rc_p2model(&s->rc, 1);
if (s->packet.mode == OPUS_MODE_HYBRID)
redundancy_size = opus_rc_unimodel(&s->rc, 256) + 2;
else
redundancy_size = size - (consumed + 7) / 8;
size -= redundancy_size;
if (size < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid redundancy frame size.\n");
return AVERROR_INVALIDDATA;
}
if (redundancy_pos) {
ret = opus_decode_redundancy(s, data + size, redundancy_size);
if (ret < 0)
return ret;
ff_celt_flush(s->celt);
}
}
/* decode the CELT frame */
if (s->packet.mode == OPUS_MODE_CELT || s->packet.mode == OPUS_MODE_HYBRID) {
float *out_tmp[2] = { s->out[0], s->out[1] };
float **dst = (s->packet.mode == OPUS_MODE_CELT) ?
out_tmp : s->celt_output;
int celt_output_samples = samples;
int delay_samples = av_audio_fifo_size(s->celt_delay);
if (delay_samples) {
if (s->packet.mode == OPUS_MODE_HYBRID) {
av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, delay_samples);
for (i = 0; i < s->output_channels; i++) {
s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0,
delay_samples);
out_tmp[i] += delay_samples;
}
celt_output_samples -= delay_samples;
} else {
av_log(s->avctx, AV_LOG_WARNING,
"Spurious CELT delay samples present.\n");
av_audio_fifo_drain(s->celt_delay, delay_samples);
if (s->avctx->err_recognition & AV_EF_EXPLODE)
return AVERROR_BUG;
}
}
opus_raw_init(&s->rc, data + size, size);
ret = ff_celt_decode_frame(s->celt, &s->rc, dst,
s->packet.stereo + 1,
s->packet.frame_duration,
(s->packet.mode == OPUS_MODE_HYBRID) ? 17 : 0,
celt_band_end[s->packet.bandwidth]);
if (ret < 0)
return ret;
if (s->packet.mode == OPUS_MODE_HYBRID) {
int celt_delay = s->packet.frame_duration - celt_output_samples;
void *delaybuf[2] = { s->celt_output[0] + celt_output_samples,
s->celt_output[1] + celt_output_samples };
for (i = 0; i < s->output_channels; i++) {
s->fdsp->vector_fmac_scalar(out_tmp[i],
s->celt_output[i], 1.0,
celt_output_samples);
}
ret = av_audio_fifo_write(s->celt_delay, delaybuf, celt_delay);
if (ret < 0)
return ret;
}
} else
ff_celt_flush(s->celt);
if (s->redundancy_idx) {
for (i = 0; i < s->output_channels; i++)
opus_fade(s->out[i], s->out[i],
s->redundancy_output[i] + 120 + s->redundancy_idx,
ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
s->redundancy_idx = 0;
}
if (redundancy) {
if (!redundancy_pos) {
ff_celt_flush(s->celt);
ret = opus_decode_redundancy(s, data + size, redundancy_size);
if (ret < 0)
return ret;
for (i = 0; i < s->output_channels; i++) {
opus_fade(s->out[i] + samples - 120 + delayed_samples,
s->out[i] + samples - 120 + delayed_samples,
s->redundancy_output[i] + 120,
ff_celt_window2, 120 - delayed_samples);
if (delayed_samples)
s->redundancy_idx = 120 - delayed_samples;
}
} else {
for (i = 0; i < s->output_channels; i++) {
memcpy(s->out[i] + delayed_samples, s->redundancy_output[i], 120 * sizeof(float));
opus_fade(s->out[i] + 120 + delayed_samples,
s->redundancy_output[i] + 120,
s->out[i] + 120 + delayed_samples,
ff_celt_window2, 120);
}
}
}
return samples;
}
static int opus_decode_subpacket(OpusStreamContext *s,
const uint8_t *buf, int buf_size,
int nb_samples)
{
int output_samples = 0;
int flush_needed = 0;
int i, j, ret;
/* check if we need to flush the resampler */
if (avresample_is_open(s->avr)) {
if (buf) {
int64_t cur_samplerate;
av_opt_get_int(s->avr, "in_sample_rate", 0, &cur_samplerate);
flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
} else {
flush_needed = !!s->delayed_samples;
}
}
if (!buf && !flush_needed)
return 0;
/* use dummy output buffers if the channel is not mapped to anything */
if (!s->out[0] ||
(s->output_channels == 2 && !s->out[1])) {
av_fast_malloc(&s->out_dummy, &s->out_dummy_allocated_size, s->out_size);
if (!s->out_dummy)
return AVERROR(ENOMEM);
if (!s->out[0])
s->out[0] = s->out_dummy;
if (!s->out[1])
s->out[1] = s->out_dummy;
}
/* flush the resampler if necessary */
if (flush_needed) {
ret = opus_flush_resample(s, s->delayed_samples);
if (ret < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Error flushing the resampler.\n");
return ret;
}
avresample_close(s->avr);
output_samples += s->delayed_samples;
s->delayed_samples = 0;
if (!buf)
goto finish;
}
/* decode all the frames in the packet */
for (i = 0; i < s->packet.frame_count; i++) {
int size = s->packet.frame_size[i];
int samples = opus_decode_frame(s, buf + s->packet.frame_offset[i], size);
if (samples < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Error decoding an Opus frame.\n");
if (s->avctx->err_recognition & AV_EF_EXPLODE)
return samples;
for (j = 0; j < s->output_channels; j++)
memset(s->out[j], 0, s->packet.frame_duration * sizeof(float));
samples = s->packet.frame_duration;
}
output_samples += samples;
for (j = 0; j < s->output_channels; j++)
s->out[j] += samples;
s->out_size -= samples * sizeof(float);
}
finish:
s->out[0] = s->out[1] = NULL;
s->out_size = 0;
return output_samples;
}
static int opus_decode_packet(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
OpusContext *c = avctx->priv_data;
AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int coded_samples = 0;
int decoded_samples = 0;
int i, ret;
/* decode the header of the first sub-packet to find out the sample count */
if (buf) {
OpusPacket *pkt = &c->streams[0].packet;
ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
return ret;
}
coded_samples += pkt->frame_count * pkt->frame_duration;
c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
}
frame->nb_samples = coded_samples + c->streams[0].delayed_samples;
/* no input or buffered data => nothing to do */
if (!frame->nb_samples) {
*got_frame_ptr = 0;
return 0;
}
/* setup the data buffers */
ret = ff_get_buffer(avctx, frame, 0);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
frame->nb_samples = 0;
for (i = 0; i < avctx->channels; i++) {
ChannelMap *map = &c->channel_maps[i];
if (!map->copy)
c->streams[map->stream_idx].out[map->channel_idx] = (float*)frame->extended_data[i];
}
for (i = 0; i < c->nb_streams; i++)
c->streams[i].out_size = frame->linesize[0];
/* decode each sub-packet */
for (i = 0; i < c->nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
if (i && buf) {
ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
return ret;
}
s->silk_samplerate = get_silk_samplerate(s->packet.config);
}
ret = opus_decode_subpacket(&c->streams[i], buf,
s->packet.data_size, coded_samples);
if (ret < 0)
return ret;
if (decoded_samples && ret != decoded_samples) {
av_log(avctx, AV_LOG_ERROR, "Different numbers of decoded samples "
"in a multi-channel stream\n");
return AVERROR_INVALIDDATA;
}
decoded_samples = ret;
buf += s->packet.packet_size;
buf_size -= s->packet.packet_size;
}
for (i = 0; i < avctx->channels; i++) {
ChannelMap *map = &c->channel_maps[i];
/* handle copied channels */
if (map->copy) {
memcpy(frame->extended_data[i],
frame->extended_data[map->copy_idx],
frame->linesize[0]);
} else if (map->silence) {
memset(frame->extended_data[i], 0, frame->linesize[0]);
}
if (c->gain_i) {
c->fdsp.vector_fmul_scalar((float*)frame->extended_data[i],
(float*)frame->extended_data[i],
c->gain, FFALIGN(decoded_samples, 8));
}
}
frame->nb_samples = decoded_samples;
*got_frame_ptr = !!decoded_samples;
return avpkt->size;
}
static av_cold void opus_decode_flush(AVCodecContext *ctx)
{
OpusContext *c = ctx->priv_data;
int i;
for (i = 0; i < c->nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
memset(&s->packet, 0, sizeof(s->packet));
s->delayed_samples = 0;
if (s->celt_delay)
av_audio_fifo_drain(s->celt_delay, av_audio_fifo_size(s->celt_delay));
avresample_close(s->avr);
ff_silk_flush(s->silk);
ff_celt_flush(s->celt);
}
}
static av_cold int opus_decode_close(AVCodecContext *avctx)
{
OpusContext *c = avctx->priv_data;
int i;
for (i = 0; i < c->nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
ff_silk_free(&s->silk);
ff_celt_free(&s->celt);
av_freep(&s->out_dummy);
s->out_dummy_allocated_size = 0;
av_audio_fifo_free(s->celt_delay);
avresample_free(&s->avr);
}
av_freep(&c->streams);
c->nb_streams = 0;
av_freep(&c->channel_maps);
return 0;
}
static av_cold int opus_decode_init(AVCodecContext *avctx)
{
OpusContext *c = avctx->priv_data;
int ret, i, j;
avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
avctx->sample_rate = 48000;
avpriv_float_dsp_init(&c->fdsp, 0);
/* find out the channel configuration */
ret = ff_opus_parse_extradata(avctx, c);
if (ret < 0)
return ret;
/* allocate and init each independent decoder */
c->streams = av_mallocz_array(c->nb_streams, sizeof(*c->streams));
if (!c->streams) {
c->nb_streams = 0;
ret = AVERROR(ENOMEM);
goto fail;
}
for (i = 0; i < c->nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
uint64_t layout;
s->output_channels = (i < c->nb_stereo_streams) ? 2 : 1;
s->avctx = avctx;
for (j = 0; j < s->output_channels; j++) {
s->silk_output[j] = s->silk_buf[j];
s->celt_output[j] = s->celt_buf[j];
s->redundancy_output[j] = s->redundancy_buf[j];
}
s->fdsp = &c->fdsp;
s->avr = avresample_alloc_context();
if (!s->avr)
goto fail;
layout = (s->output_channels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
av_opt_set_int(s->avr, "in_sample_fmt", avctx->sample_fmt, 0);
av_opt_set_int(s->avr, "out_sample_fmt", avctx->sample_fmt, 0);
av_opt_set_int(s->avr, "in_channel_layout", layout, 0);
av_opt_set_int(s->avr, "out_channel_layout", layout, 0);
av_opt_set_int(s->avr, "out_sample_rate", avctx->sample_rate, 0);
ret = ff_silk_init(avctx, &s->silk, s->output_channels);
if (ret < 0)
goto fail;
ret = ff_celt_init(avctx, &s->celt, s->output_channels);
if (ret < 0)
goto fail;
s->celt_delay = av_audio_fifo_alloc(avctx->sample_fmt,
s->output_channels, 1024);
if (!s->celt_delay) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
return 0;
fail:
opus_decode_close(avctx);
return ret;
}
AVCodec ff_opus_decoder = {
.name = "opus",
.long_name = NULL_IF_CONFIG_SMALL("Opus"),
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_OPUS,
.priv_data_size = sizeof(OpusContext),
.init = opus_decode_init,
.close = opus_decode_close,
.decode = opus_decode_packet,
.flush = opus_decode_flush,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
};

libavcodec/version.h

@@ -29,8 +29,8 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 55
-#define LIBAVCODEC_VERSION_MINOR 50
-#define LIBAVCODEC_VERSION_MICRO 3
+#define LIBAVCODEC_VERSION_MINOR 51
+#define LIBAVCODEC_VERSION_MICRO 0
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \

tests/Makefile

@@ -93,6 +93,7 @@ include $(SRC_PATH)/tests/fate/microsoft.mak
include $(SRC_PATH)/tests/fate/monkeysaudio.mak
include $(SRC_PATH)/tests/fate/mp3.mak
include $(SRC_PATH)/tests/fate/mpc.mak
include $(SRC_PATH)/tests/fate/opus.mak
include $(SRC_PATH)/tests/fate/pcm.mak
include $(SRC_PATH)/tests/fate/probe.mak
include $(SRC_PATH)/tests/fate/prores.mak

tests/fate/opus.mak (new file, 39 lines)

@@ -0,0 +1,39 @@
# The samples were produced by simply rewrapping the official test vectors from
# their custom format into Matroska.
# The reference files were created with our decoder and tested against the
# libopus output with the official opus_compare tool. We cannot use libopus
# output as reference directly, because the use of different resamplers would
# require too high fuzz values, which can hide bugs.
# Before adding new tests here, always make sure they pass opus_compare.
OPUS_CELT_SAMPLES = $(addprefix testvector, 01 07 11) tron.6ch.tinypkts
OPUS_HYBRID_SAMPLES = $(addprefix testvector, 05 06)
OPUS_SILK_SAMPLES = $(addprefix testvector, 02 03 04)
OPUS_SAMPLES = $(addprefix testvector, 08 09 10 12)
define FATE_OPUS_TEST
FATE_OPUS += fate-opus-$(1)
FATE_OPUS$(2) += fate-opus-$(1)
fate-opus-$(1): CMD = avconv -i $(TARGET_SAMPLES)/opus/$(1).mka -f f32le -
fate-opus-$(1): REF = $(TARGET_SAMPLES)/opus/$(1).f32
endef
$(foreach N,$(OPUS_CELT_SAMPLES), $(eval $(call FATE_OPUS_TEST,$(N),_CELT)))
$(foreach N,$(OPUS_HYBRID_SAMPLES),$(eval $(call FATE_OPUS_TEST,$(N),_HYBRID)))
$(foreach N,$(OPUS_SILK_SAMPLES), $(eval $(call FATE_OPUS_TEST,$(N),_SILK)))
$(foreach N,$(OPUS_SAMPLES), $(eval $(call FATE_OPUS_TEST,$(N),)))
FATE_OPUS := $(sort $(FATE_OPUS))
$(FATE_OPUS): CMP = stddev
$(FATE_OPUS): CMP_UNIT = f32
$(FATE_OPUS): FUZZ = 3
$(FATE_OPUS_CELT): CMP = oneoff
$(FATE_OPUS_CELT): FUZZ = 5
FATE_SAMPLES_AVCONV-$(call DEMDEC, MATROSKA, OPUS) += $(FATE_OPUS)
fate-opus-celt: $(FATE_OPUS_CELT)
fate-opus-hybrid: $(FATE_OPUS_HYBRID)
fate-opus-silk: $(FATE_OPUS_SILK)
fate-opus: $(FATE_OPUS)
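Usage note (assuming the standard FATE setup with a local samples checkout; the SAMPLES variable belongs to the existing test harness, not to this commit): the targets defined above can then be run as, for example,

    make fate-opus SAMPLES=/path/to/fate-suite
    make fate-opus-celt SAMPLES=/path/to/fate-suite
    make fate-opus-testvector01 SAMPLES=/path/to/fate-suite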