avcodec/opus: Move stuff shared by decoder and parser to a new file

opus.h (which is used by all the Opus code) currently contains
several structures that are only used by the parser and the decoder;
some fields of OpusContext are even used only by the decoder.

This commit therefore moves the part of OpusContext that is shared
between these two components (and used by ff_opus_parse_extradata())
out into a new structure and moves it, together with the other
accompanying structures and function declarations, to a new header,
opus_parse.h; the functions themselves are moved to a new file,
opus_parse.c. (This also makes it possible to remove several spurious
dependencies of the Opus parser and encoder.)

Reviewed-by: Lynne <dev@lynne.ee>
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Andreas Rheinhardt 2022-10-03 20:48:09 +02:00
parent 6658028482
commit 4fc2531fff
7 changed files with 595 additions and 529 deletions
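In outline, the split this commit introduces looks like the following (condensed from the diff hunks below; the comments and field ordering are editorial):

/* opus_parse.h -- state shared by the parser and the decoder,
 * filled in by ff_opus_parse_extradata() */
typedef struct OpusParseContext {
    int         nb_streams;
    int         nb_stereo_streams;
    int16_t     gain_i;
    ChannelMap *channel_maps;
} OpusParseContext;

/* opusdec.c -- the decoder-only context now embeds the shared part */
typedef struct OpusContext {
    AVClass *av_class;
    struct OpusStreamContext *streams;
    int apply_phase_inv;
    AVFloatDSPContext *fdsp;
    float gain;
    OpusParseContext p;
} OpusContext;

The decoder reaches the shared state through c->p (for example c->p.nb_streams and c->p.gain_i in the opusdec.c hunks), while the parser wraps the same struct inside its private OpusParserContext.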


@ -556,9 +556,9 @@ OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
OBJS-$(CONFIG_ON2AVC_DECODER) += on2avc.o on2avcdata.o
OBJS-$(CONFIG_OPUS_DECODER) += opusdec.o opus.o opus_celt.o opus_rc.o \
opus_pvq.o opus_silk.o opustab.o vorbis_data.o \
opusdsp.o
opusdsp.o opus_parse.o
OBJS-$(CONFIG_OPUS_ENCODER) += opusenc.o opus.o opus_rc.o opustab.o opus_pvq.o \
opusenc_psy.o vorbis_data.o
opusenc_psy.o
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += pafaudio.o
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += pafvideo.o
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
@ -1156,8 +1156,8 @@ OBJS-$(CONFIG_MPEG4VIDEO_PARSER) += mpeg4video_parser.o h263.o \
OBJS-$(CONFIG_MPEGAUDIO_PARSER) += mpegaudio_parser.o
OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
mpeg12.o mpeg12data.o
OBJS-$(CONFIG_OPUS_PARSER) += opus_parser.o opus.o opustab.o \
opus_rc.o vorbis_data.o
OBJS-$(CONFIG_OPUS_PARSER) += opus_parser.o opus_parse.o \
vorbis_data.o
OBJS-$(CONFIG_PNG_PARSER) += png_parser.o
OBJS-$(CONFIG_PNM_PARSER) += pnm_parser.o pnm.o
OBJS-$(CONFIG_QOI_PARSER) += qoi_parser.o


@ -19,452 +19,10 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Opus decoder/parser shared code
*/
#include <stdint.h>
#include "libavutil/channel_layout.h"
#include "libavutil/error.h"
#include "libavutil/ffmath.h"
#include "opus_celt.h"
#include "opustab.h"
#include "internal.h"
#include "vorbis_data.h"
static const uint16_t opus_frame_duration[32] = {
480, 960, 1920, 2880,
480, 960, 1920, 2880,
480, 960, 1920, 2880,
480, 960,
480, 960,
120, 240, 480, 960,
120, 240, 480, 960,
120, 240, 480, 960,
120, 240, 480, 960,
};
/**
* Read a 1- or 2-byte frame length
*/
static inline int xiph_lacing_16bit(const uint8_t **ptr, const uint8_t *end)
{
int val;
if (*ptr >= end)
return AVERROR_INVALIDDATA;
val = *(*ptr)++;
if (val >= 252) {
if (*ptr >= end)
return AVERROR_INVALIDDATA;
val += 4 * *(*ptr)++;
}
return val;
}
/**
* Read a multi-byte length (used for code 3 packet padding size)
*/
static inline int xiph_lacing_full(const uint8_t **ptr, const uint8_t *end)
{
int val = 0;
int next;
while (1) {
if (*ptr >= end || val > INT_MAX - 254)
return AVERROR_INVALIDDATA;
next = *(*ptr)++;
val += next;
if (next < 255)
break;
else
val--;
}
return val;
}
/**
* Parse Opus packet info from raw packet data
*/
int ff_opus_parse_packet(OpusPacket *pkt, const uint8_t *buf, int buf_size,
int self_delimiting)
{
const uint8_t *ptr = buf;
const uint8_t *end = buf + buf_size;
int padding = 0;
int frame_bytes, i;
if (buf_size < 1)
goto fail;
/* TOC byte */
i = *ptr++;
pkt->code = (i ) & 0x3;
pkt->stereo = (i >> 2) & 0x1;
pkt->config = (i >> 3) & 0x1F;
/* code 2 and code 3 packets have at least 1 byte after the TOC */
if (pkt->code >= 2 && buf_size < 2)
goto fail;
switch (pkt->code) {
case 0:
/* 1 frame */
pkt->frame_count = 1;
pkt->vbr = 0;
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || len > end - ptr)
goto fail;
end = ptr + len;
buf_size = end - buf;
}
frame_bytes = end - ptr;
if (frame_bytes > MAX_FRAME_SIZE)
goto fail;
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes;
break;
case 1:
/* 2 frames, equal size */
pkt->frame_count = 2;
pkt->vbr = 0;
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || 2 * len > end - ptr)
goto fail;
end = ptr + 2 * len;
buf_size = end - buf;
}
frame_bytes = end - ptr;
if (frame_bytes & 1 || frame_bytes >> 1 > MAX_FRAME_SIZE)
goto fail;
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes >> 1;
pkt->frame_offset[1] = pkt->frame_offset[0] + pkt->frame_size[0];
pkt->frame_size[1] = frame_bytes >> 1;
break;
case 2:
/* 2 frames, different sizes */
pkt->frame_count = 2;
pkt->vbr = 1;
/* read 1st frame size */
frame_bytes = xiph_lacing_16bit(&ptr, end);
if (frame_bytes < 0)
goto fail;
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || len + frame_bytes > end - ptr)
goto fail;
end = ptr + frame_bytes + len;
buf_size = end - buf;
}
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes;
/* calculate 2nd frame size */
frame_bytes = end - ptr - pkt->frame_size[0];
if (frame_bytes < 0 || frame_bytes > MAX_FRAME_SIZE)
goto fail;
pkt->frame_offset[1] = pkt->frame_offset[0] + pkt->frame_size[0];
pkt->frame_size[1] = frame_bytes;
break;
case 3:
/* 1 to 48 frames, can be different sizes */
i = *ptr++;
pkt->frame_count = (i ) & 0x3F;
padding = (i >> 6) & 0x01;
pkt->vbr = (i >> 7) & 0x01;
if (pkt->frame_count == 0 || pkt->frame_count > MAX_FRAMES)
goto fail;
/* read padding size */
if (padding) {
padding = xiph_lacing_full(&ptr, end);
if (padding < 0)
goto fail;
}
/* read frame sizes */
if (pkt->vbr) {
/* for VBR, all frames except the final one have their size coded
in the bitstream. the last frame size is implicit. */
int total_bytes = 0;
for (i = 0; i < pkt->frame_count - 1; i++) {
frame_bytes = xiph_lacing_16bit(&ptr, end);
if (frame_bytes < 0)
goto fail;
pkt->frame_size[i] = frame_bytes;
total_bytes += frame_bytes;
}
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || len + total_bytes + padding > end - ptr)
goto fail;
end = ptr + total_bytes + len + padding;
buf_size = end - buf;
}
frame_bytes = end - ptr - padding;
if (total_bytes > frame_bytes)
goto fail;
pkt->frame_offset[0] = ptr - buf;
for (i = 1; i < pkt->frame_count; i++)
pkt->frame_offset[i] = pkt->frame_offset[i-1] + pkt->frame_size[i-1];
pkt->frame_size[pkt->frame_count-1] = frame_bytes - total_bytes;
} else {
/* for CBR, the remaining packet bytes are divided evenly between
the frames */
if (self_delimiting) {
frame_bytes = xiph_lacing_16bit(&ptr, end);
if (frame_bytes < 0 || pkt->frame_count * frame_bytes + padding > end - ptr)
goto fail;
end = ptr + pkt->frame_count * frame_bytes + padding;
buf_size = end - buf;
} else {
frame_bytes = end - ptr - padding;
if (frame_bytes % pkt->frame_count ||
frame_bytes / pkt->frame_count > MAX_FRAME_SIZE)
goto fail;
frame_bytes /= pkt->frame_count;
}
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes;
for (i = 1; i < pkt->frame_count; i++) {
pkt->frame_offset[i] = pkt->frame_offset[i-1] + pkt->frame_size[i-1];
pkt->frame_size[i] = frame_bytes;
}
}
}
pkt->packet_size = buf_size;
pkt->data_size = pkt->packet_size - padding;
/* total packet duration cannot be larger than 120ms */
pkt->frame_duration = opus_frame_duration[pkt->config];
if (pkt->frame_duration * pkt->frame_count > MAX_PACKET_DUR)
goto fail;
/* set mode and bandwidth */
if (pkt->config < 12) {
pkt->mode = OPUS_MODE_SILK;
pkt->bandwidth = pkt->config >> 2;
} else if (pkt->config < 16) {
pkt->mode = OPUS_MODE_HYBRID;
pkt->bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND + (pkt->config >= 14);
} else {
pkt->mode = OPUS_MODE_CELT;
pkt->bandwidth = (pkt->config - 16) >> 2;
/* skip medium band */
if (pkt->bandwidth)
pkt->bandwidth++;
}
return 0;
fail:
memset(pkt, 0, sizeof(*pkt));
return AVERROR_INVALIDDATA;
}
static int channel_reorder_vorbis(int nb_channels, int channel_idx)
{
return ff_vorbis_channel_layout_offsets[nb_channels - 1][channel_idx];
}
static int channel_reorder_unknown(int nb_channels, int channel_idx)
{
return channel_idx;
}
av_cold int ff_opus_parse_extradata(AVCodecContext *avctx,
OpusContext *s)
{
static const uint8_t default_channel_map[2] = { 0, 1 };
int (*channel_reorder)(int, int) = channel_reorder_unknown;
int channels = avctx->ch_layout.nb_channels;
const uint8_t *extradata, *channel_map;
int extradata_size;
int version, map_type, streams, stereo_streams, i, j, ret;
AVChannelLayout layout = { 0 };
if (!avctx->extradata) {
if (channels > 2) {
av_log(avctx, AV_LOG_ERROR,
"Multichannel configuration without extradata.\n");
return AVERROR(EINVAL);
}
extradata = opus_default_extradata;
extradata_size = sizeof(opus_default_extradata);
} else {
extradata = avctx->extradata;
extradata_size = avctx->extradata_size;
}
if (extradata_size < 19) {
av_log(avctx, AV_LOG_ERROR, "Invalid extradata size: %d\n",
extradata_size);
return AVERROR_INVALIDDATA;
}
version = extradata[8];
if (version > 15) {
avpriv_request_sample(avctx, "Extradata version %d", version);
return AVERROR_PATCHWELCOME;
}
avctx->delay = AV_RL16(extradata + 10);
if (avctx->internal)
avctx->internal->skip_samples = avctx->delay;
channels = avctx->extradata ? extradata[9] : (channels == 1) ? 1 : 2;
if (!channels) {
av_log(avctx, AV_LOG_ERROR, "Zero channel count specified in the extradata\n");
return AVERROR_INVALIDDATA;
}
s->gain_i = AV_RL16(extradata + 16);
if (s->gain_i)
s->gain = ff_exp10(s->gain_i / (20.0 * 256));
map_type = extradata[18];
if (!map_type) {
if (channels > 2) {
av_log(avctx, AV_LOG_ERROR,
"Channel mapping 0 is only specified for up to 2 channels\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
layout = (channels == 1) ? (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO :
(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO;
streams = 1;
stereo_streams = channels - 1;
channel_map = default_channel_map;
} else if (map_type == 1 || map_type == 2 || map_type == 255) {
if (extradata_size < 21 + channels) {
av_log(avctx, AV_LOG_ERROR, "Invalid extradata size: %d\n",
extradata_size);
ret = AVERROR_INVALIDDATA;
goto fail;
}
streams = extradata[19];
stereo_streams = extradata[20];
if (!streams || stereo_streams > streams ||
streams + stereo_streams > 255) {
av_log(avctx, AV_LOG_ERROR,
"Invalid stream/stereo stream count: %d/%d\n", streams, stereo_streams);
ret = AVERROR_INVALIDDATA;
goto fail;
}
if (map_type == 1) {
if (channels > 8) {
av_log(avctx, AV_LOG_ERROR,
"Channel mapping 1 is only specified for up to 8 channels\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
av_channel_layout_copy(&layout, &ff_vorbis_ch_layouts[channels - 1]);
channel_reorder = channel_reorder_vorbis;
} else if (map_type == 2) {
int ambisonic_order = ff_sqrt(channels) - 1;
if (channels != ((ambisonic_order + 1) * (ambisonic_order + 1)) &&
channels != ((ambisonic_order + 1) * (ambisonic_order + 1) + 2)) {
av_log(avctx, AV_LOG_ERROR,
"Channel mapping 2 is only specified for channel counts"
" which can be written as (n + 1)^2 or (n + 1)^2 + 2"
" for nonnegative integer n\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
if (channels > 227) {
av_log(avctx, AV_LOG_ERROR, "Too many channels\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
layout.order = AV_CHANNEL_ORDER_AMBISONIC;
layout.nb_channels = channels;
if (channels != ((ambisonic_order + 1) * (ambisonic_order + 1)))
layout.u.mask = AV_CH_LAYOUT_STEREO;
} else {
layout.order = AV_CHANNEL_ORDER_UNSPEC;
layout.nb_channels = channels;
}
channel_map = extradata + 21;
} else {
avpriv_request_sample(avctx, "Mapping type %d", map_type);
return AVERROR_PATCHWELCOME;
}
s->channel_maps = av_calloc(channels, sizeof(*s->channel_maps));
if (!s->channel_maps) {
ret = AVERROR(ENOMEM);
goto fail;
}
for (i = 0; i < channels; i++) {
ChannelMap *map = &s->channel_maps[i];
uint8_t idx = channel_map[channel_reorder(channels, i)];
if (idx == 255) {
map->silence = 1;
continue;
} else if (idx >= streams + stereo_streams) {
av_log(avctx, AV_LOG_ERROR,
"Invalid channel map for output channel %d: %d\n", i, idx);
av_freep(&s->channel_maps);
ret = AVERROR_INVALIDDATA;
goto fail;
}
/* check that we did not see this index yet */
map->copy = 0;
for (j = 0; j < i; j++)
if (channel_map[channel_reorder(channels, j)] == idx) {
map->copy = 1;
map->copy_idx = j;
break;
}
if (idx < 2 * stereo_streams) {
map->stream_idx = idx / 2;
map->channel_idx = idx & 1;
} else {
map->stream_idx = idx - stereo_streams;
map->channel_idx = 0;
}
}
ret = av_channel_layout_copy(&avctx->ch_layout, &layout);
if (ret < 0)
goto fail;
s->nb_streams = streams;
s->nb_stereo_streams = stereo_streams;
return 0;
fail:
av_channel_layout_uninit(&layout);
return ret;
}
void ff_celt_quant_bands(CeltFrame *f, OpusRangeCoder *rc)
{


@ -1,5 +1,5 @@
/*
* Opus decoder/demuxer common functions
* Opus common header
* Copyright (c) 2012 Andrew D'Addesio
* Copyright (c) 2013-2014 Mozilla Corporation
*
@ -25,8 +25,6 @@
#include <stdint.h>
#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "opus_rc.h"
@ -77,58 +75,6 @@ typedef struct SilkContext SilkContext;
typedef struct CeltFrame CeltFrame;
typedef struct OpusPacket {
int packet_size; /**< packet size */
int data_size; /**< size of the useful data -- packet size - padding */
int code; /**< packet code: specifies the frame layout */
int stereo; /**< whether this packet is mono or stereo */
int vbr; /**< vbr flag */
int config; /**< configuration: tells the audio mode,
** bandwidth, and frame duration */
int frame_count; /**< frame count */
int frame_offset[MAX_FRAMES]; /**< frame offsets */
int frame_size[MAX_FRAMES]; /**< frame sizes */
int frame_duration; /**< frame duration, in samples @ 48kHz */
enum OpusMode mode; /**< mode */
enum OpusBandwidth bandwidth; /**< bandwidth */
} OpusPacket;
// a mapping between an opus stream and an output channel
typedef struct ChannelMap {
int stream_idx;
int channel_idx;
// when a single decoded channel is mapped to multiple output channels, we
// write to the first output directly and copy from it to the others
// this field is set to 1 for those copied output channels
int copy;
// this is the index of the output channel to copy from
int copy_idx;
// this channel is silent
int silence;
} ChannelMap;
typedef struct OpusContext {
AVClass *av_class;
struct OpusStreamContext *streams;
int apply_phase_inv;
int nb_streams;
int nb_stereo_streams;
AVFloatDSPContext *fdsp;
int16_t gain_i;
float gain;
ChannelMap *channel_maps;
} OpusContext;
int ff_opus_parse_packet(OpusPacket *pkt, const uint8_t *buf, int buf_size,
int self_delimited);
int ff_opus_parse_extradata(AVCodecContext *avctx, OpusContext *s);
int ff_silk_init(AVCodecContext *avctx, SilkContext **ps, int output_channels);
void ff_silk_free(SilkContext **ps);
void ff_silk_flush(SilkContext *s);

libavcodec/opus_parse.c (new file)

@ -0,0 +1,469 @@
/*
* Copyright (c) 2012 Andrew D'Addesio
* Copyright (c) 2013-2014 Mozilla Corporation
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Opus decoder/parser shared code
*/
#include "libavutil/attributes.h"
#include "libavutil/channel_layout.h"
#include "libavutil/error.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "avcodec.h"
#include "internal.h"
#include "mathops.h"
#include "opus.h"
#include "opus_parse.h"
#include "vorbis_data.h"
static const uint16_t opus_frame_duration[32] = {
480, 960, 1920, 2880,
480, 960, 1920, 2880,
480, 960, 1920, 2880,
480, 960,
480, 960,
120, 240, 480, 960,
120, 240, 480, 960,
120, 240, 480, 960,
120, 240, 480, 960,
};
/**
* Read a 1- or 2-byte frame length
*/
static inline int xiph_lacing_16bit(const uint8_t **ptr, const uint8_t *end)
{
int val;
if (*ptr >= end)
return AVERROR_INVALIDDATA;
val = *(*ptr)++;
if (val >= 252) {
if (*ptr >= end)
return AVERROR_INVALIDDATA;
val += 4 * *(*ptr)++;
}
return val;
}
/**
* Read a multi-byte length (used for code 3 packet padding size)
*/
static inline int xiph_lacing_full(const uint8_t **ptr, const uint8_t *end)
{
int val = 0;
int next;
while (1) {
if (*ptr >= end || val > INT_MAX - 254)
return AVERROR_INVALIDDATA;
next = *(*ptr)++;
val += next;
if (next < 255)
break;
else
val--;
}
return val;
}
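/* Editorial note, not part of the committed file: worked examples of the two
 * length codings above. In xiph_lacing_16bit(), byte values 0-251 are the
 * length itself, while 252-255 pull in a second byte, giving
 * first + 4 * second; e.g. the bytes {253, 10} decode to 253 + 40 = 293, and
 * the maximum representable length is 255 + 4*255 = 1275 bytes, the largest
 * compressed Opus frame allowed by RFC 6716. In xiph_lacing_full(), every
 * 255 byte contributes 254 and the loop continues, while a byte below 255
 * is added literally and terminates; e.g. {255, 255, 10} decodes to
 * 254 + 254 + 10 = 518. */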
/**
* Parse Opus packet info from raw packet data
*/
int ff_opus_parse_packet(OpusPacket *pkt, const uint8_t *buf, int buf_size,
int self_delimiting)
{
const uint8_t *ptr = buf;
const uint8_t *end = buf + buf_size;
int padding = 0;
int frame_bytes, i;
if (buf_size < 1)
goto fail;
/* TOC byte */
i = *ptr++;
pkt->code = (i ) & 0x3;
pkt->stereo = (i >> 2) & 0x1;
pkt->config = (i >> 3) & 0x1F;
/* code 2 and code 3 packets have at least 1 byte after the TOC */
if (pkt->code >= 2 && buf_size < 2)
goto fail;
switch (pkt->code) {
case 0:
/* 1 frame */
pkt->frame_count = 1;
pkt->vbr = 0;
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || len > end - ptr)
goto fail;
end = ptr + len;
buf_size = end - buf;
}
frame_bytes = end - ptr;
if (frame_bytes > MAX_FRAME_SIZE)
goto fail;
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes;
break;
case 1:
/* 2 frames, equal size */
pkt->frame_count = 2;
pkt->vbr = 0;
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || 2 * len > end - ptr)
goto fail;
end = ptr + 2 * len;
buf_size = end - buf;
}
frame_bytes = end - ptr;
if (frame_bytes & 1 || frame_bytes >> 1 > MAX_FRAME_SIZE)
goto fail;
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes >> 1;
pkt->frame_offset[1] = pkt->frame_offset[0] + pkt->frame_size[0];
pkt->frame_size[1] = frame_bytes >> 1;
break;
case 2:
/* 2 frames, different sizes */
pkt->frame_count = 2;
pkt->vbr = 1;
/* read 1st frame size */
frame_bytes = xiph_lacing_16bit(&ptr, end);
if (frame_bytes < 0)
goto fail;
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || len + frame_bytes > end - ptr)
goto fail;
end = ptr + frame_bytes + len;
buf_size = end - buf;
}
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes;
/* calculate 2nd frame size */
frame_bytes = end - ptr - pkt->frame_size[0];
if (frame_bytes < 0 || frame_bytes > MAX_FRAME_SIZE)
goto fail;
pkt->frame_offset[1] = pkt->frame_offset[0] + pkt->frame_size[0];
pkt->frame_size[1] = frame_bytes;
break;
case 3:
/* 1 to 48 frames, can be different sizes */
i = *ptr++;
pkt->frame_count = (i ) & 0x3F;
padding = (i >> 6) & 0x01;
pkt->vbr = (i >> 7) & 0x01;
if (pkt->frame_count == 0 || pkt->frame_count > MAX_FRAMES)
goto fail;
/* read padding size */
if (padding) {
padding = xiph_lacing_full(&ptr, end);
if (padding < 0)
goto fail;
}
/* read frame sizes */
if (pkt->vbr) {
/* for VBR, all frames except the final one have their size coded
in the bitstream. the last frame size is implicit. */
int total_bytes = 0;
for (i = 0; i < pkt->frame_count - 1; i++) {
frame_bytes = xiph_lacing_16bit(&ptr, end);
if (frame_bytes < 0)
goto fail;
pkt->frame_size[i] = frame_bytes;
total_bytes += frame_bytes;
}
if (self_delimiting) {
int len = xiph_lacing_16bit(&ptr, end);
if (len < 0 || len + total_bytes + padding > end - ptr)
goto fail;
end = ptr + total_bytes + len + padding;
buf_size = end - buf;
}
frame_bytes = end - ptr - padding;
if (total_bytes > frame_bytes)
goto fail;
pkt->frame_offset[0] = ptr - buf;
for (i = 1; i < pkt->frame_count; i++)
pkt->frame_offset[i] = pkt->frame_offset[i-1] + pkt->frame_size[i-1];
pkt->frame_size[pkt->frame_count-1] = frame_bytes - total_bytes;
} else {
/* for CBR, the remaining packet bytes are divided evenly between
the frames */
if (self_delimiting) {
frame_bytes = xiph_lacing_16bit(&ptr, end);
if (frame_bytes < 0 || pkt->frame_count * frame_bytes + padding > end - ptr)
goto fail;
end = ptr + pkt->frame_count * frame_bytes + padding;
buf_size = end - buf;
} else {
frame_bytes = end - ptr - padding;
if (frame_bytes % pkt->frame_count ||
frame_bytes / pkt->frame_count > MAX_FRAME_SIZE)
goto fail;
frame_bytes /= pkt->frame_count;
}
pkt->frame_offset[0] = ptr - buf;
pkt->frame_size[0] = frame_bytes;
for (i = 1; i < pkt->frame_count; i++) {
pkt->frame_offset[i] = pkt->frame_offset[i-1] + pkt->frame_size[i-1];
pkt->frame_size[i] = frame_bytes;
}
}
}
pkt->packet_size = buf_size;
pkt->data_size = pkt->packet_size - padding;
/* total packet duration cannot be larger than 120ms */
pkt->frame_duration = opus_frame_duration[pkt->config];
if (pkt->frame_duration * pkt->frame_count > MAX_PACKET_DUR)
goto fail;
/* set mode and bandwidth */
if (pkt->config < 12) {
pkt->mode = OPUS_MODE_SILK;
pkt->bandwidth = pkt->config >> 2;
} else if (pkt->config < 16) {
pkt->mode = OPUS_MODE_HYBRID;
pkt->bandwidth = OPUS_BANDWIDTH_SUPERWIDEBAND + (pkt->config >= 14);
} else {
pkt->mode = OPUS_MODE_CELT;
pkt->bandwidth = (pkt->config - 16) >> 2;
/* skip medium band */
if (pkt->bandwidth)
pkt->bandwidth++;
}
return 0;
fail:
memset(pkt, 0, sizeof(*pkt));
return AVERROR_INVALIDDATA;
}
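/* Editorial note, not part of the committed file: as a worked example of the
 * TOC handling above, a TOC byte of 0x78 (binary 0111 1000) yields
 * code = 0 (a single frame), stereo = 0 and config = 15, which the mapping
 * at the end of ff_opus_parse_packet() classifies as hybrid mode, fullband,
 * with a frame duration of 960 samples at 48 kHz (20 ms). */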
static int channel_reorder_vorbis(int nb_channels, int channel_idx)
{
return ff_vorbis_channel_layout_offsets[nb_channels - 1][channel_idx];
}
static int channel_reorder_unknown(int nb_channels, int channel_idx)
{
return channel_idx;
}
av_cold int ff_opus_parse_extradata(AVCodecContext *avctx,
OpusParseContext *s)
{
static const uint8_t default_channel_map[2] = { 0, 1 };
int (*channel_reorder)(int, int) = channel_reorder_unknown;
int channels = avctx->ch_layout.nb_channels;
const uint8_t *extradata, *channel_map;
int extradata_size;
int version, map_type, streams, stereo_streams, i, j, ret;
AVChannelLayout layout = { 0 };
if (!avctx->extradata) {
if (channels > 2) {
av_log(avctx, AV_LOG_ERROR,
"Multichannel configuration without extradata.\n");
return AVERROR(EINVAL);
}
extradata = opus_default_extradata;
extradata_size = sizeof(opus_default_extradata);
} else {
extradata = avctx->extradata;
extradata_size = avctx->extradata_size;
}
if (extradata_size < 19) {
av_log(avctx, AV_LOG_ERROR, "Invalid extradata size: %d\n",
extradata_size);
return AVERROR_INVALIDDATA;
}
version = extradata[8];
if (version > 15) {
avpriv_request_sample(avctx, "Extradata version %d", version);
return AVERROR_PATCHWELCOME;
}
avctx->delay = AV_RL16(extradata + 10);
if (avctx->internal)
avctx->internal->skip_samples = avctx->delay;
channels = avctx->extradata ? extradata[9] : (channels == 1) ? 1 : 2;
if (!channels) {
av_log(avctx, AV_LOG_ERROR, "Zero channel count specified in the extradata\n");
return AVERROR_INVALIDDATA;
}
s->gain_i = AV_RL16(extradata + 16);
map_type = extradata[18];
if (!map_type) {
if (channels > 2) {
av_log(avctx, AV_LOG_ERROR,
"Channel mapping 0 is only specified for up to 2 channels\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
layout = (channels == 1) ? (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO :
(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO;
streams = 1;
stereo_streams = channels - 1;
channel_map = default_channel_map;
} else if (map_type == 1 || map_type == 2 || map_type == 255) {
if (extradata_size < 21 + channels) {
av_log(avctx, AV_LOG_ERROR, "Invalid extradata size: %d\n",
extradata_size);
ret = AVERROR_INVALIDDATA;
goto fail;
}
streams = extradata[19];
stereo_streams = extradata[20];
if (!streams || stereo_streams > streams ||
streams + stereo_streams > 255) {
av_log(avctx, AV_LOG_ERROR,
"Invalid stream/stereo stream count: %d/%d\n", streams, stereo_streams);
ret = AVERROR_INVALIDDATA;
goto fail;
}
if (map_type == 1) {
if (channels > 8) {
av_log(avctx, AV_LOG_ERROR,
"Channel mapping 1 is only specified for up to 8 channels\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
av_channel_layout_copy(&layout, &ff_vorbis_ch_layouts[channels - 1]);
channel_reorder = channel_reorder_vorbis;
} else if (map_type == 2) {
int ambisonic_order = ff_sqrt(channels) - 1;
if (channels != ((ambisonic_order + 1) * (ambisonic_order + 1)) &&
channels != ((ambisonic_order + 1) * (ambisonic_order + 1) + 2)) {
av_log(avctx, AV_LOG_ERROR,
"Channel mapping 2 is only specified for channel counts"
" which can be written as (n + 1)^2 or (n + 1)^2 + 2"
" for nonnegative integer n\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
if (channels > 227) {
av_log(avctx, AV_LOG_ERROR, "Too many channels\n");
ret = AVERROR_INVALIDDATA;
goto fail;
}
layout.order = AV_CHANNEL_ORDER_AMBISONIC;
layout.nb_channels = channels;
if (channels != ((ambisonic_order + 1) * (ambisonic_order + 1)))
layout.u.mask = AV_CH_LAYOUT_STEREO;
} else {
layout.order = AV_CHANNEL_ORDER_UNSPEC;
layout.nb_channels = channels;
}
channel_map = extradata + 21;
} else {
avpriv_request_sample(avctx, "Mapping type %d", map_type);
return AVERROR_PATCHWELCOME;
}
s->channel_maps = av_calloc(channels, sizeof(*s->channel_maps));
if (!s->channel_maps) {
ret = AVERROR(ENOMEM);
goto fail;
}
for (i = 0; i < channels; i++) {
ChannelMap *map = &s->channel_maps[i];
uint8_t idx = channel_map[channel_reorder(channels, i)];
if (idx == 255) {
map->silence = 1;
continue;
} else if (idx >= streams + stereo_streams) {
av_log(avctx, AV_LOG_ERROR,
"Invalid channel map for output channel %d: %d\n", i, idx);
av_freep(&s->channel_maps);
ret = AVERROR_INVALIDDATA;
goto fail;
}
/* check that we did not see this index yet */
map->copy = 0;
for (j = 0; j < i; j++)
if (channel_map[channel_reorder(channels, j)] == idx) {
map->copy = 1;
map->copy_idx = j;
break;
}
if (idx < 2 * stereo_streams) {
map->stream_idx = idx / 2;
map->channel_idx = idx & 1;
} else {
map->stream_idx = idx - stereo_streams;
map->channel_idx = 0;
}
}
ret = av_channel_layout_copy(&avctx->ch_layout, &layout);
if (ret < 0)
goto fail;
s->nb_streams = streams;
s->nb_stereo_streams = stereo_streams;
return 0;
fail:
av_channel_layout_uninit(&layout);
return ret;
}
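For orientation, a minimal sketch of a caller of the two entry points this file provides; count_frames() is a hypothetical helper, not part of the commit:

#include "opus_parse.h"

/* Hypothetical helper: report how many frames a non-self-delimited
 * Opus packet carries, using ff_opus_parse_packet() from above. */
static int count_frames(const uint8_t *buf, int buf_size)
{
    OpusPacket pkt;
    int ret = ff_opus_parse_packet(&pkt, buf, buf_size, 0);
    if (ret < 0)
        return ret;             /* AVERROR_INVALIDDATA on malformed TOC/framing */
    return pkt.frame_count;     /* 1..48 frames, each pkt.frame_duration samples long */
}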

libavcodec/opus_parse.h (new file)

@ -0,0 +1,77 @@
/*
* Opus decoder/parser common functions and structures
* Copyright (c) 2012 Andrew D'Addesio
* Copyright (c) 2013-2014 Mozilla Corporation
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_OPUS_PARSE_H
#define AVCODEC_OPUS_PARSE_H
#include <stdint.h>
#include "avcodec.h"
#include "opus.h"
typedef struct OpusPacket {
int packet_size; /**< packet size */
int data_size; /**< size of the useful data -- packet size - padding */
int code; /**< packet code: specifies the frame layout */
int stereo; /**< whether this packet is mono or stereo */
int vbr; /**< vbr flag */
int config; /**< configuration: tells the audio mode,
** bandwidth, and frame duration */
int frame_count; /**< frame count */
int frame_offset[MAX_FRAMES]; /**< frame offsets */
int frame_size[MAX_FRAMES]; /**< frame sizes */
int frame_duration; /**< frame duration, in samples @ 48kHz */
enum OpusMode mode; /**< mode */
enum OpusBandwidth bandwidth; /**< bandwidth */
} OpusPacket;
// a mapping between an opus stream and an output channel
typedef struct ChannelMap {
int stream_idx;
int channel_idx;
// when a single decoded channel is mapped to multiple output channels, we
// write to the first output directly and copy from it to the others
// this field is set to 1 for those copied output channels
int copy;
// this is the index of the output channel to copy from
int copy_idx;
// this channel is silent
int silence;
} ChannelMap;
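/* Editorial note, not part of the committed header: with the channel map
 * parsed in ff_opus_parse_extradata(), an output channel whose extradata
 * index is 3 in a configuration with two coupled (stereo) streams resolves
 * to stream_idx = 1, channel_idx = 1, since idx < 2 * nb_stereo_streams
 * maps to stream idx / 2, channel idx & 1. */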
typedef struct OpusParseContext {
int nb_streams;
int nb_stereo_streams;
int16_t gain_i;
ChannelMap *channel_maps;
} OpusParseContext;
int ff_opus_parse_packet(OpusPacket *pkt, const uint8_t *buf, int buf_size,
int self_delimited);
int ff_opus_parse_extradata(AVCodecContext *avctx, OpusParseContext *s);
#endif /* AVCODEC_OPUS_PARSE_H */


@ -28,15 +28,16 @@
#include "avcodec.h"
#include "bytestream.h"
#include "opus.h"
#include "opus_parse.h"
#include "parser.h"
typedef struct OpusParseContext {
typedef struct OpusParserContext {
ParseContext pc;
OpusContext ctx;
OpusParseContext ctx;
OpusPacket pkt;
int extradata_parsed;
int ts_framing;
} OpusParseContext;
} OpusParserContext;
static const uint8_t *parse_opus_ts_header(const uint8_t *start, int *payload_len, int buf_len)
{
@ -83,7 +84,7 @@ static const uint8_t *parse_opus_ts_header(const uint8_t *start, int *payload_le
static int opus_find_frame_end(AVCodecParserContext *ctx, AVCodecContext *avctx,
const uint8_t *buf, int buf_size, int *header_len)
{
OpusParseContext *s = ctx->priv_data;
OpusParserContext *s = ctx->priv_data;
ParseContext *pc = &s->pc;
int ret, start_found, i = 0, payload_len = 0;
const uint8_t *payload;
@ -166,7 +167,7 @@ static int opus_parse(AVCodecParserContext *ctx, AVCodecContext *avctx,
const uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size)
{
OpusParseContext *s = ctx->priv_data;
OpusParserContext *s = ctx->priv_data;
ParseContext *pc = &s->pc;
int next, header_len;
@ -192,7 +193,7 @@ static int opus_parse(AVCodecParserContext *ctx, AVCodecContext *avctx,
const AVCodecParser ff_opus_parser = {
.codec_ids = { AV_CODEC_ID_OPUS },
.priv_data_size = sizeof(OpusParseContext),
.priv_data_size = sizeof(OpusParserContext),
.parser_parse = opus_parse,
.parser_close = ff_parse_close
};


@ -38,6 +38,8 @@
#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/channel_layout.h"
#include "libavutil/ffmath.h"
#include "libavutil/float_dsp.h"
#include "libavutil/frame.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
@ -50,6 +52,7 @@
#include "opus.h"
#include "opustab.h"
#include "opus_celt.h"
#include "opus_parse.h"
static const uint16_t silk_frame_duration_ms[16] = {
10, 20, 40, 60,
@ -110,6 +113,18 @@ typedef struct OpusStreamContext {
int redundancy_idx;
} OpusStreamContext;
typedef struct OpusContext {
AVClass *av_class;
struct OpusStreamContext *streams;
int apply_phase_inv;
AVFloatDSPContext *fdsp;
float gain;
OpusParseContext p;
} OpusContext;
static int get_silk_samplerate(int config)
{
if (config < 4)
@ -469,7 +484,7 @@ static int opus_decode_packet(AVCodecContext *avctx, AVFrame *frame,
int i, ret;
/* calculate the number of delayed samples */
for (i = 0; i < c->nb_streams; i++) {
for (int i = 0; i < c->p.nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
s->out[0] =
s->out[1] = NULL;
@ -480,7 +495,7 @@ static int opus_decode_packet(AVCodecContext *avctx, AVFrame *frame,
/* decode the header of the first sub-packet to find out the sample count */
if (buf) {
OpusPacket *pkt = &c->streams[0].packet;
ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
ret = ff_opus_parse_packet(pkt, buf, buf_size, c->p.nb_streams > 1);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
return ret;
@ -504,13 +519,13 @@ static int opus_decode_packet(AVCodecContext *avctx, AVFrame *frame,
frame->nb_samples = 0;
for (i = 0; i < avctx->ch_layout.nb_channels; i++) {
ChannelMap *map = &c->channel_maps[i];
ChannelMap *map = &c->p.channel_maps[i];
if (!map->copy)
c->streams[map->stream_idx].out[map->channel_idx] = (float*)frame->extended_data[i];
}
/* read the data from the sync buffers */
for (i = 0; i < c->nb_streams; i++) {
for (int i = 0; i < c->p.nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
float **out = s->out;
int sync_size = av_audio_fifo_size(s->sync_buffer);
@ -542,11 +557,11 @@ static int opus_decode_packet(AVCodecContext *avctx, AVFrame *frame,
}
/* decode each sub-packet */
for (i = 0; i < c->nb_streams; i++) {
for (int i = 0; i < c->p.nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
if (i && buf) {
ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->p.nb_streams - 1);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
return ret;
@ -572,7 +587,7 @@ static int opus_decode_packet(AVCodecContext *avctx, AVFrame *frame,
}
/* buffer the extra samples */
for (i = 0; i < c->nb_streams; i++) {
for (int i = 0; i < c->p.nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
int buffer_samples = s->decoded_samples - decoded_samples;
if (buffer_samples) {
@ -587,7 +602,7 @@ static int opus_decode_packet(AVCodecContext *avctx, AVFrame *frame,
}
for (i = 0; i < avctx->ch_layout.nb_channels; i++) {
ChannelMap *map = &c->channel_maps[i];
ChannelMap *map = &c->p.channel_maps[i];
/* handle copied channels */
if (map->copy) {
@ -598,7 +613,7 @@ static int opus_decode_packet(AVCodecContext *avctx, AVFrame *frame,
memset(frame->extended_data[i], 0, frame->linesize[0]);
}
if (c->gain_i && decoded_samples > 0) {
if (c->p.gain_i && decoded_samples > 0) {
c->fdsp->vector_fmul_scalar((float*)frame->extended_data[i],
(float*)frame->extended_data[i],
c->gain, FFALIGN(decoded_samples, 8));
@ -614,9 +629,8 @@ static int opus_decode_packet(AVCodecContext *avctx, AVFrame *frame,
static av_cold void opus_decode_flush(AVCodecContext *ctx)
{
OpusContext *c = ctx->priv_data;
int i;
for (i = 0; i < c->nb_streams; i++) {
for (int i = 0; i < c->p.nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
memset(&s->packet, 0, sizeof(s->packet));
@ -635,9 +649,8 @@ static av_cold void opus_decode_flush(AVCodecContext *ctx)
static av_cold int opus_decode_close(AVCodecContext *avctx)
{
OpusContext *c = avctx->priv_data;
int i;
for (i = 0; i < c->nb_streams; i++) {
for (int i = 0; i < c->p.nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
ff_silk_free(&s->silk);
@ -653,9 +666,9 @@ static av_cold int opus_decode_close(AVCodecContext *avctx)
av_freep(&c->streams);
c->nb_streams = 0;
c->p.nb_streams = 0;
av_freep(&c->channel_maps);
av_freep(&c->p.channel_maps);
av_freep(&c->fdsp);
return 0;
@ -664,7 +677,7 @@ static av_cold int opus_decode_close(AVCodecContext *avctx)
static av_cold int opus_decode_init(AVCodecContext *avctx)
{
OpusContext *c = avctx->priv_data;
int ret, i, j;
int ret;
avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
avctx->sample_rate = 48000;
@ -674,26 +687,28 @@ static av_cold int opus_decode_init(AVCodecContext *avctx)
return AVERROR(ENOMEM);
/* find out the channel configuration */
ret = ff_opus_parse_extradata(avctx, c);
ret = ff_opus_parse_extradata(avctx, &c->p);
if (ret < 0)
return ret;
if (c->p.gain_i)
c->gain = ff_exp10(c->p.gain_i / (20.0 * 256));
/* allocate and init each independent decoder */
c->streams = av_calloc(c->nb_streams, sizeof(*c->streams));
c->streams = av_calloc(c->p.nb_streams, sizeof(*c->streams));
if (!c->streams) {
c->nb_streams = 0;
c->p.nb_streams = 0;
return AVERROR(ENOMEM);
}
for (i = 0; i < c->nb_streams; i++) {
for (int i = 0; i < c->p.nb_streams; i++) {
OpusStreamContext *s = &c->streams[i];
AVChannelLayout layout;
s->output_channels = (i < c->nb_stereo_streams) ? 2 : 1;
s->output_channels = (i < c->p.nb_stereo_streams) ? 2 : 1;
s->avctx = avctx;
for (j = 0; j < s->output_channels; j++) {
for (int j = 0; j < s->output_channels; j++) {
s->silk_output[j] = s->silk_buf[j];
s->celt_output[j] = s->celt_buf[j];
s->redundancy_output[j] = s->redundancy_buf[j];