Merge remote-tracking branch 'qatar/master'
* qatar/master: (21 commits)
  CDXL demuxer and decoder
  hls: Re-add legacy applehttp name to preserve interface compatibility.
  hlsproto: Rename the functions and context
  hlsproto: Encourage users to try the hls demuxer instead of the proto
  doc: Move the hls protocol section into the right place
  libavformat: Rename the applehttp protocol to hls
  hls: Rename the functions and context
  libavformat: Rename the applehttp demuxer to hls
  rtpdec: Support H263 in RFC 2190 format
  rv30: check block type validity
  ttadec: CRC checking
  movenc: Support muxing VC1
  avconv: Don't split out inline sequence headers when stream copying VC1
  rv34: handle size changes during frame multithreading
  rv40: prevent undefined signed overflow in rv40_loop_filter()
  rv34: use AVERROR return values in ff_rv34_decode_frame()
  rv34: use uint16_t for RV34DecContext.deblock_coefs
  librtmp: Add "lib" prefix to librtmp URLProtocol declarations.
  movenc: Use defines instead of hardcoded numbers for RTCP types
  smjpegdec: implement seeking
  ...

Conflicts:
	Changelog
	doc/general.texi
	libavcodec/avcodec.h
	libavcodec/rv30.c
	libavcodec/tta.c
	libavcodec/version.h
	libavformat/Makefile
	libavformat/allformats.c
	libavformat/version.h
	libswscale/x86/swscale_mmx.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit c980be9e3a
@@ -4,6 +4,7 @@ releases are sorted from youngest to oldest.

version next:
- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
- setfield filter
- CDXL demuxer and decoder


version 0.10:
configure
@@ -1640,13 +1640,15 @@ httpproxy_protocol_select="tcp_protocol"
http_protocol_deps="network"
http_protocol_select="tcp_protocol"
https_protocol_select="tls_protocol"
librtmp_protocol_deps="librtmp"
librtmpe_protocol_deps="librtmp"
librtmps_protocol_deps="librtmp"
librtmpt_protocol_deps="librtmp"
librtmpte_protocol_deps="librtmp"
mmsh_protocol_select="http_protocol"
mmst_protocol_deps="network"
rtmp_protocol_deps="!librtmp_protocol"
rtmp_protocol_select="tcp_protocol"
rtmpe_protocol_deps="librtmp"
rtmps_protocol_deps="librtmp"
rtmpt_protocol_deps="librtmp"
rtmpte_protocol_deps="librtmp"
rtp_protocol_select="udp_protocol"
tcp_protocol_deps="network"
tls_protocol_deps_any="openssl gnutls"
@@ -143,6 +143,8 @@ library:
    @tab Multimedia format used by Delphine Software games.
@item CD+G @tab @tab X
    @tab Video format used by CD+G karaoke disks
@item Commodore CDXL @tab @tab X
    @tab Amiga CD video format
@item Core Audio Format @tab X @tab X
    @tab Apple Core Audio Format
@item CRC testing format @tab X @tab
@@ -460,6 +462,8 @@ following image formats are supported:
    @tab fourcc: CSCD
@item CD+G @tab @tab X
    @tab Video codec for CD+G karaoke disks
@item CDXL @tab @tab X
    @tab Amiga CD video codec
@item Chinese AVS video @tab E @tab X
    @tab AVS1-P2, JiZhun profile, encoding through external library libxavs
@item Delphine Software International CIN video @tab @tab X
@@ -19,22 +19,6 @@ supported protocols.

A description of the currently available protocols follows.

@section applehttp

Read Apple HTTP Live Streaming compliant segmented stream as
a uniform one. The M3U8 playlists describing the segments can be
remote HTTP resources or local files, accessed using the standard
file protocol.
HTTP is default, specific protocol can be declared by specifying
"+@var{proto}" after the applehttp URI scheme name, where @var{proto}
is either "file" or "http".

@example
applehttp://host/path/to/remote/resource.m3u8
applehttp+http://host/path/to/remote/resource.m3u8
applehttp+file://path/to/local/resource.m3u8
@end example

@section concat

Physical concatenation protocol.
@@ -81,6 +65,26 @@ specified with the name "FILE.mpeg" is interpreted as the URL

Gopher protocol.

@section hls

Read Apple HTTP Live Streaming compliant segmented stream as
a uniform one. The M3U8 playlists describing the segments can be
remote HTTP resources or local files, accessed using the standard
file protocol.
The nested protocol is declared by specifying
"+@var{proto}" after the hls URI scheme name, where @var{proto}
is either "file" or "http".

@example
hls+http://host/path/to/remote/resource.m3u8
hls+file://path/to/local/resource.m3u8
@end example

Using this protocol is discouraged - the hls demuxer should work
just as well (if not, please report the issues) and is more complete.
To use the hls demuxer instead, simply use the direct URLs to the
m3u8 files.

@section http

HTTP (Hyper Text Transfer Protocol).
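As an aside, the demuxer path recommended in the documentation above simply means passing the playlist URL straight to the tool. Purely as an illustration (host and path are placeholders, not part of this patch):

@example
ffplay http://host/path/to/remote/resource.m3u8
@end example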
ffmpeg.c
@@ -1931,6 +1931,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
        if (  ost->st->codec->codec_id != CODEC_ID_H264
           && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
           && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
           && ost->st->codec->codec_id != CODEC_ID_VC1
           ) {
            if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
                opkt.destruct = av_destruct_packet;
@@ -111,6 +111,7 @@ OBJS-$(CONFIG_C93_DECODER)             += c93.o
OBJS-$(CONFIG_CAVS_DECODER)            += cavs.o cavsdec.o cavsdsp.o \
                                          mpeg12data.o mpegvideo.o
OBJS-$(CONFIG_CDGRAPHICS_DECODER)      += cdgraphics.o
OBJS-$(CONFIG_CDXL_DECODER)            += cdxl.o
OBJS-$(CONFIG_CINEPAK_DECODER)         += cinepak.o
OBJS-$(CONFIG_CLJR_DECODER)            += cljr.o
OBJS-$(CONFIG_CLJR_ENCODER)            += cljr.o
@@ -90,6 +90,7 @@ void avcodec_register_all(void)
    REGISTER_DECODER (C93, c93);
    REGISTER_DECODER (CAVS, cavs);
    REGISTER_DECODER (CDGRAPHICS, cdgraphics);
    REGISTER_DECODER (CDXL, cdxl);
    REGISTER_DECODER (CINEPAK, cinepak);
    REGISTER_ENCDEC  (CLJR, cljr);
    REGISTER_DECODER (CSCD, cscd);
@@ -246,6 +246,7 @@ enum CodecID {
    CODEC_ID_DXTORY,
    CODEC_ID_V410,
    CODEC_ID_XWD,
    CODEC_ID_CDXL,
    CODEC_ID_Y41P      = MKBETAG('Y','4','1','P'),
    CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'),
    CODEC_ID_AVRP      = MKBETAG('A','V','R','P'),
libavcodec/cdxl.c (new file, 278 lines)
@@ -0,0 +1,278 @@
|
||||
/*
|
||||
* CDXL video decoder
|
||||
* Copyright (c) 2011-2012 Paul B Mahol
|
||||
*
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "avcodec.h"
|
||||
#include "get_bits.h"
|
||||
|
||||
typedef struct {
|
||||
AVCodecContext *avctx;
|
||||
AVFrame frame;
|
||||
int bpp;
|
||||
const uint8_t *palette;
|
||||
int palette_size;
|
||||
const uint8_t *video;
|
||||
int video_size;
|
||||
uint8_t *new_video;
|
||||
int new_video_size;
|
||||
} CDXLVideoContext;
|
||||
|
||||
static av_cold int cdxl_decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
CDXLVideoContext *c = avctx->priv_data;
|
||||
|
||||
avcodec_get_frame_defaults(&c->frame);
|
||||
c->new_video_size = 0;
|
||||
c->avctx = avctx;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void import_palette(CDXLVideoContext *c, uint32_t *new_palette)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < c->palette_size / 2; i++) {
|
||||
unsigned rgb = AV_RB16(&c->palette[i * 2]);
|
||||
unsigned r = ((rgb >> 8) & 0xF) * 0x11;
|
||||
unsigned g = ((rgb >> 4) & 0xF) * 0x11;
|
||||
unsigned b = (rgb & 0xF) * 0x11;
|
||||
AV_WN32(&new_palette[i], (r << 16) | (g << 8) | b);
|
||||
}
|
||||
}
|
||||
|
||||
static void bitplanar2chunky(CDXLVideoContext *c, int width,
|
||||
int linesize, uint8_t *out)
|
||||
{
|
||||
GetBitContext gb;
|
||||
int x, y, plane;
|
||||
|
||||
init_get_bits(&gb, c->video, c->video_size * 8);
|
||||
memset(out, 0, linesize * c->avctx->height);
|
||||
for (plane = 0; plane < c->bpp; plane++)
|
||||
for (y = 0; y < c->avctx->height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
out[linesize * y + x] |= get_bits1(&gb) << plane;
|
||||
}
|
||||
|
||||
static void cdxl_decode_rgb(CDXLVideoContext *c)
|
||||
{
|
||||
uint32_t *new_palette = (uint32_t *)c->frame.data[1];
|
||||
int padded_width = FFALIGN(c->avctx->width, 16);
|
||||
|
||||
import_palette(c, new_palette);
|
||||
bitplanar2chunky(c, padded_width, c->frame.linesize[0], c->frame.data[0]);
|
||||
}
|
||||
|
||||
static void cdxl_decode_ham6(CDXLVideoContext *c)
|
||||
{
|
||||
AVCodecContext *avctx = c->avctx;
|
||||
uint32_t new_palette[16], r, g, b;
|
||||
uint8_t *ptr, *out, index, op;
|
||||
int x, y;
|
||||
|
||||
ptr = c->new_video;
|
||||
out = c->frame.data[0];
|
||||
|
||||
import_palette(c, new_palette);
|
||||
bitplanar2chunky(c, avctx->width, avctx->width, c->new_video);
|
||||
|
||||
for (y = 0; y < avctx->height; y++) {
|
||||
r = new_palette[0] & 0xFF0000;
|
||||
g = new_palette[0] & 0xFF00;
|
||||
b = new_palette[0] & 0xFF;
|
||||
for (x = 0; x < avctx->width; x++) {
|
||||
index = *ptr++;
|
||||
op = index >> 4;
|
||||
index &= 15;
|
||||
switch (op) {
|
||||
case 0:
|
||||
r = new_palette[index] & 0xFF0000;
|
||||
g = new_palette[index] & 0xFF00;
|
||||
b = new_palette[index] & 0xFF;
|
||||
break;
|
||||
case 1:
|
||||
b = index * 0x11;
|
||||
break;
|
||||
case 2:
|
||||
r = index * 0x11 << 16;
|
||||
break;
|
||||
case 3:
|
||||
g = index * 0x11 << 8;
|
||||
break;
|
||||
}
|
||||
AV_WN32(out + x * 3, r | g | b);
|
||||
}
|
||||
out += c->frame.linesize[0];
|
||||
}
|
||||
}
|
||||
|
||||
static void cdxl_decode_ham8(CDXLVideoContext *c)
|
||||
{
|
||||
AVCodecContext *avctx = c->avctx;
|
||||
uint32_t new_palette[64], r, g, b;
|
||||
uint8_t *ptr, *out, index, op;
|
||||
int x, y;
|
||||
|
||||
ptr = c->new_video;
|
||||
out = c->frame.data[0];
|
||||
|
||||
import_palette(c, new_palette);
|
||||
bitplanar2chunky(c, avctx->width, avctx->width, c->new_video);
|
||||
|
||||
for (y = 0; y < avctx->height; y++) {
|
||||
r = new_palette[0] & 0xFF0000;
|
||||
g = new_palette[0] & 0xFF00;
|
||||
b = new_palette[0] & 0xFF;
|
||||
for (x = 0; x < avctx->width; x++) {
|
||||
index = *ptr++;
|
||||
op = index >> 6;
|
||||
index &= 63;
|
||||
switch (op) {
|
||||
case 0:
|
||||
r = new_palette[index] & 0xFF0000;
|
||||
g = new_palette[index] & 0xFF00;
|
||||
b = new_palette[index] & 0xFF;
|
||||
break;
|
||||
case 1:
|
||||
b = (index << 2) | (b & 3);
|
||||
break;
|
||||
case 2:
|
||||
r = (index << 18) | (r & (3 << 16));
|
||||
break;
|
||||
case 3:
|
||||
g = (index << 10) | (g & (3 << 8));
|
||||
break;
|
||||
}
|
||||
AV_WN32(out + x * 3, r | g | b);
|
||||
}
|
||||
out += c->frame.linesize[0];
|
||||
}
|
||||
}
|
||||
|
||||
static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int *data_size, AVPacket *pkt)
|
||||
{
|
||||
CDXLVideoContext *c = avctx->priv_data;
|
||||
AVFrame * const p = &c->frame;
|
||||
int ret, w, h, encoding, format, buf_size = pkt->size;
|
||||
const uint8_t *buf = pkt->data;
|
||||
|
||||
if (buf_size < 32)
|
||||
return AVERROR_INVALIDDATA;
|
||||
encoding = buf[1] & 7;
|
||||
format = buf[1] & 0xE0;
|
||||
w = AV_RB16(&buf[14]);
|
||||
h = AV_RB16(&buf[16]);
|
||||
c->bpp = buf[19];
|
||||
c->palette_size = AV_RB16(&buf[20]);
|
||||
c->palette = buf + 32;
|
||||
c->video = c->palette + c->palette_size;
|
||||
c->video_size = buf_size - c->palette_size - 32;
|
||||
|
||||
if (c->palette_size > 512)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (buf_size < c->palette_size + 32)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (c->bpp < 1)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (c->bpp > 8) {
|
||||
av_log_ask_for_sample(avctx, "unsupported pixel size: %d\n", c->bpp);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
if (format) {
|
||||
av_log_ask_for_sample(avctx, "unsupported pixel format: %d\n", format);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
|
||||
return ret;
|
||||
if (w != avctx->width || h != avctx->height)
|
||||
avcodec_set_dimensions(avctx, w, h);
|
||||
|
||||
if (encoding == 0) {
|
||||
if (c->video_size < FFALIGN(avctx->width, 16) *
|
||||
avctx->height * c->bpp / 8)
|
||||
return AVERROR_INVALIDDATA;
|
||||
avctx->pix_fmt = PIX_FMT_PAL8;
|
||||
} else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) {
|
||||
if (c->palette_size != (1 << (c->bpp - 1)))
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (c->video_size < avctx->width * avctx->height * c->bpp / 8)
|
||||
return AVERROR_INVALIDDATA;
|
||||
avctx->pix_fmt = PIX_FMT_BGR24;
|
||||
} else {
|
||||
av_log_ask_for_sample(avctx, "unsupported encoding %d and bpp %d\n",
|
||||
encoding, c->bpp);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
if (p->data[0])
|
||||
avctx->release_buffer(avctx, p);
|
||||
|
||||
p->reference = 0;
|
||||
if ((ret = avctx->get_buffer(avctx, p)) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
|
||||
return ret;
|
||||
}
|
||||
p->pict_type = AV_PICTURE_TYPE_I;
|
||||
|
||||
if (encoding) {
|
||||
av_fast_padded_malloc(&c->new_video, &c->new_video_size,
|
||||
h * w + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!c->new_video)
|
||||
return AVERROR(ENOMEM);
|
||||
if (c->bpp == 8)
|
||||
cdxl_decode_ham8(c);
|
||||
else
|
||||
cdxl_decode_ham6(c);
|
||||
} else {
|
||||
cdxl_decode_rgb(c);
|
||||
}
|
||||
*data_size = sizeof(AVFrame);
|
||||
*(AVFrame*)data = c->frame;
|
||||
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
static av_cold int cdxl_decode_end(AVCodecContext *avctx)
|
||||
{
|
||||
CDXLVideoContext *c = avctx->priv_data;
|
||||
|
||||
av_free(c->new_video);
|
||||
if (c->frame.data[0])
|
||||
avctx->release_buffer(avctx, &c->frame);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVCodec ff_cdxl_decoder = {
|
||||
.name = "cdxl",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.id = CODEC_ID_CDXL,
|
||||
.priv_data_size = sizeof(CDXLVideoContext),
|
||||
.init = cdxl_decode_init,
|
||||
.close = cdxl_decode_end,
|
||||
.decode = cdxl_decode_frame,
|
||||
.capabilities = CODEC_CAP_DR1,
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Commodore CDXL video"),
|
||||
};
|
@ -711,8 +711,7 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
|
||||
|
||||
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
|
||||
/* wait for the referenced mb row to be finished */
|
||||
int mb_row = FFMIN(s->mb_height - 1,
|
||||
s->mb_y + ((yoff + my + 5 + 8 * height) >> 4));
|
||||
int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
|
||||
AVFrame *f = dir ? &s->next_picture_ptr->f : &s->last_picture_ptr->f;
|
||||
ff_thread_await_progress(f, mb_row, 0);
|
||||
}
|
||||
@ -1361,6 +1360,53 @@ static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static void rv34_decoder_free(RV34DecContext *r)
|
||||
{
|
||||
av_freep(&r->intra_types_hist);
|
||||
r->intra_types = NULL;
|
||||
av_freep(&r->tmp_b_block_base);
|
||||
av_freep(&r->mb_type);
|
||||
av_freep(&r->cbp_luma);
|
||||
av_freep(&r->cbp_chroma);
|
||||
av_freep(&r->deblock_coefs);
|
||||
}
|
||||
|
||||
|
||||
static int rv34_decoder_alloc(RV34DecContext *r)
|
||||
{
|
||||
r->intra_types_stride = r->s.mb_width * 4 + 4;
|
||||
|
||||
r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height *
|
||||
sizeof(*r->cbp_chroma));
|
||||
r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height *
|
||||
sizeof(*r->cbp_luma));
|
||||
r->deblock_coefs = av_malloc(r->s.mb_stride * r->s.mb_height *
|
||||
sizeof(*r->deblock_coefs));
|
||||
r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
|
||||
sizeof(*r->intra_types_hist));
|
||||
r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
|
||||
sizeof(*r->mb_type));
|
||||
|
||||
if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
|
||||
r->intra_types_hist && r->mb_type)) {
|
||||
rv34_decoder_free(r);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int rv34_decoder_realloc(RV34DecContext *r)
|
||||
{
|
||||
rv34_decoder_free(r);
|
||||
return rv34_decoder_alloc(r);
|
||||
}
|
||||
|
||||
|
||||
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
|
||||
{
|
||||
MpegEncContext *s = &r->s;
|
||||
@ -1376,22 +1422,19 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
|
||||
}
|
||||
|
||||
if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
|
||||
if(s->width != r->si.width || s->height != r->si.height){
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "Changing dimensions to %dx%d\n", r->si.width,r->si.height);
|
||||
if (s->width != r->si.width || s->height != r->si.height) {
|
||||
int err;
|
||||
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
|
||||
r->si.width, r->si.height);
|
||||
MPV_common_end(s);
|
||||
s->width = r->si.width;
|
||||
s->height = r->si.height;
|
||||
avcodec_set_dimensions(s->avctx, s->width, s->height);
|
||||
if(MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
r->intra_types_stride = s->mb_width*4 + 4;
|
||||
r->intra_types_hist = av_realloc(r->intra_types_hist, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
|
||||
r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
|
||||
r->mb_type = av_realloc(r->mb_type, r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type));
|
||||
r->cbp_luma = av_realloc(r->cbp_luma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma));
|
||||
r->cbp_chroma = av_realloc(r->cbp_chroma, r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
|
||||
r->deblock_coefs = av_realloc(r->deblock_coefs, r->s.mb_stride * r->s.mb_height * sizeof(*r->deblock_coefs));
|
||||
av_freep(&r->tmp_b_block_base);
|
||||
if ((err = MPV_common_init(s)) < 0)
|
||||
return err;
|
||||
if ((err = rv34_decoder_realloc(r)) < 0)
|
||||
return err;
|
||||
}
|
||||
s->pict_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
|
||||
if(MPV_frame_start(s, s->avctx) < 0)
|
||||
@ -1500,6 +1543,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
|
||||
{
|
||||
RV34DecContext *r = avctx->priv_data;
|
||||
MpegEncContext *s = &r->s;
|
||||
int ret;
|
||||
|
||||
MPV_decode_defaults(s);
|
||||
s->avctx = avctx;
|
||||
@ -1516,8 +1560,8 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
|
||||
avctx->has_b_frames = 1;
|
||||
s->low_delay = 0;
|
||||
|
||||
if (MPV_common_init(s) < 0)
|
||||
return -1;
|
||||
if ((ret = MPV_common_init(s)) < 0)
|
||||
return ret;
|
||||
|
||||
ff_h264_pred_init(&r->h, CODEC_ID_RV40, 8, 1);
|
||||
|
||||
@ -1530,15 +1574,8 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
|
||||
ff_rv40dsp_init(&r->rdsp, &r->s.dsp);
|
||||
#endif
|
||||
|
||||
r->intra_types_stride = 4*s->mb_stride + 4;
|
||||
r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
|
||||
r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
|
||||
|
||||
r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height * sizeof(*r->mb_type));
|
||||
|
||||
r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_luma));
|
||||
r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->cbp_chroma));
|
||||
r->deblock_coefs = av_malloc(r->s.mb_stride * r->s.mb_height * sizeof(*r->deblock_coefs));
|
||||
if ((ret = rv34_decoder_alloc(r)) < 0)
|
||||
return ret;
|
||||
|
||||
if(!intra_vlcs[0].cbppattern[0].bits)
|
||||
rv34_init_tables();
|
||||
@ -1548,40 +1585,17 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
|
||||
|
||||
int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
|
||||
{
|
||||
int err;
|
||||
RV34DecContext *r = avctx->priv_data;
|
||||
|
||||
r->s.avctx = avctx;
|
||||
|
||||
if (avctx->internal->is_copy) {
|
||||
r->cbp_chroma = av_malloc(r->s.mb_stride * r->s.mb_height *
|
||||
sizeof(*r->cbp_chroma));
|
||||
r->cbp_luma = av_malloc(r->s.mb_stride * r->s.mb_height *
|
||||
sizeof(*r->cbp_luma));
|
||||
r->deblock_coefs = av_malloc(r->s.mb_stride * r->s.mb_height *
|
||||
sizeof(*r->deblock_coefs));
|
||||
r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
|
||||
sizeof(*r->intra_types_hist));
|
||||
r->mb_type = av_malloc(r->s.mb_stride * r->s.mb_height *
|
||||
sizeof(*r->mb_type));
|
||||
|
||||
if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
|
||||
r->intra_types_hist && r->mb_type)) {
|
||||
av_freep(&r->cbp_chroma);
|
||||
av_freep(&r->cbp_luma);
|
||||
av_freep(&r->deblock_coefs);
|
||||
av_freep(&r->intra_types_hist);
|
||||
av_freep(&r->mb_type);
|
||||
r->intra_types = NULL;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
|
||||
r->tmp_b_block_base = NULL;
|
||||
|
||||
memset(r->mb_type, 0, r->s.mb_stride * r->s.mb_height *
|
||||
sizeof(*r->mb_type));
|
||||
|
||||
MPV_common_init(&r->s);
|
||||
if ((err = MPV_common_init(&r->s)) < 0)
|
||||
return err;
|
||||
if ((err = rv34_decoder_alloc(r)) < 0)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -1595,6 +1609,16 @@ int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte
|
||||
if (dst == src || !s1->context_initialized)
|
||||
return 0;
|
||||
|
||||
if (s->height != s1->height || s->width != s1->width) {
|
||||
MPV_common_end(s);
|
||||
s->height = s1->height;
|
||||
s->width = s1->width;
|
||||
if ((err = MPV_common_init(s)) < 0)
|
||||
return err;
|
||||
if ((err = rv34_decoder_realloc(r)) < 0)
|
||||
return err;
|
||||
}
|
||||
|
||||
if ((err = ff_mpeg_update_thread_context(dst, src)))
|
||||
return err;
|
||||
|
||||
@ -1656,15 +1680,19 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
|
||||
if(get_slice_offset(avctx, slices_hdr, 0) < 0 ||
|
||||
get_slice_offset(avctx, slices_hdr, 0) > buf_size){
|
||||
av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
init_get_bits(&s->gb, buf+get_slice_offset(avctx, slices_hdr, 0), (buf_size-get_slice_offset(avctx, slices_hdr, 0))*8);
|
||||
if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
|
||||
av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
|
||||
return -1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) &&
|
||||
si.type == AV_PICTURE_TYPE_B) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
|
||||
"reference data.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if ((!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) && si.type == AV_PICTURE_TYPE_B)
|
||||
return -1;
|
||||
if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
|
||||
|| (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
|
||||
|| avctx->skip_frame >= AVDISCARD_ALL)
|
||||
@ -1712,11 +1740,12 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
|
||||
if(last && s->current_picture_ptr){
|
||||
if(r->loop_filter)
|
||||
r->loop_filter(r, s->mb_height - 1);
|
||||
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f,
|
||||
s->mb_height - 1, 0);
|
||||
ff_er_frame_end(s);
|
||||
MPV_frame_end(s);
|
||||
|
||||
if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
|
||||
ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
|
||||
|
||||
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
|
||||
*pict = *(AVFrame*)s->current_picture_ptr;
|
||||
} else if (s->last_picture_ptr != NULL) {
|
||||
@ -1737,14 +1766,7 @@ av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
|
||||
RV34DecContext *r = avctx->priv_data;
|
||||
|
||||
MPV_common_end(&r->s);
|
||||
|
||||
av_freep(&r->intra_types_hist);
|
||||
r->intra_types = NULL;
|
||||
av_freep(&r->tmp_b_block_base);
|
||||
av_freep(&r->mb_type);
|
||||
av_freep(&r->cbp_luma);
|
||||
av_freep(&r->cbp_chroma);
|
||||
av_freep(&r->deblock_coefs);
|
||||
rv34_decoder_free(r);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -110,7 +110,7 @@ typedef struct RV34DecContext{

    uint16_t *cbp_luma;      ///< CBP values for luma subblocks
    uint8_t  *cbp_chroma;    ///< CBP values for chroma subblocks
    int      *deblock_coefs; ///< deblock coefficients for each macroblock
    uint16_t *deblock_coefs; ///< deblock coefficients for each macroblock

    /** 8x8 block available flags (for MV prediction) */
    DECLARE_ALIGNED(8, uint32_t, avail_cache)[3*4];
@@ -360,7 +360,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
     * in addition to the coded ones because because they lie at the edge of
     * 8x8 block with different enough motion vectors
     */
    int mvmasks[4];
    unsigned mvmasks[4];

    mb_pos = row * s->mb_stride;
    for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
@@ -376,7 +376,8 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
        int c_v_deblock[2], c_h_deblock[2];
        int clip_left;
        int avail[4];
        int y_to_deblock, c_to_deblock[2];
        unsigned y_to_deblock;
        int c_to_deblock[2];

        q = s->current_picture_ptr->f.qscale_table[mb_pos];
        alpha = rv40_alpha_tab[q];
@@ -32,6 +32,7 @@
#include <limits.h>
#include "avcodec.h"
#include "get_bits.h"
#include "libavutil/crc.h"

#define FORMAT_SIMPLE    1
#define FORMAT_ENCRYPTED 2
@@ -58,6 +59,7 @@ typedef struct TTAContext {
    AVCodecContext *avctx;
    AVFrame frame;
    GetBitContext gb;
    const AVCRC *crc_table;

    int format, channels, bps, data_length;
    int frame_length, last_frame_length, total_frames;
@@ -198,6 +200,20 @@ static const int64_t tta_channel_layouts[7] = {
    AV_CH_LAYOUT_7POINT1_WIDE
};

static int tta_check_crc(TTAContext *s, const uint8_t *buf, int buf_size)
{
    uint32_t crc, CRC;

    CRC = AV_RL32(buf + buf_size);
    crc = av_crc(s->crc_table, 0xFFFFFFFFU, buf, buf_size);
    if (CRC != (crc ^ 0xFFFFFFFFU)) {
        av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

static av_cold int tta_decode_init(AVCodecContext * avctx)
{
    TTAContext *s = avctx->priv_data;
@@ -211,6 +227,12 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
    init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8);
    if (show_bits_long(&s->gb, 32) == AV_RL32("TTA1"))
    {
        if (avctx->err_recognition & AV_EF_CRCCHECK) {
            s->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE);
            if (tta_check_crc(s, avctx->extradata, 18))
                return AVERROR_INVALIDDATA;
        }

        /* signature */
        skip_bits_long(&s->gb, 32);

@@ -274,6 +296,12 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
            s->data_length, s->frame_length, s->last_frame_length, s->total_frames);

        // FIXME: seek table
        if (get_bits_left(&s->gb) < 32 * s->total_frames + 32)
            av_log(avctx, AV_LOG_WARNING, "Seek table missing or too small\n");
        else if (avctx->err_recognition & AV_EF_CRCCHECK) {
            if (tta_check_crc(s, avctx->extradata + 22, s->total_frames * 4))
                return AVERROR_INVALIDDATA;
        }
        skip_bits_long(&s->gb, 32 * s->total_frames);
        skip_bits_long(&s->gb, 32); // CRC32 of seektable

@@ -314,6 +342,11 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
    int cur_chan = 0, framelen = s->frame_length;
    int32_t *p;

    if (avctx->err_recognition & AV_EF_CRCCHECK) {
        if (buf_size < 4 || tta_check_crc(s, buf, buf_size - 4))
            return AVERROR_INVALIDDATA;
    }

    init_get_bits(&s->gb, buf, buf_size*8);

    // FIXME: seeking
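The CRC layout checked above is simply the payload followed by its little-endian CRC-32 (the bit-reversed IEEE polynomial provided by lavu's AV_CRC_32_IEEE_LE table). A minimal standalone sketch of the same check, assuming only libavutil; the helper name check_trailing_crc32 is illustrative and not part of the patch:

    #include <stdint.h>
    #include "libavutil/crc.h"           /* av_crc(), av_crc_get_table() */
    #include "libavutil/intreadwrite.h"  /* AV_RL32() */

    /* Verify a buffer followed by its little-endian CRC-32; buf_size excludes
     * the 4 trailing CRC bytes, matching what tta_check_crc() above expects. */
    static int check_trailing_crc32(const uint8_t *buf, int buf_size)
    {
        const AVCRC *table   = av_crc_get_table(AV_CRC_32_IEEE_LE);
        uint32_t     stored   = AV_RL32(buf + buf_size);
        uint32_t     computed = av_crc(table, 0xFFFFFFFFU, buf, buf_size) ^ 0xFFFFFFFFU;
        return stored == computed ? 0 : -1;
    }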
@@ -21,7 +21,7 @@
#define AVCODEC_VERSION_H

#define LIBAVCODEC_VERSION_MAJOR 54
#define LIBAVCODEC_VERSION_MINOR  1
#define LIBAVCODEC_VERSION_MINOR  2
#define LIBAVCODEC_VERSION_MICRO 100

#define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@@ -37,7 +37,6 @@ OBJS-$(CONFIG_AMR_MUXER)                 += amr.o
OBJS-$(CONFIG_ANM_DEMUXER)               += anm.o
OBJS-$(CONFIG_APC_DEMUXER)               += apc.o
OBJS-$(CONFIG_APE_DEMUXER)               += ape.o apetag.o
OBJS-$(CONFIG_APPLEHTTP_DEMUXER)         += applehttp.o
OBJS-$(CONFIG_ASF_DEMUXER)               += asfdec.o asf.o asfcrypt.o \
                                            avlanguage.o
OBJS-$(CONFIG_ASF_MUXER)                 += asfenc.o asf.o
@@ -64,6 +63,7 @@ OBJS-$(CONFIG_CAF_MUXER)                 += cafenc.o caf.o riff.o isom.o
OBJS-$(CONFIG_CAVSVIDEO_DEMUXER)         += cavsvideodec.o rawdec.o
OBJS-$(CONFIG_CAVSVIDEO_MUXER)           += rawenc.o
OBJS-$(CONFIG_CDG_DEMUXER)               += cdg.o
OBJS-$(CONFIG_CDXL_DEMUXER)              += cdxl.o
OBJS-$(CONFIG_CRC_MUXER)                 += crcenc.o
OBJS-$(CONFIG_DAUD_DEMUXER)              += daud.o
OBJS-$(CONFIG_DAUD_MUXER)                += daud.o
@@ -114,6 +114,7 @@ OBJS-$(CONFIG_H263_DEMUXER)              += h263dec.o rawdec.o
OBJS-$(CONFIG_H263_MUXER)                += rawenc.o
OBJS-$(CONFIG_H264_DEMUXER)              += h264dec.o rawdec.o
OBJS-$(CONFIG_H264_MUXER)                += rawenc.o
OBJS-$(CONFIG_HLS_DEMUXER)               += hls.o
OBJS-$(CONFIG_ICO_DEMUXER)               += icodec.o
OBJS-$(CONFIG_IDCIN_DEMUXER)             += idcin.o
OBJS-$(CONFIG_IDF_DEMUXER)               += bintext.o
@@ -270,6 +271,7 @@ OBJS-$(CONFIG_RTPDEC)                    += rdt.o \
                                            rtpdec_asf.o \
                                            rtpdec_g726.o \
                                            rtpdec_h263.o \
                                            rtpdec_h263_rfc2190.o \
                                            rtpdec_h264.o \
                                            rtpdec_latm.o \
                                            rtpdec_mpeg4.o \
@@ -344,16 +346,18 @@ OBJS-$(CONFIG_YUV4MPEGPIPE_DEMUXER)      += yuv4mpeg.o
OBJS-$(CONFIG_LIBMODPLUG_DEMUXER)        += libmodplug.o
OBJS-$(CONFIG_LIBNUT_DEMUXER)            += libnut.o
OBJS-$(CONFIG_LIBNUT_MUXER)              += libnut.o
OBJS-$(CONFIG_LIBRTMP)                   += librtmp.o

# protocols I/O
OBJS+= avio.o aviobuf.o

OBJS-$(CONFIG_APPLEHTTP_PROTOCOL)        += applehttpproto.o
OBJS-$(CONFIG_APPLEHTTP_PROTOCOL)        += hlsproto.o
OBJS-$(CONFIG_CACHE_PROTOCOL)            += cache.o
OBJS-$(CONFIG_CONCAT_PROTOCOL)           += concat.o
OBJS-$(CONFIG_CRYPTO_PROTOCOL)           += crypto.o
OBJS-$(CONFIG_FILE_PROTOCOL)             += file.o
OBJS-$(CONFIG_GOPHER_PROTOCOL)           += gopher.o
OBJS-$(CONFIG_HLS_PROTOCOL)              += hlsproto.o
OBJS-$(CONFIG_HTTP_PROTOCOL)             += http.o httpauth.o
OBJS-$(CONFIG_HTTPPROXY_PROTOCOL)        += http.o httpauth.o
OBJS-$(CONFIG_HTTPS_PROTOCOL)            += http.o httpauth.o
@@ -361,12 +365,7 @@ OBJS-$(CONFIG_MMSH_PROTOCOL)             += mmsh.o mms.o asf.o
OBJS-$(CONFIG_MMST_PROTOCOL)             += mmst.o mms.o asf.o
OBJS-$(CONFIG_MD5_PROTOCOL)              += md5proto.o
OBJS-$(CONFIG_PIPE_PROTOCOL)             += file.o

# external or internal rtmp
RTMP-OBJS-$(CONFIG_LIBRTMP)               = librtmp.o
RTMP-OBJS-$(!CONFIG_LIBRTMP)              = rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTMP_PROTOCOL)             += $(RTMP-OBJS-yes)

OBJS-$(CONFIG_RTMP_PROTOCOL)             += rtmpproto.o rtmppkt.o
OBJS-$(CONFIG_RTP_PROTOCOL)              += rtpproto.o
OBJS-$(CONFIG_TCP_PROTOCOL)              += tcp.o
OBJS-$(CONFIG_TLS_PROTOCOL)              += tls.o
@@ -22,6 +22,7 @@
#include "rtp.h"
#include "rdt.h"
#include "url.h"
#include "version.h"

#define REGISTER_MUXER(X,x) { \
    extern AVOutputFormat ff_##x##_muxer; \
@@ -61,7 +62,6 @@ void av_register_all(void)
    REGISTER_DEMUXER  (ANM, anm);
    REGISTER_DEMUXER  (APC, apc);
    REGISTER_DEMUXER  (APE, ape);
    REGISTER_DEMUXER  (APPLEHTTP, applehttp);
    REGISTER_MUXDEMUX (ASF, asf);
    REGISTER_MUXDEMUX (ASS, ass);
    REGISTER_MUXER    (ASF_STREAM, asf_stream);
@@ -80,6 +80,7 @@ void av_register_all(void)
    REGISTER_MUXDEMUX (CAF, caf);
    REGISTER_MUXDEMUX (CAVSVIDEO, cavsvideo);
    REGISTER_DEMUXER  (CDG, cdg);
    REGISTER_DEMUXER  (CDXL, cdxl);
    REGISTER_MUXER    (CRC, crc);
    REGISTER_MUXDEMUX (DAUD, daud);
    REGISTER_DEMUXER  (DFA, dfa);
@@ -110,6 +111,7 @@ void av_register_all(void)
    REGISTER_MUXDEMUX (H261, h261);
    REGISTER_MUXDEMUX (H263, h263);
    REGISTER_MUXDEMUX (H264, h264);
    REGISTER_DEMUXER  (HLS, hls);
    REGISTER_DEMUXER  (ICO, ico);
    REGISTER_DEMUXER  (IDCIN, idcin);
    REGISTER_DEMUXER  (IDF, idf);
@@ -254,15 +256,16 @@ void av_register_all(void)
#if CONFIG_LIBMODPLUG
    REGISTER_DEMUXER  (LIBMODPLUG, libmodplug);
#endif
    REGISTER_MUXDEMUX (LIBNUT, libnut);

    /* protocols */
#if FF_API_APPLEHTTP_PROTO
    REGISTER_PROTOCOL (APPLEHTTP, applehttp);
#endif
    REGISTER_PROTOCOL (CACHE, cache);
    REGISTER_PROTOCOL (CONCAT, concat);
    REGISTER_PROTOCOL (CRYPTO, crypto);
    REGISTER_PROTOCOL (FILE, file);
    REGISTER_PROTOCOL (GOPHER, gopher);
    REGISTER_PROTOCOL (HLS, hls);
    REGISTER_PROTOCOL (HTTP, http);
    REGISTER_PROTOCOL (HTTPPROXY, httpproxy);
    REGISTER_PROTOCOL (HTTPS, https);
@@ -271,12 +274,16 @@ void av_register_all(void)
    REGISTER_PROTOCOL (MD5, md5);
    REGISTER_PROTOCOL (PIPE, pipe);
    REGISTER_PROTOCOL (RTMP, rtmp);
    REGISTER_PROTOCOL (RTMPE, rtmpe);
    REGISTER_PROTOCOL (RTMPS, rtmps);
    REGISTER_PROTOCOL (RTMPT, rtmpt);
    REGISTER_PROTOCOL (RTMPTE, rtmpte);
    REGISTER_PROTOCOL (RTP, rtp);
    REGISTER_PROTOCOL (TCP, tcp);
    REGISTER_PROTOCOL (TLS, tls);
    REGISTER_PROTOCOL (UDP, udp);

    /* external libraries */
    REGISTER_MUXDEMUX (LIBNUT, libnut);
    REGISTER_PROTOCOL (LIBRTMP, librtmp);
    REGISTER_PROTOCOL (LIBRTMPE, librtmpe);
    REGISTER_PROTOCOL (LIBRTMPS, librtmps);
    REGISTER_PROTOCOL (LIBRTMPT, librtmpt);
    REGISTER_PROTOCOL (LIBRTMPTE, librtmpte);
}
libavformat/cdxl.c (new file, 170 lines)
@@ -0,0 +1,170 @@
|
||||
/*
|
||||
* CDXL demuxer
|
||||
* Copyright (c) 2011-2012 Paul B Mahol
|
||||
*
|
||||
* This file is part of Libav.
|
||||
*
|
||||
* Libav is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Libav is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with Libav; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "libavutil/parseutils.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "avformat.h"
|
||||
#include "internal.h"
|
||||
|
||||
#define CDXL_HEADER_SIZE 32
|
||||
|
||||
typedef struct CDXLDemuxContext {
|
||||
AVClass *class;
|
||||
int sample_rate;
|
||||
char *framerate;
|
||||
AVRational fps;
|
||||
int read_chunk;
|
||||
uint8_t header[CDXL_HEADER_SIZE];
|
||||
int video_stream_index;
|
||||
int audio_stream_index;
|
||||
} CDXLDemuxContext;
|
||||
|
||||
static int cdxl_read_header(AVFormatContext *s)
|
||||
{
|
||||
CDXLDemuxContext *cdxl = s->priv_data;
|
||||
int ret;
|
||||
|
||||
if ((ret = av_parse_video_rate(&cdxl->fps, cdxl->framerate)) < 0) {
|
||||
av_log(s, AV_LOG_ERROR,
|
||||
"Could not parse framerate: %s.\n", cdxl->framerate);
|
||||
return ret;
|
||||
}
|
||||
|
||||
cdxl->read_chunk = 0;
|
||||
cdxl->video_stream_index = -1;
|
||||
cdxl->audio_stream_index = -1;
|
||||
|
||||
s->ctx_flags |= AVFMTCTX_NOHEADER;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cdxl_read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
{
|
||||
CDXLDemuxContext *cdxl = s->priv_data;
|
||||
AVIOContext *pb = s->pb;
|
||||
uint32_t current_size;
|
||||
uint16_t audio_size, palette_size;
|
||||
int32_t video_size;
|
||||
int64_t pos;
|
||||
int ret;
|
||||
|
||||
if (pb->eof_reached)
|
||||
return AVERROR_EOF;
|
||||
|
||||
pos = avio_tell(pb);
|
||||
if (!cdxl->read_chunk &&
|
||||
avio_read(pb, cdxl->header, CDXL_HEADER_SIZE) != CDXL_HEADER_SIZE)
|
||||
return AVERROR_EOF;
|
||||
if (cdxl->header[0] != 1) {
|
||||
av_log(s, AV_LOG_ERROR, "non-standard cdxl file\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
current_size = AV_RB32(&cdxl->header[2]);
|
||||
palette_size = AV_RB16(&cdxl->header[20]);
|
||||
audio_size = AV_RB16(&cdxl->header[22]);
|
||||
|
||||
if (palette_size > 512)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (current_size < audio_size + palette_size + CDXL_HEADER_SIZE)
|
||||
return AVERROR_INVALIDDATA;
|
||||
video_size = current_size - audio_size - CDXL_HEADER_SIZE;
|
||||
|
||||
if (cdxl->read_chunk && audio_size) {
|
||||
if (cdxl->audio_stream_index == -1) {
|
||||
AVStream *st = avformat_new_stream(s, NULL);
|
||||
if (!st)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
|
||||
st->codec->codec_tag = 0;
|
||||
st->codec->codec_id = CODEC_ID_PCM_S8;
|
||||
st->codec->channels = cdxl->header[1] & 0x10 ? 2 : 1;
|
||||
st->codec->sample_rate = cdxl->sample_rate;
|
||||
cdxl->audio_stream_index = st->index;
|
||||
avpriv_set_pts_info(st, 32, 1, cdxl->sample_rate);
|
||||
}
|
||||
|
||||
ret = av_get_packet(pb, pkt, audio_size);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
pkt->stream_index = cdxl->audio_stream_index;
|
||||
pkt->pos = pos;
|
||||
pkt->duration = audio_size;
|
||||
cdxl->read_chunk = 0;
|
||||
} else {
|
||||
if (cdxl->video_stream_index == -1) {
|
||||
AVStream *st = avformat_new_stream(s, NULL);
|
||||
if (!st)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
|
||||
st->codec->codec_tag = 0;
|
||||
st->codec->codec_id = CODEC_ID_CDXL;
|
||||
st->codec->width = AV_RB16(&cdxl->header[14]);
|
||||
st->codec->height = AV_RB16(&cdxl->header[16]);
|
||||
cdxl->video_stream_index = st->index;
|
||||
avpriv_set_pts_info(st, 63, cdxl->fps.den, cdxl->fps.num);
|
||||
}
|
||||
|
||||
if (av_new_packet(pkt, video_size + CDXL_HEADER_SIZE) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
memcpy(pkt->data, cdxl->header, CDXL_HEADER_SIZE);
|
||||
ret = avio_read(pb, pkt->data + CDXL_HEADER_SIZE, video_size);
|
||||
if (ret < 0) {
|
||||
av_free_packet(pkt);
|
||||
return ret;
|
||||
}
|
||||
pkt->stream_index = cdxl->video_stream_index;
|
||||
pkt->flags |= AV_PKT_FLAG_KEY;
|
||||
pkt->pos = pos;
|
||||
cdxl->read_chunk = audio_size;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define OFFSET(x) offsetof(CDXLDemuxContext, x)
|
||||
static const AVOption cdxl_options[] = {
|
||||
{ "sample_rate", "", OFFSET(sample_rate), AV_OPT_TYPE_INT, { .dbl = 11025 }, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
|
||||
{ "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, { .str = "10" }, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
static const AVClass cdxl_demuxer_class = {
|
||||
.class_name = "CDXL demuxer",
|
||||
.item_name = av_default_item_name,
|
||||
.option = cdxl_options,
|
||||
.version = LIBAVUTIL_VERSION_INT,
|
||||
};
|
||||
|
||||
AVInputFormat ff_cdxl_demuxer = {
|
||||
.name = "cdxl",
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Commodore CDXL video format"),
|
||||
.priv_data_size = sizeof(CDXLDemuxContext),
|
||||
.read_header = cdxl_read_header,
|
||||
.read_packet = cdxl_read_packet,
|
||||
.extensions = "cdxl,xl",
|
||||
.flags = AVFMT_GENERIC_INDEX,
|
||||
.priv_class = &cdxl_demuxer_class,
|
||||
};
|
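The CDXL demuxer registered above exposes two private decoding options, sample_rate (default 11025) and framerate (default "10"). Purely as an illustration of how such demuxer options are passed on the command line (the file name is a placeholder, not part of the patch):

    ffplay -framerate 15 -sample_rate 22050 input.cdxl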
@ -93,7 +93,7 @@ struct variant {
|
||||
uint8_t key[16];
|
||||
};
|
||||
|
||||
typedef struct AppleHTTPContext {
|
||||
typedef struct HLSContext {
|
||||
int n_variants;
|
||||
struct variant **variants;
|
||||
int cur_seq_no;
|
||||
@ -103,7 +103,7 @@ typedef struct AppleHTTPContext {
|
||||
int64_t seek_timestamp;
|
||||
int seek_flags;
|
||||
AVIOInterruptCB *interrupt_callback;
|
||||
} AppleHTTPContext;
|
||||
} HLSContext;
|
||||
|
||||
static int read_chomp_line(AVIOContext *s, char *buf, int maxlen)
|
||||
{
|
||||
@ -122,7 +122,7 @@ static void free_segment_list(struct variant *var)
|
||||
var->n_segments = 0;
|
||||
}
|
||||
|
||||
static void free_variant_list(AppleHTTPContext *c)
|
||||
static void free_variant_list(HLSContext *c)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < c->n_variants; i++) {
|
||||
@ -152,7 +152,7 @@ static void reset_packet(AVPacket *pkt)
|
||||
pkt->data = NULL;
|
||||
}
|
||||
|
||||
static struct variant *new_variant(AppleHTTPContext *c, int bandwidth,
|
||||
static struct variant *new_variant(HLSContext *c, int bandwidth,
|
||||
const char *url, const char *base)
|
||||
{
|
||||
struct variant *var = av_mallocz(sizeof(struct variant));
|
||||
@ -199,7 +199,7 @@ static void handle_key_args(struct key_info *info, const char *key,
|
||||
}
|
||||
}
|
||||
|
||||
static int parse_playlist(AppleHTTPContext *c, const char *url,
|
||||
static int parse_playlist(HLSContext *c, const char *url,
|
||||
struct variant *var, AVIOContext *in)
|
||||
{
|
||||
int ret = 0, duration = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
|
||||
@ -373,7 +373,7 @@ static int open_input(struct variant *var)
|
||||
static int read_data(void *opaque, uint8_t *buf, int buf_size)
|
||||
{
|
||||
struct variant *v = opaque;
|
||||
AppleHTTPContext *c = v->parent->priv_data;
|
||||
HLSContext *c = v->parent->priv_data;
|
||||
int ret, i;
|
||||
|
||||
restart:
|
||||
@ -445,9 +445,9 @@ reload:
|
||||
goto restart;
|
||||
}
|
||||
|
||||
static int applehttp_read_header(AVFormatContext *s)
|
||||
static int hls_read_header(AVFormatContext *s)
|
||||
{
|
||||
AppleHTTPContext *c = s->priv_data;
|
||||
HLSContext *c = s->priv_data;
|
||||
int ret = 0, i, j, stream_offset = 0;
|
||||
|
||||
c->interrupt_callback = &s->interrupt_callback;
|
||||
@ -557,7 +557,7 @@ fail:
|
||||
|
||||
static int recheck_discard_flags(AVFormatContext *s, int first)
|
||||
{
|
||||
AppleHTTPContext *c = s->priv_data;
|
||||
HLSContext *c = s->priv_data;
|
||||
int i, changed = 0;
|
||||
|
||||
/* Check if any new streams are needed */
|
||||
@ -590,9 +590,9 @@ static int recheck_discard_flags(AVFormatContext *s, int first)
|
||||
return changed;
|
||||
}
|
||||
|
||||
static int applehttp_read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
{
|
||||
AppleHTTPContext *c = s->priv_data;
|
||||
HLSContext *c = s->priv_data;
|
||||
int ret, i, minvariant = -1;
|
||||
|
||||
if (c->first_packet) {
|
||||
@ -668,18 +668,18 @@ start:
|
||||
return AVERROR_EOF;
|
||||
}
|
||||
|
||||
static int applehttp_close(AVFormatContext *s)
|
||||
static int hls_close(AVFormatContext *s)
|
||||
{
|
||||
AppleHTTPContext *c = s->priv_data;
|
||||
HLSContext *c = s->priv_data;
|
||||
|
||||
free_variant_list(c);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int applehttp_read_seek(AVFormatContext *s, int stream_index,
|
||||
static int hls_read_seek(AVFormatContext *s, int stream_index,
|
||||
int64_t timestamp, int flags)
|
||||
{
|
||||
AppleHTTPContext *c = s->priv_data;
|
||||
HLSContext *c = s->priv_data;
|
||||
int i, j, ret;
|
||||
|
||||
if ((flags & AVSEEK_FLAG_BYTE) || !c->variants[0]->finished)
|
||||
@ -729,7 +729,7 @@ static int applehttp_read_seek(AVFormatContext *s, int stream_index,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int applehttp_probe(AVProbeData *p)
|
||||
static int hls_probe(AVProbeData *p)
|
||||
{
|
||||
/* Require #EXTM3U at the start, and either one of the ones below
|
||||
* somewhere for a proper match. */
|
||||
@ -742,13 +742,13 @@ static int applehttp_probe(AVProbeData *p)
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVInputFormat ff_applehttp_demuxer = {
|
||||
.name = "applehttp",
|
||||
AVInputFormat ff_hls_demuxer = {
|
||||
.name = "hls,applehttp",
|
||||
.long_name = NULL_IF_CONFIG_SMALL("Apple HTTP Live Streaming format"),
|
||||
.priv_data_size = sizeof(AppleHTTPContext),
|
||||
.read_probe = applehttp_probe,
|
||||
.read_header = applehttp_read_header,
|
||||
.read_packet = applehttp_read_packet,
|
||||
.read_close = applehttp_close,
|
||||
.read_seek = applehttp_read_seek,
|
||||
.priv_data_size = sizeof(HLSContext),
|
||||
.read_probe = hls_probe,
|
||||
.read_header = hls_read_header,
|
||||
.read_packet = hls_read_packet,
|
||||
.read_close = hls_close,
|
||||
.read_seek = hls_read_seek,
|
||||
};
|
@ -29,6 +29,7 @@
|
||||
#include "avformat.h"
|
||||
#include "internal.h"
|
||||
#include "url.h"
|
||||
#include "version.h"
|
||||
#include <unistd.h>
|
||||
|
||||
/*
|
||||
@ -53,7 +54,7 @@ struct variant {
|
||||
char url[MAX_URL_SIZE];
|
||||
};
|
||||
|
||||
typedef struct AppleHTTPContext {
|
||||
typedef struct HLSContext {
|
||||
char playlisturl[MAX_URL_SIZE];
|
||||
int target_duration;
|
||||
int start_seq_no;
|
||||
@ -65,7 +66,7 @@ typedef struct AppleHTTPContext {
|
||||
int cur_seq_no;
|
||||
URLContext *seg_hd;
|
||||
int64_t last_load_time;
|
||||
} AppleHTTPContext;
|
||||
} HLSContext;
|
||||
|
||||
static int read_chomp_line(AVIOContext *s, char *buf, int maxlen)
|
||||
{
|
||||
@ -75,7 +76,7 @@ static int read_chomp_line(AVIOContext *s, char *buf, int maxlen)
|
||||
return len;
|
||||
}
|
||||
|
||||
static void free_segment_list(AppleHTTPContext *s)
|
||||
static void free_segment_list(HLSContext *s)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < s->n_segments; i++)
|
||||
@ -84,7 +85,7 @@ static void free_segment_list(AppleHTTPContext *s)
|
||||
s->n_segments = 0;
|
||||
}
|
||||
|
||||
static void free_variant_list(AppleHTTPContext *s)
|
||||
static void free_variant_list(HLSContext *s)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < s->n_variants; i++)
|
||||
@ -108,7 +109,7 @@ static void handle_variant_args(struct variant_info *info, const char *key,
|
||||
|
||||
static int parse_playlist(URLContext *h, const char *url)
|
||||
{
|
||||
AppleHTTPContext *s = h->priv_data;
|
||||
HLSContext *s = h->priv_data;
|
||||
AVIOContext *in;
|
||||
int ret = 0, duration = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
|
||||
char line[1024];
|
||||
@ -174,9 +175,9 @@ fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int applehttp_close(URLContext *h)
|
||||
static int hls_close(URLContext *h)
|
||||
{
|
||||
AppleHTTPContext *s = h->priv_data;
|
||||
HLSContext *s = h->priv_data;
|
||||
|
||||
free_segment_list(s);
|
||||
free_variant_list(s);
|
||||
@ -184,9 +185,9 @@ static int applehttp_close(URLContext *h)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int applehttp_open(URLContext *h, const char *uri, int flags)
|
||||
static int hls_open(URLContext *h, const char *uri, int flags)
|
||||
{
|
||||
AppleHTTPContext *s = h->priv_data;
|
||||
HLSContext *s = h->priv_data;
|
||||
int ret, i;
|
||||
const char *nested_url;
|
||||
|
||||
@ -195,16 +196,38 @@ static int applehttp_open(URLContext *h, const char *uri, int flags)
|
||||
|
||||
h->is_streamed = 1;
|
||||
|
||||
if (av_strstart(uri, "applehttp+", &nested_url)) {
|
||||
if (av_strstart(uri, "hls+", &nested_url)) {
|
||||
av_strlcpy(s->playlisturl, nested_url, sizeof(s->playlisturl));
|
||||
} else if (av_strstart(uri, "hls://", &nested_url)) {
|
||||
av_log(h, AV_LOG_ERROR,
|
||||
"No nested protocol specified. Specify e.g. hls+http://%s\n",
|
||||
nested_url);
|
||||
ret = AVERROR(EINVAL);
|
||||
goto fail;
|
||||
#if FF_API_APPLEHTTP_PROTO
|
||||
} else if (av_strstart(uri, "applehttp+", &nested_url)) {
|
||||
av_strlcpy(s->playlisturl, nested_url, sizeof(s->playlisturl));
|
||||
av_log(h, AV_LOG_WARNING,
|
||||
"The applehttp protocol is deprecated, use hls+%s as url "
|
||||
"instead.\n", nested_url);
|
||||
} else if (av_strstart(uri, "applehttp://", &nested_url)) {
|
||||
av_strlcpy(s->playlisturl, "http://", sizeof(s->playlisturl));
|
||||
av_strlcat(s->playlisturl, nested_url, sizeof(s->playlisturl));
|
||||
av_log(h, AV_LOG_WARNING,
|
||||
"The applehttp protocol is deprecated, use hls+http://%s as url "
|
||||
"instead.\n", nested_url);
|
||||
#endif
|
||||
} else {
|
||||
av_log(h, AV_LOG_ERROR, "Unsupported url %s\n", uri);
|
||||
ret = AVERROR(EINVAL);
|
||||
goto fail;
|
||||
}
|
||||
av_log(h, AV_LOG_WARNING,
|
||||
"Using the hls protocol is discouraged, please try using the "
|
||||
"hls demuxer instead. The hls demuxer should be more complete "
|
||||
"and work as well as the protocol implementation. (If not, "
|
||||
"please report it.) To use the demuxer, simply use %s as url.\n",
|
||||
s->playlisturl);
|
||||
|
||||
if ((ret = parse_playlist(h, s->playlisturl)) < 0)
|
||||
goto fail;
|
||||
@ -235,13 +258,13 @@ static int applehttp_open(URLContext *h, const char *uri, int flags)
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
applehttp_close(h);
|
||||
hls_close(h);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int applehttp_read(URLContext *h, uint8_t *buf, int size)
|
||||
static int hls_read(URLContext *h, uint8_t *buf, int size)
|
||||
{
|
||||
AppleHTTPContext *s = h->priv_data;
|
||||
HLSContext *s = h->priv_data;
|
||||
const char *url;
|
||||
int ret;
|
||||
int64_t reload_interval;
|
||||
@ -303,11 +326,22 @@ retry:
|
||||
goto start;
|
||||
}
|
||||
|
||||
#if FF_API_APPLEHTTP_PROTO
|
||||
URLProtocol ff_applehttp_protocol = {
|
||||
.name = "applehttp",
|
||||
.url_open = applehttp_open,
|
||||
.url_read = applehttp_read,
|
||||
.url_close = applehttp_close,
|
||||
.url_open = hls_open,
|
||||
.url_read = hls_read,
|
||||
.url_close = hls_close,
|
||||
.flags = URL_PROTOCOL_FLAG_NESTED_SCHEME,
|
||||
.priv_data_size = sizeof(AppleHTTPContext),
|
||||
.priv_data_size = sizeof(HLSContext),
|
||||
};
|
||||
#endif
|
||||
|
||||
URLProtocol ff_hls_protocol = {
|
||||
.name = "hls",
|
||||
.url_open = hls_open,
|
||||
.url_read = hls_read,
|
||||
.url_close = hls_close,
|
||||
.flags = URL_PROTOCOL_FLAG_NESTED_SCHEME,
|
||||
.priv_data_size = sizeof(HLSContext),
|
||||
};
|
@@ -152,7 +152,7 @@ static int rtmp_get_file_handle(URLContext *s)
    return RTMP_Socket(r);
}

URLProtocol ff_rtmp_protocol = {
URLProtocol ff_librtmp_protocol = {
    .name           = "rtmp",
    .url_open       = rtmp_open,
    .url_read       = rtmp_read,
@@ -165,7 +165,7 @@ URLProtocol ff_rtmp_protocol = {
    .flags          = URL_PROTOCOL_FLAG_NETWORK,
};

URLProtocol ff_rtmpt_protocol = {
URLProtocol ff_librtmpt_protocol = {
    .name           = "rtmpt",
    .url_open       = rtmp_open,
    .url_read       = rtmp_read,
@@ -178,7 +178,7 @@ URLProtocol ff_rtmpt_protocol = {
    .flags          = URL_PROTOCOL_FLAG_NETWORK,
};

URLProtocol ff_rtmpe_protocol = {
URLProtocol ff_librtmpe_protocol = {
    .name           = "rtmpe",
    .url_open       = rtmp_open,
    .url_read       = rtmp_read,
@@ -191,7 +191,7 @@ URLProtocol ff_rtmpe_protocol = {
    .flags          = URL_PROTOCOL_FLAG_NETWORK,
};

URLProtocol ff_rtmpte_protocol = {
URLProtocol ff_librtmpte_protocol = {
    .name           = "rtmpte",
    .url_open       = rtmp_open,
    .url_read       = rtmp_read,
@@ -204,7 +204,7 @@ URLProtocol ff_rtmpte_protocol = {
    .flags          = URL_PROTOCOL_FLAG_NETWORK,
};

URLProtocol ff_rtmps_protocol = {
URLProtocol ff_librtmps_protocol = {
    .name           = "rtmps",
    .url_open       = rtmp_open,
    .url_read       = rtmp_read,
@ -30,6 +30,7 @@
|
||||
#include "avc.h"
|
||||
#include "libavcodec/get_bits.h"
|
||||
#include "libavcodec/put_bits.h"
|
||||
#include "libavcodec/vc1.h"
|
||||
#include "internal.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/intfloat.h"
|
||||
@ -433,6 +434,98 @@ static int mov_write_wave_tag(AVIOContext *pb, MOVTrack *track)
|
||||
return update_size(pb, pos);
|
||||
}
|
||||
|
||||
static int mov_write_dvc1_structs(MOVTrack *track, uint8_t *buf)
{
    uint8_t *unescaped;
    const uint8_t *start, *next, *end = track->vos_data + track->vos_len;
    int unescaped_size, seq_found = 0;
    int level = 0, interlace = 0;
    int packet_seq = track->vc1_info.packet_seq;
    int packet_entry = track->vc1_info.packet_entry;
    int slices = track->vc1_info.slices;
    PutBitContext pbc;

    if (track->start_dts == AV_NOPTS_VALUE) {
        /* No packets written yet, vc1_info isn't authoritative yet. */
        /* Assume inline sequence and entry headers. This will be
         * overwritten at the end if the file is seekable. */
        packet_seq = packet_entry = 1;
    }

    unescaped = av_mallocz(track->vos_len + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!unescaped)
        return AVERROR(ENOMEM);
    start = find_next_marker(track->vos_data, end);
    for (next = start; next < end; start = next) {
        GetBitContext gb;
        int size;
        next = find_next_marker(start + 4, end);
        size = next - start - 4;
        if (size <= 0)
            continue;
        unescaped_size = vc1_unescape_buffer(start + 4, size, unescaped);
        init_get_bits(&gb, unescaped, 8 * unescaped_size);
        if (AV_RB32(start) == VC1_CODE_SEQHDR) {
            int profile = get_bits(&gb, 2);
            if (profile != PROFILE_ADVANCED) {
                av_free(unescaped);
                return AVERROR(ENOSYS);
            }
            seq_found = 1;
            level = get_bits(&gb, 3);
            /* chromaformat, frmrtq_postproc, bitrtq_postproc, postprocflag,
             * width, height */
            skip_bits_long(&gb, 2 + 3 + 5 + 1 + 2*12);
            skip_bits(&gb, 1); /* broadcast */
            interlace = get_bits1(&gb);
            skip_bits(&gb, 4); /* tfcntrflag, finterpflag, reserved, psf */
        }
    }
    if (!seq_found) {
        av_free(unescaped);
        return AVERROR(ENOSYS);
    }

    init_put_bits(&pbc, buf, 7);
    /* VC1DecSpecStruc */
    put_bits(&pbc, 4, 12); /* profile - advanced */
    put_bits(&pbc, 3, level);
    put_bits(&pbc, 1, 0); /* reserved */
    /* VC1AdvDecSpecStruc */
    put_bits(&pbc, 3, level);
    put_bits(&pbc, 1, 0); /* cbr */
    put_bits(&pbc, 6, 0); /* reserved */
    put_bits(&pbc, 1, !interlace); /* no interlace */
    put_bits(&pbc, 1, !packet_seq); /* no multiple seq */
    put_bits(&pbc, 1, !packet_entry); /* no multiple entry */
    put_bits(&pbc, 1, !slices); /* no slice code */
    put_bits(&pbc, 1, 0); /* no bframe */
    put_bits(&pbc, 1, 0); /* reserved */
    put_bits32(&pbc, track->enc->time_base.den); /* framerate */
    flush_put_bits(&pbc);

    av_free(unescaped);

    return 0;
}

static int mov_write_dvc1_tag(AVIOContext *pb, MOVTrack *track)
{
    uint8_t buf[7] = { 0 };
    int ret;

    if ((ret = mov_write_dvc1_structs(track, buf)) < 0)
        return ret;

    avio_wb32(pb, track->vos_len + 8 + sizeof(buf));
    ffio_wfourcc(pb, "dvc1");
    track->vc1_info.struct_offset = avio_tell(pb);
    avio_write(pb, buf, sizeof(buf));
    avio_write(pb, track->vos_data, track->vos_len);

    return 0;
}

static int mov_write_glbl_tag(AVIOContext *pb, MOVTrack *track)
{
    avio_wb32(pb, track->vos_len + 8);
@ -659,6 +752,7 @@ static int mp4_get_codec_tag(AVFormatContext *s, MOVTrack *track)
    else if (track->enc->codec_id == CODEC_ID_AC3) tag = MKTAG('a','c','-','3');
    else if (track->enc->codec_id == CODEC_ID_DIRAC) tag = MKTAG('d','r','a','c');
    else if (track->enc->codec_id == CODEC_ID_MOV_TEXT) tag = MKTAG('t','x','3','g');
    else if (track->enc->codec_id == CODEC_ID_VC1) tag = MKTAG('v','c','-','1');
    else if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO) tag = MKTAG('m','p','4','v');
    else if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO) tag = MKTAG('m','p','4','a');

@ -945,6 +1039,8 @@ static int mov_write_video_tag(AVIOContext *pb, MOVTrack *track)
        mov_write_uuid_tag_ipod(pb);
    } else if (track->enc->field_order != AV_FIELD_UNKNOWN)
        mov_write_fiel_tag(pb, track);
    else if (track->enc->codec_id == CODEC_ID_VC1 && track->vos_len > 0)
        mov_write_dvc1_tag(pb, track);
    else if (track->vos_len > 0)
        mov_write_glbl_tag(pb, track);

@ -2557,6 +2653,63 @@ static int mov_parse_mpeg2_frame(AVPacket *pkt, uint32_t *flags)
    return 0;
}

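/* Scan a VC-1 packet for sequence, entry point and slice start codes to
 * decide whether it can be marked as a sync sample. If inline headers turn
 * out to be present in only some packets, the sync flags already set on
 * earlier samples are cleared again. */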
static void mov_parse_vc1_frame(AVPacket *pkt, MOVTrack *trk, int fragment)
{
    const uint8_t *start, *next, *end = pkt->data + pkt->size;
    int seq = 0, entry = 0;
    int key = pkt->flags & AV_PKT_FLAG_KEY;
    start = find_next_marker(pkt->data, end);
    for (next = start; next < end; start = next) {
        next = find_next_marker(start + 4, end);
        switch (AV_RB32(start)) {
        case VC1_CODE_SEQHDR:
            seq = 1;
            break;
        case VC1_CODE_ENTRYPOINT:
            entry = 1;
            break;
        case VC1_CODE_SLICE:
            trk->vc1_info.slices = 1;
            break;
        }
    }
    if (!trk->entry && !fragment) {
        /* First packet in first fragment */
        trk->vc1_info.first_packet_seq = seq;
        trk->vc1_info.first_packet_entry = entry;
    } else if ((seq && !trk->vc1_info.packet_seq) ||
               (entry && !trk->vc1_info.packet_entry)) {
        int i;
        for (i = 0; i < trk->entry; i++)
            trk->cluster[i].flags &= ~MOV_SYNC_SAMPLE;
        trk->has_keyframes = 0;
        if (seq)
            trk->vc1_info.packet_seq = 1;
        if (entry)
            trk->vc1_info.packet_entry = 1;
        if (!fragment) {
            /* First fragment */
            if ((!seq || trk->vc1_info.first_packet_seq) &&
                (!entry || trk->vc1_info.first_packet_entry)) {
                /* First packet had the same headers as this one, readd the
                 * sync sample flag. */
                trk->cluster[0].flags |= MOV_SYNC_SAMPLE;
                trk->has_keyframes = 1;
            }
        }
    }
    if (trk->vc1_info.packet_seq && trk->vc1_info.packet_entry)
        key = seq && entry;
    else if (trk->vc1_info.packet_seq)
        key = seq;
    else if (trk->vc1_info.packet_entry)
        key = entry;
    if (key) {
        trk->cluster[trk->entry].flags |= MOV_SYNC_SAMPLE;
        trk->has_keyframes++;
    }
}

static int mov_flush_fragment(AVFormatContext *s)
{
    MOVMuxContext *mov = s->priv_data;
@ -2788,7 +2941,9 @@ static int mov_write_packet_internal(AVFormatContext *s, AVPacket *pkt)
    trk->flags |= MOV_TRACK_CTTS;
    trk->cluster[trk->entry].cts = pkt->pts - pkt->dts;
    trk->cluster[trk->entry].flags = 0;
    if (pkt->flags & AV_PKT_FLAG_KEY) {
    if (enc->codec_id == CODEC_ID_VC1) {
        mov_parse_vc1_frame(pkt, trk, mov->fragments);
    } else if (pkt->flags & AV_PKT_FLAG_KEY) {
        if (mov->mode == MODE_MOV && enc->codec_id == CODEC_ID_MPEG2VIDEO &&
            trk->entry > 0) { // force sync sample for the first key frame
            mov_parse_mpeg2_frame(pkt, &trk->cluster[trk->entry].flags);
@ -3113,6 +3268,16 @@ static int mov_write_trailer(AVFormatContext *s)
    for (i=0; i<mov->nb_streams; i++) {
        if (mov->tracks[i].tag == MKTAG('r','t','p',' '))
            ff_mov_close_hinting(&mov->tracks[i]);
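        /* For fragmented output the packet_seq/packet_entry state is only
         * known once all packets have been seen, so rewrite the dvc1
         * decoder configuration in place if the output is seekable. */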
        if (mov->flags & FF_MOV_FLAG_FRAGMENT &&
            mov->tracks[i].vc1_info.struct_offset && s->pb->seekable) {
            int64_t off = avio_tell(pb);
            uint8_t buf[7];
            if (mov_write_dvc1_structs(&mov->tracks[i], buf) >= 0) {
                avio_seek(pb, mov->tracks[i].vc1_info.struct_offset, SEEK_SET);
                avio_write(pb, buf, 7);
                avio_seek(pb, off, SEEK_SET);
            }
        }
        av_freep(&mov->tracks[i].cluster);
        av_freep(&mov->tracks[i].frag_info);

@ -122,6 +122,15 @@ typedef struct MOVIndex {

    int nb_frag_info;
    MOVFragmentInfo *frag_info;

    struct {
        int64_t struct_offset;
        int first_packet_seq;
        int first_packet_entry;
        int packet_seq;
        int packet_entry;
        int slices;
    } vc1_info;
} MOVTrack;

typedef struct MOVMuxContext {

@ -24,6 +24,7 @@
#include "internal.h"
#include "rtpenc_chain.h"
#include "avio_internal.h"
#include "rtp.h"

int ff_mov_init_hinting(AVFormatContext *s, int index, int src_index)
{
@ -332,7 +333,7 @@ static int write_hint_packets(AVIOContext *out, const uint8_t *data,
        size -= 4;
        if (packet_len > size || packet_len <= 12)
            break;
        if (data[1] >= 200 && data[1] <= 204) {
        if (data[1] >= RTCP_SR && data[1] <= RTCP_APP) {
            /* RTCP packet, just skip */
            data += packet_len;
            size -= packet_len;

@ -66,6 +66,7 @@ void av_register_rtp_dynamic_payload_handlers(void)
    ff_register_dynamic_payload_handler(&ff_amr_wb_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h263_1998_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h263_2000_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h263_rfc2190_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_h264_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_vorbis_dynamic_handler);
    ff_register_dynamic_payload_handler(&ff_theora_dynamic_handler);

@ -39,6 +39,7 @@ extern RTPDynamicProtocolHandler ff_g726_32_dynamic_handler;
extern RTPDynamicProtocolHandler ff_g726_40_dynamic_handler;
extern RTPDynamicProtocolHandler ff_h263_1998_dynamic_handler;
extern RTPDynamicProtocolHandler ff_h263_2000_dynamic_handler;
extern RTPDynamicProtocolHandler ff_h263_rfc2190_dynamic_handler;
extern RTPDynamicProtocolHandler ff_h264_dynamic_handler;
extern RTPDynamicProtocolHandler ff_mp4a_latm_dynamic_handler;
extern RTPDynamicProtocolHandler ff_mp4v_es_dynamic_handler;

184
libavformat/rtpdec_h263_rfc2190.c
Normal file
@ -0,0 +1,184 @@
/*
 * RTP H.263 Depacketizer, RFC 2190
 * Copyright (c) 2012 Martin Storsjo
 * Based on the GStreamer H.263 Depayloder:
 * Copyright 2005 Wim Taymans
 * Copyright 2007 Edward Hervey
 * Copyright 2007 Nokia Corporation
 * Copyright 2007 Collabora Ltd, Philippe Kalaf
 * Copyright 2010 Mark Nauwelaerts
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "rtpdec_formats.h"
#include "libavutil/intreadwrite.h"
#include "libavcodec/get_bits.h"

struct PayloadContext {
    AVIOContext *buf;
    uint8_t endbyte;
    int endbyte_bits;
    uint32_t timestamp;
};

static PayloadContext *h263_new_context(void)
{
    return av_mallocz(sizeof(PayloadContext));
}

static void h263_free_context(PayloadContext *data)
{
    if (!data)
        return;
    if (data->buf) {
        uint8_t *p;
        avio_close_dyn_buf(data->buf, &p);
        av_free(p);
    }
    av_free(data);
}

static int h263_handle_packet(AVFormatContext *ctx, PayloadContext *data,
                              AVStream *st, AVPacket *pkt, uint32_t *timestamp,
                              const uint8_t *buf, int len, int flags)
{
    int f, p, i, sbit, ebit; /* Corresponding to header fields in the RFC */
    int header_size;

    if (data->buf && data->timestamp != *timestamp) {
        /* Dropping old buffered, unfinished data */
        uint8_t *p;
        avio_close_dyn_buf(data->buf, &p);
        av_free(p);
        data->buf = NULL;
    }

    if (len < 4) {
        av_log(ctx, AV_LOG_ERROR, "Too short H.263 RTP packet: %d\n", len);
        return AVERROR_INVALIDDATA;
    }

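    /* The F and P bits select among the three RFC 2190 payload header
     * formats: Mode A (4-byte header, F=0), Mode B (8 bytes, F=1, P=0) and
     * Mode C (12 bytes, F=1, P=1). The I bit is zero for intra-coded (key)
     * frames. */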
    f = buf[0] & 0x80;
    p = buf[0] & 0x40;
    if (!f) {
        /* Mode A */
        header_size = 4;
        i = buf[1] & 0x10;
    } else if (!p) {
        /* Mode B */
        header_size = 8;
        if (len < header_size) {
            av_log(ctx, AV_LOG_ERROR,
                   "Too short H.263 RTP packet: %d bytes, %d header bytes\n",
                   len, header_size);
            return AVERROR_INVALIDDATA;
        }
        i = buf[4] & 0x80;
    } else {
        /* Mode C */
        header_size = 12;
        if (len < header_size) {
            av_log(ctx, AV_LOG_ERROR,
                   "Too short H.263 RTP packet: %d bytes, %d header bytes\n",
                   len, header_size);
            return AVERROR_INVALIDDATA;
        }
        i = buf[4] & 0x80;
    }
    sbit = (buf[0] >> 3) & 0x7;
    ebit = buf[0] & 0x7;

    buf += header_size;
    len -= header_size;

    if (!data->buf) {
        /* Check the picture start code, only start buffering a new frame
         * if this is correct */
        if (!f && len > 4 && AV_RB32(buf) >> 10 == 0x20) {
            int ret = avio_open_dyn_buf(&data->buf);
            if (ret < 0)
                return ret;
            data->timestamp = *timestamp;
        } else {
            /* Frame not started yet, skipping */
            return AVERROR(EAGAIN);
        }
    }

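    /* SBIT and EBIT give the number of bits to ignore at the start and end
     * of the payload; when a frame is split at a non-byte boundary, the
     * partial byte is carried over in endbyte until the next packet
     * completes it. */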
    if (data->endbyte_bits || sbit) {
        if (data->endbyte_bits == sbit) {
            data->endbyte |= buf[0] & (0xff >> sbit);
            data->endbyte_bits = 0;
            buf++;
            len--;
            avio_w8(data->buf, data->endbyte);
        } else {
            /* Start/end skip bits not matching - missed packets? */
            GetBitContext gb;
            init_get_bits(&gb, buf, len*8 - ebit);
            skip_bits(&gb, sbit);
            if (data->endbyte_bits) {
                data->endbyte |= get_bits(&gb, 8 - data->endbyte_bits);
                avio_w8(data->buf, data->endbyte);
            }
            while (get_bits_left(&gb) >= 8)
                avio_w8(data->buf, get_bits(&gb, 8));
            data->endbyte_bits = get_bits_left(&gb);
            if (data->endbyte_bits)
                data->endbyte = get_bits(&gb, data->endbyte_bits) <<
                                (8 - data->endbyte_bits);
            ebit = 0;
            len = 0;
        }
    }
    if (ebit) {
        if (len > 0)
            avio_write(data->buf, buf, len - 1);
        data->endbyte_bits = 8 - ebit;
        data->endbyte = buf[len - 1] & (0xff << ebit);
    } else {
        avio_write(data->buf, buf, len);
    }

    if (!(flags & RTP_FLAG_MARKER))
        return AVERROR(EAGAIN);

    if (data->endbyte_bits)
        avio_w8(data->buf, data->endbyte);
    data->endbyte_bits = 0;

    av_init_packet(pkt);
    pkt->size = avio_close_dyn_buf(data->buf, &pkt->data);
    pkt->destruct = av_destruct_packet;
    pkt->stream_index = st->index;
    if (!i)
        pkt->flags |= AV_PKT_FLAG_KEY;
    data->buf = NULL;

    return 0;
}

RTPDynamicProtocolHandler ff_h263_rfc2190_dynamic_handler = {
    .codec_type = AVMEDIA_TYPE_VIDEO,
    .codec_id = CODEC_ID_H263,
    .parse_packet = h263_handle_packet,
    .alloc = h263_new_context,
    .free = h263_free_context,
    .static_payload_id = 34,
};

@ -136,9 +136,11 @@ static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    SMJPEGContext *sc = s->priv_data;
    uint32_t dtype, ret, size, timestamp;
    int64_t pos;

    if (s->pb->eof_reached)
        return AVERROR_EOF;
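    /* Remember where this chunk starts; it is stored in pkt->pos below so
     * that the generic index (AVFMT_GENERIC_INDEX) can be used for seeking. */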
    pos = avio_tell(s->pb);
    dtype = avio_rl32(s->pb);
    switch (dtype) {
    case SMJPEG_SNDD:
@ -147,6 +149,7 @@ static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
        ret = av_get_packet(s->pb, pkt, size);
        pkt->stream_index = sc->audio_stream_index;
        pkt->pts = timestamp;
        pkt->pos = pos;
        break;
    case SMJPEG_VIDD:
        timestamp = avio_rb32(s->pb);
@ -154,6 +157,7 @@ static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
        ret = av_get_packet(s->pb, pkt, size);
        pkt->stream_index = sc->video_stream_index;
        pkt->pts = timestamp;
        pkt->pos = pos;
        break;
    case SMJPEG_DONE:
        ret = AVERROR_EOF;
@ -174,4 +178,5 @@ AVInputFormat ff_smjpeg_demuxer = {
    .read_header = smjpeg_read_header,
    .read_packet = smjpeg_read_packet,
    .extensions = "mjpg",
    .flags = AVFMT_GENERIC_INDEX,
};

@ -30,7 +30,7 @@
#include "libavutil/avutil.h"

#define LIBAVFORMAT_VERSION_MAJOR 54
#define LIBAVFORMAT_VERSION_MINOR 0
#define LIBAVFORMAT_VERSION_MINOR 1
#define LIBAVFORMAT_VERSION_MICRO 100

#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
@ -68,5 +68,8 @@
#ifndef FF_API_CLOSE_INPUT_FILE
#define FF_API_CLOSE_INPUT_FILE (LIBAVFORMAT_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_APPLEHTTP_PROTO
#define FF_API_APPLEHTTP_PROTO (LIBAVFORMAT_VERSION_MAJOR < 55)
#endif

#endif /* AVFORMAT_VERSION_H */
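
The new FF_API_APPLEHTTP_PROTO macro follows libavformat's usual deprecation pattern: compatibility-only code is wrapped in the guard so that it still compiles today but drops out automatically once LIBAVFORMAT_VERSION_MAJOR reaches 55. A minimal sketch of that pattern, with a hypothetical keep_legacy_applehttp_alias() standing in for the real registration code (which is not part of this diff):

    #include "version.h"

    #if FF_API_APPLEHTTP_PROTO
    /* Compatibility-only path: kept while the guard evaluates to 1, i.e.
     * while LIBAVFORMAT_VERSION_MAJOR is below 55; removed together with
     * the guard at the next major bump. */
    static void keep_legacy_applehttp_alias(void)
    {
        /* hypothetical stand-in for the code keeping the old name working */
    }
    #endif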