Merge commit '72da8d9bb24d1b1bf74c2f1108650c0da0054d2e'

* commit '72da8d9bb24d1b1bf74c2f1108650c0da0054d2e':
  h264_parser: remove the remaining dependencies on the h264 decoder

Merged-by: Clément Bœsch <u@pkh.me>
Clément Bœsch 2016-06-19 12:43:21 +02:00
commit 0ed14bba12
7 changed files with 73 additions and 98 deletions

configure

@ -2697,7 +2697,7 @@ vp9_cuvid_decoder_deps="cuda cuvid CUVIDVP9PICPARAMS"
vp9_cuvid_decoder_select="vp9_cuvid_hwaccel"
# parsers
h264_parser_select="h264_decoder"
h264_parser_select="golomb h264dsp"
hevc_parser_select="golomb"
mpegvideo_parser_select="mpegvideo"
mpeg4video_parser_select="h263dsp mpegvideo qpeldsp"
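For context, the parser's own init path (shown in full at the end of this diff) now only needs the Golomb bitstream readers and the H.264 DSP code selected here, rather than the whole decoder. A minimal sketch, with the include names assumed from libavcodec:

#include "h264dsp.h"   /* covered by the new "h264dsp" select */
#include "golomb.h"    /* covered by the new "golomb" select  */

static av_cold int init(AVCodecParserContext *s)
{
    H264ParseContext *p = s->priv_data;
    /* 8-bit, 4:2:0 defaults; the parser only needs the start-code scanning helper. */
    ff_h264dsp_init(&p->h264dsp, 8, 1);
    return 0;
}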

libavcodec/Makefile

@ -924,7 +924,8 @@ OBJS-$(CONFIG_G729_PARSER) += g729_parser.o
OBJS-$(CONFIG_GSM_PARSER) += gsm_parser.o
OBJS-$(CONFIG_H261_PARSER) += h261_parser.o
OBJS-$(CONFIG_H263_PARSER) += h263_parser.o
OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264_parse.o h2645_parse.o
OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264_parse.o h2645_parse.o \
h264_ps.o h264_sei.o h264data.o
OBJS-$(CONFIG_HEVC_PARSER) += hevc_parser.o h2645_parse.o hevc_ps.o hevc_data.o
OBJS-$(CONFIG_MJPEG_PARSER) += mjpeg_parser.o
OBJS-$(CONFIG_MLP_PARSER) += mlp_parser.o mlp.o

libavcodec/h264.c

@ -782,34 +782,6 @@ static void flush_dpb(AVCodecContext *avctx)
h->context_initialized = 0;
}
/**
* Compute profile from profile_idc and constraint_set?_flags.
*
* @param sps SPS
*
* @return profile as defined by FF_PROFILE_H264_*
*/
int ff_h264_get_profile(const SPS *sps)
{
int profile = sps->profile_idc;
switch (sps->profile_idc) {
case FF_PROFILE_H264_BASELINE:
// constraint_set1_flag set to 1
profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
break;
case FF_PROFILE_H264_HIGH_10:
case FF_PROFILE_H264_HIGH_422:
case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
// constraint_set3_flag set to 1
profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
break;
}
return profile;
}
#if FF_API_CAP_VDPAU
static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
#endif

libavcodec/h264.h

@ -707,11 +707,6 @@ extern const uint16_t ff_h264_mb_sizes[4];
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
H264ParamSets *ps, int ignore_truncation);
/**
* compute profile from sps
*/
int ff_h264_get_profile(const SPS *sps);
/**
* Decode PPS
*/

libavcodec/h264_parse.c

@ -468,3 +468,30 @@ int ff_h264_decode_extradata(const uint8_t *data, int size, H264ParamSets *ps,
}
return size;
}
/**
* Compute profile from profile_idc and constraint_set?_flags.
*
* @param sps SPS
*
* @return profile as defined by FF_PROFILE_H264_*
*/
int ff_h264_get_profile(const SPS *sps)
{
int profile = sps->profile_idc;
switch (sps->profile_idc) {
case FF_PROFILE_H264_BASELINE:
// constraint_set1_flag set to 1
profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
break;
case FF_PROFILE_H264_HIGH_10:
case FF_PROFILE_H264_HIGH_422:
case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
// constraint_set3_flag set to 1
profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
break;
}
return profile;
}
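For reference, a minimal sketch of how the relocated helper is consumed; the actual call sites (in the parser and the decoder) are outside the hunks shown here, and sps/avctx stand for whatever the caller has already resolved:

    /* Derive the codec profile from the active SPS without any decoder code. */
    const SPS *sps = p->ps.sps;                 /* SPS referenced by the current PPS */
    avctx->profile = ff_h264_get_profile(sps);  /* FF_PROFILE_H264_*, possibly with the
                                                   CONSTRAINED/INTRA flag OR'ed in */
    avctx->level   = sps->level_idc;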

libavcodec/h264_parse.h

@ -87,4 +87,9 @@ int ff_h264_decode_extradata(const uint8_t *data, int size, struct H264ParamSets
int *is_avc, int *nal_length_size,
int err_recognition, void *logctx);
/**
* compute profile from sps
*/
int ff_h264_get_profile(const struct SPS *sps);
#endif /* AVCODEC_H264_PARSE_H */

libavcodec/h264_parser.c

@ -46,7 +46,6 @@
#include "parser.h"
typedef struct H264ParseContext {
H264Context h;
ParseContext pc;
H264ParamSets ps;
H264DSPContext h264dsp;
@ -55,6 +54,7 @@ typedef struct H264ParseContext {
int is_avc;
int nal_length_size;
int got_first;
int picture_structure;
uint8_t parse_history[6];
int parse_history_count;
int parse_last_mb;
@ -148,12 +148,12 @@ found:
return i - (state & 5) - 5 * (state > 7);
}
static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb,
AVCodecContext *avctx)
{
H264PredWeightTable pwt;
int slice_type_nos = s->pict_type & 3;
H264ParseContext *p = s->priv_data;
H264Context *h = &p->h;
int list_count, ref_count[2];
@ -164,7 +164,7 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
get_bits1(gb); // direct_spatial_mv_pred
if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->ps.pps,
slice_type_nos, h->picture_structure, h->avctx) < 0)
slice_type_nos, p->picture_structure, avctx) < 0)
return AVERROR_INVALIDDATA;
if (slice_type_nos != AV_PICTURE_TYPE_I) {
@ -178,7 +178,7 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
if (reordering_of_pic_nums_idc < 3)
get_ue_golomb_long(gb);
else if (reordering_of_pic_nums_idc > 3) {
av_log(h->avctx, AV_LOG_ERROR,
av_log(avctx, AV_LOG_ERROR,
"illegal reordering_of_pic_nums_idc %d\n",
reordering_of_pic_nums_idc);
return AVERROR_INVALIDDATA;
@ -186,7 +186,7 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
break;
if (index >= ref_count[list]) {
av_log(h->avctx, AV_LOG_ERROR,
av_log(avctx, AV_LOG_ERROR,
"reference count %d overflow\n", index);
return AVERROR_INVALIDDATA;
}
@ -205,7 +205,7 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
for (i = 0; i < MAX_MMCO_COUNT; i++) {
MMCOOpcode opcode = get_ue_golomb_31(gb);
if (opcode > (unsigned) MMCO_LONG) {
av_log(h->avctx, AV_LOG_ERROR,
av_log(avctx, AV_LOG_ERROR,
"illegal memory management control operation %d\n",
opcode);
return AVERROR_INVALIDDATA;
@ -259,7 +259,6 @@ static inline int parse_nal_units(AVCodecParserContext *s,
const uint8_t * const buf, int buf_size)
{
H264ParseContext *p = s->priv_data;
H264Context *h = &p->h;
H2645NAL nal = { NULL };
int buf_index, next_avc;
unsigned int pps_id;
@ -274,9 +273,8 @@ static inline int parse_nal_units(AVCodecParserContext *s,
s->key_frame = 0;
s->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN;
h->avctx = avctx;
ff_h264_sei_uninit(&p->sei);
h->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;
p->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;
if (!buf_size)
return 0;
@ -331,11 +329,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
nal.ref_idc = get_bits(&nal.gb, 2);
nal.type = get_bits(&nal.gb, 5);
h->gb = nal.gb;
h->nal_ref_idc = nal.ref_idc;
h->nal_unit_type = nal.type;
switch (h->nal_unit_type) {
switch (nal.type) {
case NAL_SPS:
ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps, 0);
break;
@ -364,29 +358,27 @@ static inline int parse_nal_units(AVCodecParserContext *s,
}
pps_id = get_ue_golomb(&nal.gb);
if (pps_id >= MAX_PPS_COUNT) {
av_log(h->avctx, AV_LOG_ERROR,
av_log(avctx, AV_LOG_ERROR,
"pps_id %u out of range\n", pps_id);
goto fail;
}
if (!p->ps.pps_list[pps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
av_log(avctx, AV_LOG_ERROR,
"non-existing PPS %u referenced\n", pps_id);
goto fail;
}
p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data;
if (!p->ps.sps_list[p->ps.pps->sps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
av_log(avctx, AV_LOG_ERROR,
"non-existing SPS %u referenced\n", p->ps.pps->sps_id);
goto fail;
}
p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data;
h->ps.sps = p->ps.sps;
h->ps.pps = p->ps.pps;
sps = p->ps.sps;
// heuristic to detect non marked keyframes
if (h->ps.sps->ref_frame_count <= 1 && h->ps.pps->ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
if (p->ps.sps->ref_frame_count <= 1 && p->ps.pps->ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
s->key_frame = 1;
p->poc.frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);
@ -402,19 +394,19 @@ static inline int parse_nal_units(AVCodecParserContext *s,
switch (sps->bit_depth_luma) {
case 9:
if (CHROMA444(h)) s->format = AV_PIX_FMT_YUV444P9;
else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P9;
else s->format = AV_PIX_FMT_YUV420P9;
if (sps->chroma_format_idc == 3) s->format = AV_PIX_FMT_YUV444P9;
else if (sps->chroma_format_idc == 2) s->format = AV_PIX_FMT_YUV422P9;
else s->format = AV_PIX_FMT_YUV420P9;
break;
case 10:
if (CHROMA444(h)) s->format = AV_PIX_FMT_YUV444P10;
else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P10;
else s->format = AV_PIX_FMT_YUV420P10;
if (sps->chroma_format_idc == 3) s->format = AV_PIX_FMT_YUV444P10;
else if (sps->chroma_format_idc == 2) s->format = AV_PIX_FMT_YUV422P10;
else s->format = AV_PIX_FMT_YUV420P10;
break;
case 8:
if (CHROMA444(h)) s->format = AV_PIX_FMT_YUV444P;
else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P;
else s->format = AV_PIX_FMT_YUV420P;
if (sps->chroma_format_idc == 3) s->format = AV_PIX_FMT_YUV444P;
else if (sps->chroma_format_idc == 2) s->format = AV_PIX_FMT_YUV422P;
else s->format = AV_PIX_FMT_YUV420P;
break;
default:
s->format = AV_PIX_FMT_NONE;
@ -424,22 +416,22 @@ static inline int parse_nal_units(AVCodecParserContext *s,
avctx->level = sps->level_idc;
if (sps->frame_mbs_only_flag) {
h->picture_structure = PICT_FRAME;
p->picture_structure = PICT_FRAME;
} else {
if (get_bits1(&nal.gb)) { // field_pic_flag
h->picture_structure = PICT_TOP_FIELD + get_bits1(&nal.gb); // bottom_field_flag
p->picture_structure = PICT_TOP_FIELD + get_bits1(&nal.gb); // bottom_field_flag
} else {
h->picture_structure = PICT_FRAME;
p->picture_structure = PICT_FRAME;
}
}
if (h->nal_unit_type == NAL_IDR_SLICE)
if (nal.type == NAL_IDR_SLICE)
get_ue_golomb_long(&nal.gb); /* idr_pic_id */
if (sps->poc_type == 0) {
p->poc.poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);
if (p->ps.pps->pic_order_present == 1 &&
h->picture_structure == PICT_FRAME)
p->picture_structure == PICT_FRAME)
p->poc.delta_poc_bottom = get_se_golomb(&nal.gb);
}
@ -448,7 +440,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
p->poc.delta_poc[0] = get_se_golomb(&nal.gb);
if (p->ps.pps->pic_order_present == 1 &&
h->picture_structure == PICT_FRAME)
p->picture_structure == PICT_FRAME)
p->poc.delta_poc[1] = get_se_golomb(&nal.gb);
}
@ -456,14 +448,14 @@ static inline int parse_nal_units(AVCodecParserContext *s,
* The prev_ values needed for decoding POC of the next picture are not set here. */
field_poc[0] = field_poc[1] = INT_MAX;
ff_h264_init_poc(field_poc, &s->output_picture_number, sps,
&p->poc, h->picture_structure, nal.ref_idc);
&p->poc, p->picture_structure, nal.ref_idc);
/* Continue parsing to check if MMCO_RESET is present.
* FIXME: MMCO_RESET could appear in non-first slice.
* Maybe, we should parse all undisposable non-IDR slice of this
* picture until encountering MMCO_RESET in a slice of it. */
if (h->nal_ref_idc && h->nal_unit_type != NAL_IDR_SLICE) {
got_reset = scan_mmco_reset(s, &nal.gb);
if (nal.ref_idc && nal.type != NAL_IDR_SLICE) {
got_reset = scan_mmco_reset(s, &nal.gb, avctx);
if (got_reset < 0)
goto fail;
}
@ -471,14 +463,14 @@ static inline int parse_nal_units(AVCodecParserContext *s,
/* Set up the prev_ values for decoding POC of the next picture. */
p->poc.prev_frame_num = got_reset ? 0 : p->poc.frame_num;
p->poc.prev_frame_num_offset = got_reset ? 0 : p->poc.frame_num_offset;
if (h->nal_ref_idc != 0) {
if (nal.ref_idc != 0) {
if (!got_reset) {
p->poc.prev_poc_msb = p->poc.poc_msb;
p->poc.prev_poc_lsb = p->poc.poc_lsb;
} else {
p->poc.prev_poc_msb = 0;
p->poc.prev_poc_lsb =
h->picture_structure == PICT_BOTTOM_FIELD ? 0 : field_poc[0];
p->picture_structure == PICT_BOTTOM_FIELD ? 0 : field_poc[0];
}
}
@ -504,14 +496,14 @@ static inline int parse_nal_units(AVCodecParserContext *s,
s->repeat_pict = 5;
break;
default:
s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0;
s->repeat_pict = p->picture_structure == PICT_FRAME ? 1 : 0;
break;
}
} else {
s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0;
s->repeat_pict = p->picture_structure == PICT_FRAME ? 1 : 0;
}
if (h->picture_structure == PICT_FRAME) {
if (p->picture_structure == PICT_FRAME) {
s->picture_structure = AV_PICTURE_STRUCTURE_FRAME;
if (sps->pic_struct_present_flag) {
switch (p->sei.picture_timing.pic_struct) {
@ -536,7 +528,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
s->field_order = AV_FIELD_PROGRESSIVE;
}
} else {
if (h->picture_structure == PICT_TOP_FIELD)
if (p->picture_structure == PICT_TOP_FIELD)
s->picture_structure = AV_PICTURE_STRUCTURE_TOP_FIELD;
else
s->picture_structure = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
@ -552,7 +544,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
return 0;
}
/* didn't find a picture! */
av_log(h->avctx, AV_LOG_ERROR, "missing picture in access unit with size %d\n", buf_size);
av_log(avctx, AV_LOG_ERROR, "missing picture in access unit with size %d\n", buf_size);
fail:
av_freep(&nal.rbsp_buffer);
return -1;
@ -564,20 +556,12 @@ static int h264_parse(AVCodecParserContext *s,
const uint8_t *buf, int buf_size)
{
H264ParseContext *p = s->priv_data;
H264Context *h = &p->h;
ParseContext *pc = &p->pc;
int next;
if (!p->got_first) {
p->got_first = 1;
if (avctx->extradata_size) {
h->avctx = avctx;
// must be done like in decoder, otherwise opening the parser,
// letting it create extradata and then closing and opening again
// will cause has_b_frames to be always set.
// Note that estimate_timings_from_pts does exactly this.
if (!avctx->has_b_frames)
h->low_delay = 1;
ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
&p->ps, &p->is_avc, &p->nal_length_size,
avctx->err_recognition, avctx);
@ -664,12 +648,10 @@ static int h264_split(AVCodecContext *avctx,
static void h264_close(AVCodecParserContext *s)
{
H264ParseContext *p = s->priv_data;
H264Context *h = &p->h;
ParseContext *pc = &p->pc;
int i;
av_freep(&pc->buffer);
ff_h264_free_context(h);
ff_h264_sei_uninit(&p->sei);
@ -683,14 +665,7 @@ static void h264_close(AVCodecParserContext *s)
static av_cold int init(AVCodecParserContext *s)
{
H264ParseContext *p = s->priv_data;
H264Context *h = &p->h;
h->slice_ctx = av_mallocz(sizeof(*h->slice_ctx));
if (!h->slice_ctx)
return 0;
h->nb_slice_ctx = 1;
h->slice_context_count = 1;
ff_h264dsp_init(&p->h264dsp, 8, 1);
return 0;
}