avcodec/hevcdec: remove HEVCContext usage from hevc_sei

Based on the H264 SEI implementation.

Reviewed-by: Hendrik Leppkes <h.leppkes@gmail.com>
Reviewed-by: Aaron Levinson <alevinsn@aracnet.com>
Signed-off-by: James Almer <jamrial@gmail.com>
Author: James Almer <jamrial@gmail.com>
Date:   2017-04-29 22:01:03 -03:00
parent f52fbf4f3e
commit c4b08c8a4e
5 changed files with 177 additions and 164 deletions
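For reference, the refactored entry point no longer needs a HEVCContext at all: the bit reader, log context, SEI state, parameter sets and NAL unit type are passed explicitly (see the new prototype in libavcodec/hevcdec.h below). The following is a minimal sketch of an in-tree caller; the helper name parse_one_sei and its raw-payload arguments are illustrative assumptions, only ff_hevc_decode_nal_sei() and the types it takes come from the patch itself.

#include "get_bits.h"
#include "hevcdec.h"

/* Hypothetical helper: run one emulation-prevention-stripped SEI NAL payload
 * through the new interface. */
static int parse_one_sei(void *logctx, HEVCSEIContext *sei,
                         const HEVCParamSets *ps,
                         const uint8_t *rbsp, int size, int nal_type)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, rbsp, size);
    if (ret < 0)
        return ret;
    /* nal_type is either HEVC_NAL_SEI_PREFIX or HEVC_NAL_SEI_SUFFIX */
    return ff_hevc_decode_nal_sei(&gb, logctx, sei, ps, nal_type);
}

Both the parser and the decoder follow this pattern below, each keeping a HEVCSEIContext inside its own private context.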

libavcodec/hevc_parser.c

@@ -192,6 +192,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, const uint8_t *buf,
GetBitContext *gb;
SliceHeader *sh = &h->sh;
HEVCParamSets *ps = &h->ps;
HEVCSEIContext *sei = &h->sei;
H2645Packet *pkt = &ctx->pkt;
const uint8_t *buf_end = buf + buf_size;
int state = -1, i;
@@ -212,7 +213,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, const uint8_t *buf,
h->avctx = avctx;
ff_hevc_reset_sei(h);
ff_hevc_reset_sei(sei);
if (!buf_size)
return 0;
@@ -265,7 +266,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, const uint8_t *buf,
break;
case HEVC_NAL_SEI_PREFIX:
case HEVC_NAL_SEI_SUFFIX:
ff_hevc_decode_nal_sei(h);
ff_hevc_decode_nal_sei(gb, avctx, sei, ps, h->nal_unit_type);
break;
case HEVC_NAL_TRAIL_N:
case HEVC_NAL_TRAIL_R:
@@ -290,8 +291,8 @@ static inline int parse_nal_units(AVCodecParserContext *s, const uint8_t *buf,
}
sh->first_slice_in_pic_flag = get_bits1(gb);
s->picture_structure = h->picture_struct;
s->field_order = h->picture_struct;
s->picture_structure = h->sei.picture_timing.picture_struct;
s->field_order = h->sei.picture_timing.picture_struct;
if (IS_IRAP(h)) {
s->key_frame = 1;

libavcodec/hevc_refs.c

@@ -109,8 +109,8 @@ static HEVCFrame *alloc_frame(HEVCContext *s)
for (j = 0; j < frame->ctb_count; j++)
frame->rpl_tab[j] = (RefPicListTab *)frame->rpl_buf->data;
frame->frame->top_field_first = s->picture_struct == AV_PICTURE_STRUCTURE_TOP_FIELD;
frame->frame->interlaced_frame = (s->picture_struct == AV_PICTURE_STRUCTURE_TOP_FIELD) || (s->picture_struct == AV_PICTURE_STRUCTURE_BOTTOM_FIELD);
frame->frame->top_field_first = s->sei.picture_timing.picture_struct == AV_PICTURE_STRUCTURE_TOP_FIELD;
frame->frame->interlaced_frame = (s->sei.picture_timing.picture_struct == AV_PICTURE_STRUCTURE_TOP_FIELD) || (s->sei.picture_timing.picture_struct == AV_PICTURE_STRUCTURE_BOTTOM_FIELD);
if (s->avctx->hwaccel) {
const AVHWAccel *hwaccel = s->avctx->hwaccel;
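The hunk above now derives the output frame's interlacing flags from the SEI picture timing state. As a standalone sketch of that mapping (the helper name apply_pic_struct is an assumption, not code from the patch):

#include "avcodec.h"
#include "libavutil/frame.h"

/* A top-field picture marks the frame top_field_first; either field value
 * marks it interlaced, matching the assignments in alloc_frame(). */
static void apply_pic_struct(AVFrame *frame, enum AVPictureStructure pic_struct)
{
    frame->top_field_first  = pic_struct == AV_PICTURE_STRUCTURE_TOP_FIELD;
    frame->interlaced_frame = pic_struct == AV_PICTURE_STRUCTURE_TOP_FIELD ||
                              pic_struct == AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
}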

libavcodec/hevc_sei.c

@@ -53,13 +53,12 @@ enum HEVC_SEI_TYPE {
SEI_TYPE_CONTENT_LIGHT_LEVEL_INFO = 144,
};
static int decode_nal_sei_decoded_picture_hash(HEVCContext *s)
static int decode_nal_sei_decoded_picture_hash(HEVCSEIPictureHash *s, GetBitContext *gb)
{
int cIdx, i;
uint8_t hash_type;
//uint16_t picture_crc;
//uint32_t picture_checksum;
GetBitContext *gb = &s->HEVClc->gb;
hash_type = get_bits(gb, 8);
for (cIdx = 0; cIdx < 3/*((s->sps->chroma_format_idc == 0) ? 1 : 3)*/; cIdx++) {
@@ -78,9 +77,8 @@ static int decode_nal_sei_decoded_picture_hash(HEVCContext *s)
return 0;
}
static int decode_nal_sei_mastering_display_info(HEVCContext *s)
static int decode_nal_sei_mastering_display_info(HEVCSEIMasteringDisplay *s, GetBitContext *gb)
{
GetBitContext *gb = &s->HEVClc->gb;
int i;
// Mastering primaries
for (i = 0; i < 3; i++) {
@@ -92,38 +90,35 @@ static int decode_nal_sei_mastering_display_info(HEVCContext *s)
s->white_point[1] = get_bits(gb, 16);
// Max and min luminance of mastering display
s->max_mastering_luminance = get_bits_long(gb, 32);
s->min_mastering_luminance = get_bits_long(gb, 32);
s->max_luminance = get_bits_long(gb, 32);
s->min_luminance = get_bits_long(gb, 32);
// As this SEI message comes before the first frame that references it,
// initialize the flag to 2 and decrement on IRAP access unit so it
// persists for the coded video sequence (e.g., between two IRAPs)
s->sei_mastering_display_info_present = 2;
s->present = 2;
return 0;
}
static int decode_nal_sei_content_light_info(HEVCContext *s)
static int decode_nal_sei_content_light_info(HEVCSEIContentLight *s, GetBitContext *gb)
{
GetBitContext *gb = &s->HEVClc->gb;
// Max and average light levels
s->max_content_light_level = get_bits_long(gb, 16);
s->max_pic_average_light_level = get_bits_long(gb, 16);
// As this SEI message comes before the first frame that references it,
// initialize the flag to 2 and decrement on IRAP access unit so it
// persists for the coded video sequence (e.g., between two IRAPs)
s-> sei_content_light_present = 2;
s->present = 2;
return 0;
}
static int decode_nal_sei_frame_packing_arrangement(HEVCContext *s)
static int decode_nal_sei_frame_packing_arrangement(HEVCSEIFramePacking *s, GetBitContext *gb)
{
GetBitContext *gb = &s->HEVClc->gb;
get_ue_golomb_long(gb); // frame_packing_arrangement_id
s->sei_frame_packing_present = !get_bits1(gb);
s->present = !get_bits1(gb);
if (s->sei_frame_packing_present) {
s->frame_packing_arrangement_type = get_bits(gb, 7);
if (s->present) {
s->arrangement_type = get_bits(gb, 7);
s->quincunx_subsampling = get_bits1(gb);
s->content_interpretation_type = get_bits(gb, 6);
@@ -132,7 +127,7 @@ static int decode_nal_sei_frame_packing_arrangement(HEVCContext *s)
// frame0_self_contained_flag frame1_self_contained_flag
skip_bits(gb, 6);
if (!s->quincunx_subsampling && s->frame_packing_arrangement_type != 5)
if (!s->quincunx_subsampling && s->arrangement_type != 5)
skip_bits(gb, 16); // frame[01]_grid_position_[xy]
skip_bits(gb, 8); // frame_packing_arrangement_reserved_byte
skip_bits1(gb); // frame_packing_arrangement_persistence_flag
@@ -141,41 +136,40 @@ static int decode_nal_sei_frame_packing_arrangement(HEVCContext *s)
return 0;
}
static int decode_nal_sei_display_orientation(HEVCContext *s)
static int decode_nal_sei_display_orientation(HEVCSEIDisplayOrientation *s, GetBitContext *gb)
{
GetBitContext *gb = &s->HEVClc->gb;
s->present = !get_bits1(gb);
s->sei_display_orientation_present = !get_bits1(gb);
if (s->present) {
s->hflip = get_bits1(gb); // hor_flip
s->vflip = get_bits1(gb); // ver_flip
if (s->sei_display_orientation_present) {
s->sei_hflip = get_bits1(gb); // hor_flip
s->sei_vflip = get_bits1(gb); // ver_flip
s->sei_anticlockwise_rotation = get_bits(gb, 16);
s->anticlockwise_rotation = get_bits(gb, 16);
skip_bits1(gb); // display_orientation_persistence_flag
}
return 0;
}
static int decode_pic_timing(HEVCContext *s)
static int decode_pic_timing(HEVCSEIContext *s, GetBitContext *gb, const HEVCParamSets *ps,
void *logctx)
{
GetBitContext *gb = &s->HEVClc->gb;
HEVCSEIPictureTiming *h = &s->picture_timing;
HEVCSPS *sps;
if (!s->ps.sps_list[s->active_seq_parameter_set_id])
if (!ps->sps_list[s->active_seq_parameter_set_id])
return(AVERROR(ENOMEM));
sps = (HEVCSPS*)s->ps.sps_list[s->active_seq_parameter_set_id]->data;
sps = (HEVCSPS*)ps->sps_list[s->active_seq_parameter_set_id]->data;
if (sps->vui.frame_field_info_present_flag) {
int pic_struct = get_bits(gb, 4);
s->picture_struct = AV_PICTURE_STRUCTURE_UNKNOWN;
h->picture_struct = AV_PICTURE_STRUCTURE_UNKNOWN;
if (pic_struct == 2) {
av_log(s->avctx, AV_LOG_DEBUG, "BOTTOM Field\n");
s->picture_struct = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
av_log(logctx, AV_LOG_DEBUG, "BOTTOM Field\n");
h->picture_struct = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
} else if (pic_struct == 1) {
av_log(s->avctx, AV_LOG_DEBUG, "TOP Field\n");
s->picture_struct = AV_PICTURE_STRUCTURE_TOP_FIELD;
av_log(logctx, AV_LOG_DEBUG, "TOP Field\n");
h->picture_struct = AV_PICTURE_STRUCTURE_TOP_FIELD;
}
get_bits(gb, 2); // source_scan_type
get_bits(gb, 1); // duplicate_flag
@@ -183,14 +177,13 @@ static int decode_pic_timing(HEVCContext *s)
return 1;
}
static int decode_registered_user_data_closed_caption(HEVCContext *s, int size)
static int decode_registered_user_data_closed_caption(HEVCSEIA53Caption *s, GetBitContext *gb,
int size)
{
int flag;
int user_data_type_code;
int cc_count;
GetBitContext *gb = &s->HEVClc->gb;
if (size < 3)
return AVERROR(EINVAL);
@@ -235,13 +228,12 @@ static int decode_registered_user_data_closed_caption(HEVCContext *s, int size)
return 0;
}
static int decode_nal_sei_user_data_registered_itu_t_t35(HEVCContext *s, int size)
static int decode_nal_sei_user_data_registered_itu_t_t35(HEVCSEIContext *s, GetBitContext *gb,
int size)
{
uint32_t country_code;
uint32_t user_identifier;
GetBitContext *gb = &s->HEVClc->gb;
if (size < 7)
return AVERROR(EINVAL);
size -= 7;
@@ -259,7 +251,7 @@ static int decode_nal_sei_user_data_registered_itu_t_t35(HEVCContext *s, int siz
switch (user_identifier) {
case MKBETAG('G', 'A', '9', '4'):
return decode_registered_user_data_closed_caption(s, size);
return decode_registered_user_data_closed_caption(&s->a53_caption, gb, size);
default:
skip_bits_long(gb, size * 8);
break;
@@ -267,9 +259,8 @@ static int decode_nal_sei_user_data_registered_itu_t_t35(HEVCContext *s, int siz
return 0;
}
static int active_parameter_sets(HEVCContext *s)
static int active_parameter_sets(HEVCSEIContext *s, GetBitContext *gb, void *logctx)
{
GetBitContext *gb = &s->HEVClc->gb;
int num_sps_ids_minus1;
int i;
unsigned active_seq_parameter_set_id;
@@ -280,13 +271,13 @@ static int active_parameter_sets(HEVCContext *s)
num_sps_ids_minus1 = get_ue_golomb_long(gb); // num_sps_ids_minus1
if (num_sps_ids_minus1 < 0 || num_sps_ids_minus1 > 15) {
av_log(s->avctx, AV_LOG_ERROR, "num_sps_ids_minus1 %d invalid\n", num_sps_ids_minus1);
av_log(logctx, AV_LOG_ERROR, "num_sps_ids_minus1 %d invalid\n", num_sps_ids_minus1);
return AVERROR_INVALIDDATA;
}
active_seq_parameter_set_id = get_ue_golomb_long(gb);
if (active_seq_parameter_set_id >= HEVC_MAX_SPS_COUNT) {
av_log(s->avctx, AV_LOG_ERROR, "active_parameter_set_id %d invalid\n", active_seq_parameter_set_id);
av_log(logctx, AV_LOG_ERROR, "active_parameter_set_id %d invalid\n", active_seq_parameter_set_id);
return AVERROR_INVALIDDATA;
}
s->active_seq_parameter_set_id = active_seq_parameter_set_id;
@@ -297,63 +288,61 @@ static int active_parameter_sets(HEVCContext *s)
return 0;
}
static int decode_nal_sei_prefix(HEVCContext *s, int type, int size)
static int decode_nal_sei_prefix(GetBitContext *gb, HEVCSEIContext *s, const HEVCParamSets *ps,
int type, int size, void *logctx)
{
GetBitContext *gb = &s->HEVClc->gb;
switch (type) {
case 256: // Mismatched value from HM 8.1
return decode_nal_sei_decoded_picture_hash(s);
return decode_nal_sei_decoded_picture_hash(&s->picture_hash, gb);
case SEI_TYPE_FRAME_PACKING:
return decode_nal_sei_frame_packing_arrangement(s);
return decode_nal_sei_frame_packing_arrangement(&s->frame_packing, gb);
case SEI_TYPE_DISPLAY_ORIENTATION:
return decode_nal_sei_display_orientation(s);
return decode_nal_sei_display_orientation(&s->display_orientation, gb);
case SEI_TYPE_PICTURE_TIMING:
{
int ret = decode_pic_timing(s);
av_log(s->avctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
int ret = decode_pic_timing(s, gb, ps, logctx);
av_log(logctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
skip_bits(gb, 8 * size);
return ret;
}
case SEI_TYPE_MASTERING_DISPLAY_INFO:
return decode_nal_sei_mastering_display_info(s);
return decode_nal_sei_mastering_display_info(&s->mastering_display, gb);
case SEI_TYPE_CONTENT_LIGHT_LEVEL_INFO:
return decode_nal_sei_content_light_info(s);
return decode_nal_sei_content_light_info(&s->content_light, gb);
case SEI_TYPE_ACTIVE_PARAMETER_SETS:
active_parameter_sets(s);
av_log(s->avctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
active_parameter_sets(s, gb, logctx);
av_log(logctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
return 0;
case SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35:
return decode_nal_sei_user_data_registered_itu_t_t35(s, size);
return decode_nal_sei_user_data_registered_itu_t_t35(s, gb, size);
default:
av_log(s->avctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
av_log(logctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
skip_bits_long(gb, 8 * size);
return 0;
}
}
static int decode_nal_sei_suffix(HEVCContext *s, int type, int size)
static int decode_nal_sei_suffix(GetBitContext *gb, HEVCSEIContext *s,
int type, int size, void *logctx)
{
GetBitContext *gb = &s->HEVClc->gb;
switch (type) {
case SEI_TYPE_DECODED_PICTURE_HASH:
return decode_nal_sei_decoded_picture_hash(s);
return decode_nal_sei_decoded_picture_hash(&s->picture_hash, gb);
default:
av_log(s->avctx, AV_LOG_DEBUG, "Skipped SUFFIX SEI %d\n", type);
av_log(logctx, AV_LOG_DEBUG, "Skipped SUFFIX SEI %d\n", type);
skip_bits_long(gb, 8 * size);
return 0;
}
}
static int decode_nal_sei_message(HEVCContext *s)
static int decode_nal_sei_message(GetBitContext *gb, HEVCSEIContext *s,
const HEVCParamSets *ps, int nal_unit_type,
void *logctx)
{
GetBitContext *gb = &s->HEVClc->gb;
int payload_type = 0;
int payload_size = 0;
int byte = 0xFF;
av_log(s->avctx, AV_LOG_DEBUG, "Decoding SEI\n");
av_log(logctx, AV_LOG_DEBUG, "Decoding SEI\n");
while (byte == 0xFF) {
byte = get_bits(gb, 8);
@@ -364,10 +353,10 @@ static int decode_nal_sei_message(HEVCContext *s)
byte = get_bits(gb, 8);
payload_size += byte;
}
if (s->nal_unit_type == HEVC_NAL_SEI_PREFIX) {
return decode_nal_sei_prefix(s, payload_type, payload_size);
if (nal_unit_type == HEVC_NAL_SEI_PREFIX) {
return decode_nal_sei_prefix(gb, s, ps, payload_type, payload_size, logctx);
} else { /* nal_unit_type == NAL_SEI_SUFFIX */
return decode_nal_sei_suffix(s, payload_type, payload_size);
return decode_nal_sei_suffix(gb, s, payload_type, payload_size, logctx);
}
}
@@ -376,20 +365,21 @@ static int more_rbsp_data(GetBitContext *gb)
return get_bits_left(gb) > 0 && show_bits(gb, 8) != 0x80;
}
int ff_hevc_decode_nal_sei(HEVCContext *s)
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEIContext *s,
const HEVCParamSets *ps, int type)
{
int ret;
do {
ret = decode_nal_sei_message(s);
ret = decode_nal_sei_message(gb, s, ps, type, logctx);
if (ret < 0)
return(AVERROR(ENOMEM));
} while (more_rbsp_data(&s->HEVClc->gb));
} while (more_rbsp_data(gb));
return 1;
}
void ff_hevc_reset_sei(HEVCContext *s)
void ff_hevc_reset_sei(HEVCSEIContext *s)
{
s->a53_caption_size = 0;
av_freep(&s->a53_caption);
s->a53_caption.a53_caption_size = 0;
av_freep(&s->a53_caption.a53_caption);
}
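decode_nal_sei_message() above reads payload_type and payload_size as a run of 0xFF bytes followed by one terminating byte, per the H.265 sei_message() syntax. A self-contained sketch of just that header convention (the helper name read_sei_payload_header is an assumption, not part of the file):

#include "get_bits.h"

/* Each field is the sum of the bytes read for it: every 0xFF byte adds 255
 * and signals that another byte follows. */
static void read_sei_payload_header(GetBitContext *gb,
                                    int *payload_type, int *payload_size)
{
    int byte;

    *payload_type = 0;
    do {
        byte = get_bits(gb, 8);
        *payload_type += byte;
    } while (byte == 0xFF);

    *payload_size = 0;
    do {
        byte = get_bits(gb, 8);
        *payload_size += byte;
    } while (byte == 0xFF);
}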

libavcodec/hevcdec.c

@@ -2559,18 +2559,18 @@ static int set_side_data(HEVCContext *s)
{
AVFrame *out = s->ref->frame;
if (s->sei_frame_packing_present &&
s->frame_packing_arrangement_type >= 3 &&
s->frame_packing_arrangement_type <= 5 &&
s->content_interpretation_type > 0 &&
s->content_interpretation_type < 3) {
if (s->sei.frame_packing.present &&
s->sei.frame_packing.arrangement_type >= 3 &&
s->sei.frame_packing.arrangement_type <= 5 &&
s->sei.frame_packing.content_interpretation_type > 0 &&
s->sei.frame_packing.content_interpretation_type < 3) {
AVStereo3D *stereo = av_stereo3d_create_side_data(out);
if (!stereo)
return AVERROR(ENOMEM);
switch (s->frame_packing_arrangement_type) {
switch (s->sei.frame_packing.arrangement_type) {
case 3:
if (s->quincunx_subsampling)
if (s->sei.frame_packing.quincunx_subsampling)
stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
else
stereo->type = AV_STEREO3D_SIDEBYSIDE;
@@ -2583,13 +2583,14 @@ static int set_side_data(HEVCContext *s)
break;
}
if (s->content_interpretation_type == 2)
if (s->sei.frame_packing.content_interpretation_type == 2)
stereo->flags = AV_STEREO3D_FLAG_INVERT;
}
if (s->sei_display_orientation_present &&
(s->sei_anticlockwise_rotation || s->sei_hflip || s->sei_vflip)) {
double angle = s->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
if (s->sei.display_orientation.present &&
(s->sei.display_orientation.anticlockwise_rotation ||
s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
AVFrameSideData *rotation = av_frame_new_side_data(out,
AV_FRAME_DATA_DISPLAYMATRIX,
sizeof(int32_t) * 9);
@@ -2598,16 +2599,17 @@ static int set_side_data(HEVCContext *s)
av_display_rotation_set((int32_t *)rotation->data, angle);
av_display_matrix_flip((int32_t *)rotation->data,
s->sei_hflip, s->sei_vflip);
s->sei.display_orientation.hflip,
s->sei.display_orientation.vflip);
}
// Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
// so the side data persists for the entire coded video sequence.
if (s->sei_mastering_display_info_present > 0 &&
if (s->sei.mastering_display.present > 0 &&
IS_IRAP(s) && s->no_rasl_output_flag) {
s->sei_mastering_display_info_present--;
s->sei.mastering_display.present--;
}
if (s->sei_mastering_display_info_present) {
if (s->sei.mastering_display.present) {
// HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
const int mapping[3] = {2, 0, 1};
const int chroma_den = 50000;
@@ -2620,19 +2622,19 @@ static int set_side_data(HEVCContext *s)
for (i = 0; i < 3; i++) {
const int j = mapping[i];
metadata->display_primaries[i][0].num = s->display_primaries[j][0];
metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
metadata->display_primaries[i][0].den = chroma_den;
metadata->display_primaries[i][1].num = s->display_primaries[j][1];
metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
metadata->display_primaries[i][1].den = chroma_den;
}
metadata->white_point[0].num = s->white_point[0];
metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
metadata->white_point[0].den = chroma_den;
metadata->white_point[1].num = s->white_point[1];
metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
metadata->white_point[1].den = chroma_den;
metadata->max_luminance.num = s->max_mastering_luminance;
metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
metadata->max_luminance.den = luma_den;
metadata->min_luminance.num = s->min_mastering_luminance;
metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
metadata->min_luminance.den = luma_den;
metadata->has_luminance = 1;
metadata->has_primaries = 1;
@@ -2653,31 +2655,31 @@ static int set_side_data(HEVCContext *s)
}
// Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
// so the side data persists for the entire coded video sequence.
if (s->sei_content_light_present > 0 &&
if (s->sei.content_light.present > 0 &&
IS_IRAP(s) && s->no_rasl_output_flag) {
s->sei_content_light_present--;
s->sei.content_light.present--;
}
if (s->sei_content_light_present) {
if (s->sei.content_light.present) {
AVContentLightMetadata *metadata =
av_content_light_metadata_create_side_data(out);
if (!metadata)
return AVERROR(ENOMEM);
metadata->MaxCLL = s->max_content_light_level;
metadata->MaxFALL = s->max_pic_average_light_level;
metadata->MaxCLL = s->sei.content_light.max_content_light_level;
metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
metadata->MaxCLL, metadata->MaxFALL);
}
if (s->a53_caption) {
if (s->sei.a53_caption.a53_caption) {
AVFrameSideData* sd = av_frame_new_side_data(out,
AV_FRAME_DATA_A53_CC,
s->a53_caption_size);
s->sei.a53_caption.a53_caption_size);
if (sd)
memcpy(sd->data, s->a53_caption, s->a53_caption_size);
av_freep(&s->a53_caption);
s->a53_caption_size = 0;
memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size);
av_freep(&s->sei.a53_caption.a53_caption);
s->sei.a53_caption.a53_caption_size = 0;
s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
}
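In the display orientation branch earlier in set_side_data(), anticlockwise_rotation is a 16-bit fixed-point fraction of a full turn. A small worked sketch of that conversion (the helper name is illustrative, not from the patch):

/* 1 << 16 units correspond to one full 360-degree turn, so a coded value of
 * 16384 (0x4000) yields 16384 * 360 / 65536 = 90.0 degrees anticlockwise;
 * this is the same math used before calling av_display_rotation_set(). */
static double sei_rotation_to_degrees(int anticlockwise_rotation)
{
    return anticlockwise_rotation * 360 / (double)(1 << 16);
}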
@@ -2772,7 +2774,7 @@ static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
break;
case HEVC_NAL_SEI_PREFIX:
case HEVC_NAL_SEI_SUFFIX:
ret = ff_hevc_decode_nal_sei(s);
ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
if (ret < 0)
goto fail;
break;
@@ -2966,7 +2968,7 @@ static int verify_md5(HEVCContext *s, AVFrame *frame)
int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
uint8_t md5[16];
av_md5_init(s->md5_ctx);
av_md5_init(s->sei.picture_hash.md5_ctx);
for (j = 0; j < h; j++) {
const uint8_t *src = frame->data[i] + j * frame->linesize[i];
#if HAVE_BIGENDIAN
@@ -2976,11 +2978,11 @@ static int verify_md5(HEVCContext *s, AVFrame *frame)
src = s->checksum_buf;
}
#endif
av_md5_update(s->md5_ctx, src, w << pixel_shift);
av_md5_update(s->sei.picture_hash.md5_ctx, src, w << pixel_shift);
}
av_md5_final(s->md5_ctx, md5);
av_md5_final(s->sei.picture_hash.md5_ctx, md5);
if (!memcmp(md5, s->md5[i], 16)) {
if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
print_md5(s->avctx, AV_LOG_DEBUG, md5);
av_log (s->avctx, AV_LOG_DEBUG, "; ");
@@ -2988,7 +2990,7 @@ static int verify_md5(HEVCContext *s, AVFrame *frame)
av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
print_md5(s->avctx, AV_LOG_ERROR, md5);
av_log (s->avctx, AV_LOG_ERROR, " != ");
print_md5(s->avctx, AV_LOG_ERROR, s->md5[i]);
print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]);
av_log (s->avctx, AV_LOG_ERROR, "\n");
return AVERROR_INVALIDDATA;
}
@@ -3061,7 +3063,7 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
} else {
/* verify the SEI checksum */
if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
s->is_md5) {
s->sei.picture_hash.is_md5) {
ret = verify_md5(s, s->ref->frame);
if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
ff_hevc_unref_frame(s, s->ref, ~0);
@ -3069,7 +3071,7 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
}
}
}
s->is_md5 = 0;
s->sei.picture_hash.is_md5 = 0;
if (s->is_decoded) {
av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
@@ -3132,7 +3134,7 @@ static av_cold int hevc_decode_free(AVCodecContext *avctx)
pic_arrays_free(s);
av_freep(&s->md5_ctx);
av_freep(&s->sei.picture_hash.md5_ctx);
av_freep(&s->cabac_state);
@@ -3207,8 +3209,8 @@ static av_cold int hevc_init_context(AVCodecContext *avctx)
s->max_ra = INT_MAX;
s->md5_ctx = av_md5_alloc();
if (!s->md5_ctx)
s->sei.picture_hash.md5_ctx = av_md5_alloc();
if (!s->sei.picture_hash.md5_ctx)
goto fail;
ff_bswapdsp_init(&s->bdsp);
@@ -3216,7 +3218,7 @@ static av_cold int hevc_init_context(AVCodecContext *avctx)
s->context_initialized = 1;
s->eos = 0;
ff_hevc_reset_sei(s);
ff_hevc_reset_sei(&s->sei);
return 0;
@@ -3313,7 +3315,7 @@ static av_cold int hevc_decode_init(AVCodecContext *avctx)
return ret;
s->enable_parallel_tiles = 0;
s->picture_struct = 0;
s->sei.picture_timing.picture_struct = 0;
s->eos = 1;
atomic_init(&s->wpp_err, 0);

libavcodec/hevcdec.h

@@ -464,6 +464,59 @@ typedef struct HEVCLocalContext {
int boundary_flags;
} HEVCLocalContext;
typedef struct HEVCSEIPictureHash {
struct AVMD5 *md5_ctx;
uint8_t md5[3][16];
uint8_t is_md5;
} HEVCSEIPictureHash;
typedef struct HEVCSEIFramePacking {
int present;
int arrangement_type;
int content_interpretation_type;
int quincunx_subsampling;
} HEVCSEIFramePacking;
typedef struct HEVCSEIDisplayOrientation {
int present;
int anticlockwise_rotation;
int hflip, vflip;
} HEVCSEIDisplayOrientation;
typedef struct HEVCSEIPictureTiming {
int picture_struct;
} HEVCSEIPictureTiming;
typedef struct HEVCSEIA53Caption {
int a53_caption_size;
uint8_t *a53_caption;
} HEVCSEIA53Caption;
typedef struct HEVCSEIMasteringDisplay {
int present;
uint16_t display_primaries[3][2];
uint16_t white_point[2];
uint32_t max_luminance;
uint32_t min_luminance;
} HEVCSEIMasteringDisplay;
typedef struct HEVCSEIContentLight {
int present;
uint16_t max_content_light_level;
uint16_t max_pic_average_light_level;
} HEVCSEIContentLight;
typedef struct HEVCSEIContext {
HEVCSEIPictureHash picture_hash;
HEVCSEIFramePacking frame_packing;
HEVCSEIDisplayOrientation display_orientation;
HEVCSEIPictureTiming picture_timing;
HEVCSEIA53Caption a53_caption;
HEVCSEIMasteringDisplay mastering_display;
HEVCSEIContentLight content_light;
int active_seq_parameter_set_id;
} HEVCSEIContext;
typedef struct HEVCContext {
const AVClass *c; // needed by private avoptions
AVCodecContext *avctx;
@@ -558,52 +611,19 @@ typedef struct HEVCContext {
// type of the first VCL NAL of the current frame
enum HEVCNALUnitType first_nal_type;
// for checking the frame checksums
struct AVMD5 *md5_ctx;
uint8_t md5[3][16];
uint8_t is_md5;
uint8_t context_initialized;
int is_nalff; ///< this flag is != 0 if bitstream is encapsulated
///< as a format defined in 14496-15
int apply_defdispwin;
int active_seq_parameter_set_id;
int nal_length_size; ///< Number of bytes used for nal length (1, 2 or 4)
int nuh_layer_id;
/** frame packing arrangement variables */
int sei_frame_packing_present;
int frame_packing_arrangement_type;
int content_interpretation_type;
int quincunx_subsampling;
/** display orientation */
int sei_display_orientation_present;
int sei_anticlockwise_rotation;
int sei_hflip, sei_vflip;
int picture_struct;
uint8_t* a53_caption;
int a53_caption_size;
/** mastering display */
int sei_mastering_display_info_present;
uint16_t display_primaries[3][2];
uint16_t white_point[2];
uint32_t max_mastering_luminance;
uint32_t min_mastering_luminance;
/* content light level */
int sei_content_light_present;
uint16_t max_content_light_level;
uint16_t max_pic_average_light_level;
HEVCSEIContext sei;
} HEVCContext;
int ff_hevc_decode_nal_sei(HEVCContext *s);
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEIContext *s,
const HEVCParamSets *ps, int type);
/**
* Mark all frames in DPB as unused for reference.
@@ -715,7 +735,7 @@ void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size);
*
* @param s HEVCContext.
*/
void ff_hevc_reset_sei(HEVCContext *s);
void ff_hevc_reset_sei(HEVCSEIContext *s);
extern const uint8_t ff_hevc_qpel_extra_before[4];
extern const uint8_t ff_hevc_qpel_extra_after[4];