mirror of https://git.ffmpeg.org/ffmpeg.git
avcodec/hapdec: Change compressed_offset to unsigned 32bit
Fixes: out of array access
Fixes: 29345/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HAP_fuzzer-5401813482340352
Fixes: 30745/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HAP_fuzzer-5762798221131776
Suggested-by: Anton
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 89fe1935b1)
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
parent 3a8911ec09
commit c641b7dd4b
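For context before the diff: compressed_offset is a running byte offset accumulated as a size_t in hap_parse_decode_instructions (see below), so keeping it in a signed int can turn a large accumulated total into a negative value, which is the kind of inconsistency behind the out-of-array access being fixed. A minimal sketch of just that conversion, with a hypothetical value and outside FFmpeg:

/* Minimal sketch, not FFmpeg code: a size_t running offset stored in the old
 * int field versus the new uint32_t one.  The value below is hypothetical. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    size_t running_size = (size_t)INT32_MAX + 0x100; /* accumulated chunk sizes */

    int      as_int    = (int)running_size;      /* old field type: implementation-defined,
                                                     typically wraps to a negative value */
    uint32_t as_uint32 = (uint32_t)running_size; /* new field type: well-defined modulo 2^32 */

    printf("stored as int:      %d\n", as_int);
    printf("stored as uint32_t: %" PRIu32 "\n", as_uint32);
    return 0;
}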
--- a/libavcodec/hap.h
+++ b/libavcodec/hap.h
@@ -52,7 +52,7 @@ enum HapSectionType {
 
 typedef struct HapChunk {
     enum HapCompressor compressor;
-    int compressed_offset;
+    uint32_t compressed_offset;
     size_t compressed_size;
     int uncompressed_offset;
     size_t uncompressed_size;
--- a/libavcodec/hapdec.c
+++ b/libavcodec/hapdec.c
@@ -105,6 +105,8 @@ static int hap_parse_decode_instructions(HapContext *ctx, int size)
         size_t running_size = 0;
         for (i = 0; i < ctx->chunk_count; i++) {
             ctx->chunks[i].compressed_offset = running_size;
+            if (ctx->chunks[i].compressed_size > UINT32_MAX - running_size)
+                return AVERROR_INVALIDDATA;
             running_size += ctx->chunks[i].compressed_size;
         }
     }
@@ -186,7 +188,7 @@ static int hap_parse_frame_header(AVCodecContext *avctx)
         HapChunk *chunk = &ctx->chunks[i];
 
         /* Check the compressed buffer is valid */
-        if (chunk->compressed_offset + chunk->compressed_size > bytestream2_get_bytes_left(gbc))
+        if (chunk->compressed_offset + (uint64_t)chunk->compressed_size > bytestream2_get_bytes_left(gbc))
             return AVERROR_INVALIDDATA;
 
         /* Chunks are unpacked sequentially, ctx->tex_size is the uncompressed
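Two details of the patch are worth spelling out: the guard added in hap_parse_decode_instructions is written as compressed_size > UINT32_MAX - running_size so that the check itself cannot overflow while rejecting totals that would not fit the new uint32_t field, and the bounds check in hap_parse_frame_header widens one operand to uint64_t so the sum cannot wrap on targets where size_t is 32 bits. A minimal standalone sketch of both patterns, with hypothetical values and outside FFmpeg:

/* Minimal sketch, not FFmpeg code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Overflow-safe accumulation guard: "size > UINT32_MAX - running" is
     * equivalent to "running + size > UINT32_MAX" but never overflows
     * while being evaluated. */
    uint32_t running_size    = 0xFFFFFFF0u; /* hypothetical accumulated offset */
    uint32_t compressed_size = 0x20u;       /* hypothetical next chunk size */

    if (compressed_size > UINT32_MAX - running_size)
        puts("guard: next offset would not fit in uint32_t, input rejected");

    /* Widened bounds check: with pure 32-bit arithmetic the sum can wrap
     * below bytes_left, while casting one operand to uint64_t keeps the
     * true sum. */
    uint32_t offset     = 0xFFFFFF00u; /* hypothetical chunk offset */
    uint32_t size       = 0x200u;      /* hypothetical chunk size */
    int      bytes_left = 1024;        /* hypothetical bytes left in the packet */

    uint32_t sum32 = offset + size;           /* wraps to 0x100 */
    uint64_t sum64 = offset + (uint64_t)size; /* exact: 0x100000100 */

    printf("32-bit sum 0x%" PRIx32 ": %s\n", sum32,
           sum32 > (uint32_t)bytes_left ? "rejected" : "wrongly accepted");
    printf("64-bit sum 0x%" PRIx64 ": %s\n", sum64,
           sum64 > (uint64_t)bytes_left ? "rejected" : "accepted");
    return 0;
}

Both rejections correspond to the AVERROR_INVALIDDATA paths in the patch above.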