/* * Go2Webinar decoder * Copyright (c) 2012 Konstantin Shishkov * * This file is part of Libav. * * Libav is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * Libav is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with Libav; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Go2Webinar decoder */ #include #include "libavutil/intreadwrite.h" #include "avcodec.h" #include "bytestream.h" #include "dsputil.h" #include "get_bits.h" #include "internal.h" #include "mjpeg.h" enum ChunkType { FRAME_INFO = 0xC8, TILE_DATA, CURSOR_POS, CURSOR_SHAPE, CHUNK_CC, CHUNK_CD }; enum Compression { COMPR_EPIC_J_B = 2, COMPR_KEMPF_J_B, }; static const uint8_t luma_quant[64] = { 8, 6, 5, 8, 12, 20, 26, 31, 6, 6, 7, 10, 13, 29, 30, 28, 7, 7, 8, 12, 20, 29, 35, 28, 7, 9, 11, 15, 26, 44, 40, 31, 9, 11, 19, 28, 34, 55, 52, 39, 12, 18, 28, 32, 41, 52, 57, 46, 25, 32, 39, 44, 52, 61, 60, 51, 36, 46, 48, 49, 56, 50, 52, 50 }; static const uint8_t chroma_quant[64] = { 9, 9, 12, 24, 50, 50, 50, 50, 9, 11, 13, 33, 50, 50, 50, 50, 12, 13, 28, 50, 50, 50, 50, 50, 24, 33, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, }; typedef struct JPGContext { DSPContext dsp; ScanTable scantable; VLC dc_vlc[2], ac_vlc[2]; int prev_dc[3]; DECLARE_ALIGNED(16, int16_t, block)[6][64]; uint8_t *buf; } JPGContext; typedef struct G2MContext { JPGContext jc; int version; int compression; int width, height, bpp; int tile_width, tile_height; int tiles_x, tiles_y, tile_x, tile_y; int got_header; uint8_t *framebuf; int framebuf_stride, old_width, old_height; uint8_t *synth_tile, *jpeg_tile; int tile_stride, old_tile_w, old_tile_h; uint8_t *kempf_buf, *kempf_flags; uint8_t *cursor; int cursor_stride; int cursor_fmt; int cursor_w, cursor_h, cursor_x, cursor_y; int cursor_hot_x, cursor_hot_y; } G2MContext; static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int is_ac) { uint8_t huff_size[256] = { 0 }; uint16_t huff_code[256]; uint16_t huff_sym[256]; int i; ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table); for (i = 0; i < 256; i++) huff_sym[i] = i + 16 * is_ac; if (is_ac) huff_sym[0] = 16 * 256; return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1, huff_code, 2, 2, huff_sym, 2, 2, 0); } static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c) { int ret; ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance, avpriv_mjpeg_val_dc, 12, 0); if (ret) return ret; ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance, avpriv_mjpeg_val_dc, 12, 0); if (ret) return ret; ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance, avpriv_mjpeg_val_ac_luminance, 251, 1); if (ret) return ret; ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance, avpriv_mjpeg_val_ac_chrominance, 251, 1); if (ret) return ret; ff_dsputil_init(&c->dsp, avctx); ff_init_scantable(c->dsp.idct_permutation, &c->scantable, ff_zigzag_direct); return 0; } static av_cold void jpg_free_context(JPGContext *ctx) { int i; for (i = 0; i < 2; i++) { ff_free_vlc(&ctx->dc_vlc[i]); ff_free_vlc(&ctx->ac_vlc[i]); } av_freep(&ctx->buf); } static void jpg_unescape(const uint8_t *src, int src_size, uint8_t *dst, int *dst_size) { const uint8_t *src_end = src + src_size; uint8_t *dst_start = dst; while (src < src_end) { uint8_t x = *src++; *dst++ = x; if (x == 0xFF && !*src) src++; } *dst_size = dst - dst_start; } static int jpg_decode_block(JPGContext *c, GetBitContext *gb, int plane, int16_t *block) { int dc, val, pos; const int is_chroma = !!plane; const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant; c->dsp.clear_block(block); dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3); if (dc < 0) return AVERROR_INVALIDDATA; if (dc) dc = get_xbits(gb, dc); dc = dc * qmat[0] + c->prev_dc[plane]; block[0] = dc; c->prev_dc[plane] = dc; pos = 0; while (pos < 63) { val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3); if (val < 0) return AVERROR_INVALIDDATA; pos += val >> 4; val &= 0xF; if (pos > 63) return val ? AVERROR_INVALIDDATA : 0; if (val) { int nbits = val; val = get_xbits(gb, nbits); val *= qmat[ff_zigzag_direct[pos]]; block[c->scantable.permutated[pos]] = val; } } return 0; } static inline void yuv2rgb(uint8_t *out, int Y, int U, int V) { out[0] = av_clip_uint8(Y + ( 91881 * V + 32768 >> 16)); out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16)); out[2] = av_clip_uint8(Y + (116130 * U + 32768 >> 16)); } static int jpg_decode_data(JPGContext *c, int width, int height, const uint8_t *src, int src_size, uint8_t *dst, int dst_stride, const uint8_t *mask, int mask_stride, int num_mbs, int swapuv) { GetBitContext gb; uint8_t *tmp; int mb_w, mb_h, mb_x, mb_y, i, j; int bx, by; int unesc_size; int ret; tmp = av_realloc(c->buf, src_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!tmp) return AVERROR(ENOMEM); c->buf = tmp; jpg_unescape(src, src_size, c->buf, &unesc_size); memset(c->buf + unesc_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); init_get_bits(&gb, c->buf, unesc_size * 8); width = FFALIGN(width, 16); mb_w = width >> 4; mb_h = (height + 15) >> 4; if (!num_mbs) num_mbs = mb_w * mb_h; for (i = 0; i < 3; i++) c->prev_dc[i] = 1024; bx = by = 0; for (mb_y = 0; mb_y < mb_h; mb_y++) { for (mb_x = 0; mb_x < mb_w; mb_x++) { if (mask && !mask[mb_x]) { bx += 16; continue; } for (j = 0; j < 2; j++) { for (i = 0; i < 2; i++) { if ((ret = jpg_decode_block(c, &gb, 0, c->block[i + j * 2])) != 0) return ret; c->dsp.idct(c->block[i + j * 2]); } } for (i = 1; i < 3; i++) { if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0) return ret; c->dsp.idct(c->block[i + 3]); } for (j = 0; j < 16; j++) { uint8_t *out = dst + bx * 3 + (by + j) * dst_stride; for (i = 0; i < 16; i++) { int Y, U, V; Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8]; U = c->block[4 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128; V = c->block[5 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128; yuv2rgb(out + i * 3, Y, U, V); } } if (!--num_mbs) return 0; bx += 16; } bx = 0; by += 16; if (mask) mask += mask_stride; } return 0; } static void kempf_restore_buf(const uint8_t *src, int len, uint8_t *dst, int stride, const uint8_t *jpeg_tile, int tile_stride, int width, int height, const uint8_t *pal, int npal, int tidx) { GetBitContext gb; int i, j, nb, col; init_get_bits(&gb, src, len * 8); if (npal <= 2) nb = 1; else if (npal <= 4) nb = 2; else if (npal <= 16) nb = 4; else nb = 8; for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) { if (get_bits(&gb, 8)) continue; for (i = 0; i < width; i++) { col = get_bits(&gb, nb); if (col != tidx) memcpy(dst + i * 3, pal + col * 3, 3); else memcpy(dst + i * 3, jpeg_tile + i * 3, 3); } } } static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y, const uint8_t *src, int src_size) { int width, height; int hdr, zsize, npal, tidx = -1, ret; int i, j; const uint8_t *src_end = src + src_size; uint8_t pal[768], transp[3]; uLongf dlen = (c->tile_width + 1) * c->tile_height; int sub_type; int nblocks, cblocks, bstride; int bits, bitbuf, coded; uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 + tile_y * c->tile_height * c->framebuf_stride; if (src_size < 2) return AVERROR_INVALIDDATA; width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width); height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height); hdr = *src++; sub_type = hdr >> 5; if (sub_type == 0) { int j; memcpy(transp, src, 3); src += 3; for (j = 0; j < height; j++, dst += c->framebuf_stride) for (i = 0; i < width; i++) memcpy(dst + i * 3, transp, 3); return 0; } else if (sub_type == 1) { return jpg_decode_data(&c->jc, width, height, src, src_end - src, dst, c->framebuf_stride, NULL, 0, 0, 0); } if (sub_type != 2) { memcpy(transp, src, 3); src += 3; } npal = *src++ + 1; memcpy(pal, src, npal * 3); src += npal * 3; if (sub_type != 2) { for (i = 0; i < npal; i++) { if (!memcmp(pal + i * 3, transp, 3)) { tidx = i; break; } } } if (src_end - src < 2) return 0; zsize = (src[0] << 8) | src[1]; src += 2; if (src_end - src < zsize) return AVERROR_INVALIDDATA; ret = uncompress(c->kempf_buf, &dlen, src, zsize); if (ret) return AVERROR_INVALIDDATA; src += zsize; if (sub_type == 2) { kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride, NULL, 0, width, height, pal, npal, tidx); return 0; } nblocks = *src++ + 1; cblocks = 0; bstride = FFALIGN(width, 16) >> 4; // blocks are coded LSB and we need normal bitreader for JPEG data bits = 0; for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) { for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) { if (!bits) { bitbuf = *src++; bits = 8; } coded = bitbuf & 1; bits--; bitbuf >>= 1; cblocks += coded; if (cblocks > nblocks) return AVERROR_INVALIDDATA; c->kempf_flags[j + i * bstride] = coded; } } memset(c->jpeg_tile, 0, c->tile_stride * height); jpg_decode_data(&c->jc, width, height, src, src_end - src, c->jpeg_tile, c->tile_stride, c->kempf_flags, bstride, nblocks, 0); kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride, c->jpeg_tile, c->tile_stride, width, height, pal, npal, tidx); return 0; } static int g2m_init_buffers(G2MContext *c) { int aligned_height; if (!c->framebuf || c->old_width < c->width || c->height < c->height) { c->framebuf_stride = FFALIGN(c->width * 3, 16); aligned_height = FFALIGN(c->height, 16); av_free(c->framebuf); c->framebuf = av_mallocz(c->framebuf_stride * aligned_height); if (!c->framebuf) return AVERROR(ENOMEM); } if (!c->synth_tile || !c->jpeg_tile || c->old_tile_w < c->tile_width || c->old_tile_h < c->tile_height) { c->tile_stride = FFALIGN(c->tile_width * 3, 16); aligned_height = FFALIGN(c->tile_height, 16); av_free(c->synth_tile); av_free(c->jpeg_tile); c->synth_tile = av_mallocz(c->tile_stride * aligned_height); c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height); c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height + FF_INPUT_BUFFER_PADDING_SIZE); c->kempf_flags = av_mallocz( c->tile_width * aligned_height); if (!c->synth_tile || !c->jpeg_tile || !c->kempf_buf || !c->kempf_flags) return AVERROR(ENOMEM); } return 0; } static int g2m_load_cursor(G2MContext *c, GetByteContext *gb) { int i, j, k; uint8_t *dst; uint32_t bits; c->cursor_stride = c->cursor_w * 4; c->cursor = av_realloc(c->cursor, c->cursor_stride * c->cursor_h); if (!c->cursor) return AVERROR(ENOMEM); dst = c->cursor; switch (c->cursor_fmt) { case 1: // old monochrome for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i += 32) { bits = bytestream2_get_be32(gb); for (k = 0; k < 32; k++) { dst[0] = !!(bits & 0x80000000); dst += 4; bits <<= 1; } } } dst = c->cursor; for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i += 32) { bits = bytestream2_get_be32(gb); for (k = 0; k < 32; k++) { int mask_bit = !!(bits & 0x80000000); switch (dst[0] * 2 + mask_bit) { case 0: dst[0] = 0xFF; dst[1] = 0x00; dst[2] = 0x00; dst[3] = 0x00; break; case 1: dst[0] = 0xFF; dst[1] = 0xFF; dst[2] = 0xFF; dst[3] = 0xFF; break; default: dst[0] = 0x00; dst[1] = 0x00; dst[2] = 0x00; dst[3] = 0x00; } dst += 4; bits <<= 1; } } } break; case 32: // full colour /* skip monochrome version of the cursor and decode RGBA instead */ bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3)); for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i++) { int val = bytestream2_get_be32(gb); *dst++ = val >> 0; *dst++ = val >> 8; *dst++ = val >> 16; *dst++ = val >> 24; } } break; default: return AVERROR_PATCHWELCOME; } return 0; } #define APPLY_ALPHA(src, new, alpha) \ src = (src * (256 - alpha) + new * alpha) >> 8 static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride) { int i, j; int x, y, w, h; const uint8_t *cursor; if (!c->cursor) return; x = c->cursor_x - c->cursor_hot_x; y = c->cursor_y - c->cursor_hot_y; cursor = c->cursor; w = c->cursor_w; h = c->cursor_h; if (x + w > c->width) w = c->width - x; if (y + h > c->height) h = c->height - y; if (x < 0) { w += x; cursor += -x * 4; } else { dst += x * 3; } if (y < 0) { h += y; cursor += -y * c->cursor_stride; } else { dst += y * stride; } if (w < 0 || h < 0) return; for (j = 0; j < h; j++) { for (i = 0; i < w; i++) { uint8_t alpha = cursor[i * 4]; APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha); APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha); APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha); } dst += stride; cursor += c->cursor_stride; } } static int g2m_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; G2MContext *c = avctx->priv_data; AVFrame *pic = data; GetByteContext bc, tbc; int magic; int got_header = 0; uint32_t chunk_size, cur_size; int chunk_type; int i; int ret; if (buf_size < 12) { av_log(avctx, AV_LOG_ERROR, "Frame should have at least 12 bytes, got %d instead\n", buf_size); return AVERROR_INVALIDDATA; } bytestream2_init(&bc, buf, buf_size); magic = bytestream2_get_be32(&bc); if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') || (magic & 0xF) < 2 || (magic & 0xF) > 4) { av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic); return AVERROR_INVALIDDATA; } if ((magic & 0xF) != 4) { av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n"); return AVERROR(ENOSYS); } while (bytestream2_get_bytes_left(&bc) > 5) { chunk_size = bytestream2_get_le32(&bc) - 1; chunk_type = bytestream2_get_byte(&bc); if (chunk_size > bytestream2_get_bytes_left(&bc)) { av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %d type %02X\n", chunk_size, chunk_type); break; } switch (chunk_type) { case FRAME_INFO: c->got_header = 0; if (chunk_size < 21) { av_log(avctx, AV_LOG_ERROR, "Invalid frame info size %d\n", chunk_size); break; } c->width = bytestream2_get_be32(&bc); c->height = bytestream2_get_be32(&bc); if (c->width < 16 || c->width > avctx->width || c->height < 16 || c->height > avctx->height) { av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d\n", c->width, c->height); c->width = c->height = 0; bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc)); } if (c->width != avctx->width || c->height != avctx->height) avcodec_set_dimensions(avctx, c->width, c->height); c->compression = bytestream2_get_be32(&bc); if (c->compression != 2 && c->compression != 3) { av_log(avctx, AV_LOG_ERROR, "Unknown compression method %d\n", c->compression); return AVERROR_PATCHWELCOME; } c->tile_width = bytestream2_get_be32(&bc); c->tile_height = bytestream2_get_be32(&bc); if (!c->tile_width || !c->tile_height) { av_log(avctx, AV_LOG_ERROR, "Invalid tile dimensions %dx%d\n", c->tile_width, c->tile_height); return AVERROR_INVALIDDATA; } c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width; c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height; c->bpp = bytestream2_get_byte(&bc); chunk_size -= 21; bytestream2_skip(&bc, chunk_size); if (g2m_init_buffers(c)) return AVERROR(ENOMEM); got_header = 1; break; case TILE_DATA: if (!c->tiles_x || !c->tiles_y) { av_log(avctx, AV_LOG_WARNING, "No frame header - skipping tile\n"); bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc)); break; } if (chunk_size < 2) { av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %d\n", chunk_size); break; } c->tile_x = bytestream2_get_byte(&bc); c->tile_y = bytestream2_get_byte(&bc); if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) { av_log(avctx, AV_LOG_ERROR, "Invalid tile pos %d,%d (in %dx%d grid)\n", c->tile_x, c->tile_y, c->tiles_x, c->tiles_y); break; } chunk_size -= 2; ret = 0; switch (c->compression) { case COMPR_EPIC_J_B: av_log(avctx, AV_LOG_ERROR, "ePIC j-b compression is not implemented yet\n"); return AVERROR(ENOSYS); case COMPR_KEMPF_J_B: ret = kempf_decode_tile(c, c->tile_x, c->tile_y, buf + bytestream2_tell(&bc), chunk_size); break; } if (ret && c->framebuf) av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n", c->tile_x, c->tile_y); bytestream2_skip(&bc, chunk_size); break; case CURSOR_POS: if (chunk_size < 5) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %d\n", chunk_size); break; } c->cursor_x = bytestream2_get_be16(&bc); c->cursor_y = bytestream2_get_be16(&bc); bytestream2_skip(&bc, chunk_size - 4); break; case CURSOR_SHAPE: if (chunk_size < 8) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n", chunk_size); break; } bytestream2_init(&tbc, buf + bytestream2_tell(&bc), chunk_size - 4); cur_size = bytestream2_get_be32(&tbc); c->cursor_w = bytestream2_get_byte(&tbc); c->cursor_h = bytestream2_get_byte(&tbc); c->cursor_hot_x = bytestream2_get_byte(&tbc); c->cursor_hot_y = bytestream2_get_byte(&tbc); c->cursor_fmt = bytestream2_get_byte(&tbc); if (cur_size >= chunk_size || c->cursor_w * c->cursor_h / 4 > cur_size) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n", chunk_size); break; } g2m_load_cursor(c, &tbc); bytestream2_skip(&bc, chunk_size); break; case CHUNK_CC: case CHUNK_CD: bytestream2_skip(&bc, chunk_size); break; default: av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02X\n", chunk_type); bytestream2_skip(&bc, chunk_size); } } if (got_header) c->got_header = 1; if (c->width && c->height) { if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } pic->key_frame = got_header; pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; for (i = 0; i < avctx->height; i++) memcpy(pic->data[0] + i * pic->linesize[0], c->framebuf + i * c->framebuf_stride, c->width * 3); g2m_paint_cursor(c, pic->data[0], pic->linesize[0]); *got_picture_ptr = 1; } return buf_size; } static av_cold int g2m_decode_init(AVCodecContext *avctx) { G2MContext * const c = avctx->priv_data; int ret; if ((ret = jpg_init(avctx, &c->jc)) != 0) { av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n"); jpg_free_context(&c->jc); return AVERROR(ENOMEM); } avctx->pix_fmt = PIX_FMT_RGB24; return 0; } static av_cold int g2m_decode_end(AVCodecContext *avctx) { G2MContext * const c = avctx->priv_data; jpg_free_context(&c->jc); av_freep(&c->kempf_buf); av_freep(&c->kempf_flags); av_freep(&c->synth_tile); av_freep(&c->jpeg_tile); av_freep(&c->cursor); av_freep(&c->framebuf); return 0; } AVCodec ff_g2m_decoder = { .name = "g2m", .long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_G2M, .priv_data_size = sizeof(G2MContext), .init = g2m_decode_init, .close = g2m_decode_end, .decode = g2m_decode_frame, .capabilities = CODEC_CAP_DR1, };