avcodec: Make ff_alloc_packet() based encoders accept user buffers

Up until now, these encoders received non-refcounted packets
(whose data was owned by the corresponding AVCodecContext)
from ff_alloc_packet(); these packets were then made refcounted generically
by av_packet_make_refcounted().
This commit makes these encoders accept user-supplied buffers by
replacing av_packet_make_refcounted() with an equivalent function
that is based upon get_encode_buffer().
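
In practice, "accept user-supplied buffers" means that, with
AV_CODEC_CAP_DR1 set, an application can install its own
AVCodecContext.get_encode_buffer callback and these encoders will write
into the memory it provides. A minimal sketch of such a callback,
assuming only the documented get_encode_buffer contract (the callback
name is hypothetical):

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>

static int my_get_encode_buffer(AVCodecContext *avctx, AVPacket *pkt, int flags)
{
    /* pkt->size is set by libavcodec before this callback is invoked;
     * the buffer must include AV_INPUT_BUFFER_PADDING_SIZE bytes of padding. */
    AVBufferRef *buf = av_buffer_alloc(pkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return AVERROR(ENOMEM);
    memset(buf->data + pkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    /* The callback must fill both pkt->buf and pkt->data; a real
     * application would typically hand out memory from its own pool here. */
    pkt->buf  = buf;
    pkt->data = buf->data;
    return 0;
}

Assign avctx->get_encode_buffer = my_get_encode_buffer before avcodec_open2().
Note that only encoders advertising AV_CODEC_CAP_DR1 may be handed such a
custom buffer; for other encoders the callback has to fall back to
avcodec_default_get_encode_buffer().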

(I am pretty certain that one can also set the flag for mpegvideo-based
encoders, but I want to double-check this later. What is certain is that
it reallocates the buffer owned by the AVCodecContext; that reallocation
should maybe be moved to encode.c, so that proresenc_kostya.c and
ttaenc.c can make use of it, too.)

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Andreas Rheinhardt 2021-05-11 20:52:13 +02:00
parent 7360e97e4b
commit a499b4345b
50 changed files with 83 additions and 30 deletions

@ -1417,6 +1417,8 @@ const FFCodec ff_aac_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_AAC,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(AACEncContext),
.init = aac_encode_init,
FF_CODEC_ENCODE_CB(aac_encode_frame),
@ -1424,7 +1426,6 @@ const FFCodec ff_aac_encoder = {
.defaults = aac_encode_defaults,
.p.supported_samplerates = ff_mpeg4audio_sample_rates,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
.p.priv_class = &aacenc_class,

@ -654,12 +654,12 @@ const FFCodec ff_alac_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_ALAC,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(AlacEncodeContext),
.p.priv_class = &alacenc_class,
.init = alac_encode_init,
FF_CODEC_ENCODE_CB(alac_encode_frame),
.close = alac_encode_close,
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
#if FF_API_OLD_CHANNEL_LAYOUT
.p.channel_layouts = alac_channel_layouts,
#endif

@ -106,6 +106,7 @@ const FFCodec ff_alias_pix_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Alias/Wavefront PIX image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_ALIAS_PIX,
.p.capabilities = AV_CODEC_CAP_DR1,
FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_BGR24, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE

@ -348,6 +348,7 @@ const FFCodec ff_asv1_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("ASUS V1"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_ASV1,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(ASV1Context),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_frame),
@ -362,6 +363,7 @@ const FFCodec ff_asv2_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("ASUS V2"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_ASV2,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(ASV1Context),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_frame),

@ -851,12 +851,12 @@ const FFCodec ff_cfhd_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("GoPro CineForm HD"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_CFHD,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(CFHDEncContext),
.p.priv_class = &cfhd_class,
.init = cfhd_encode_init,
.close = cfhd_encode_close,
FF_CODEC_ENCODE_CB(cfhd_encode_frame),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV422P10,
AV_PIX_FMT_GBRP12,

@ -1219,6 +1219,7 @@ const FFCodec ff_cinepak_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Cinepak"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_CINEPAK,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(CinepakEncContext),
.init = cinepak_encode_init,
FF_CODEC_ENCODE_CB(cinepak_encode_frame),

@ -107,6 +107,23 @@ fail:
return ret;
}
static int encode_make_refcounted(AVCodecContext *avctx, AVPacket *avpkt)
{
uint8_t *data = avpkt->data;
int ret;
if (avpkt->buf)
return 0;
avpkt->data = NULL;
ret = ff_get_encode_buffer(avctx, avpkt, avpkt->size, 0);
if (ret < 0)
return ret;
memcpy(avpkt->data, data, avpkt->size);
return 0;
}
/**
* Pad last frame with silence.
*/
@ -184,7 +201,7 @@ int ff_encode_encode_cb(AVCodecContext *avctx, AVPacket *avpkt,
if (!ret && *got_packet) {
if (avpkt->data) {
ret = av_packet_make_refcounted(avpkt);
ret = encode_make_refcounted(avctx, avpkt);
if (ret < 0)
goto unref;
// Data returned by encoders must always be ref-counted

@ -1271,11 +1271,12 @@ const FFCodec ff_ffv1_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_FFV1,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SLICE_THREADS,
.priv_data_size = sizeof(FFV1Context),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_frame),
.close = ff_ffv1_close,
.p.capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_DELAY,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,

@ -915,6 +915,7 @@ const FFCodec ff_flashsv2_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video Version 2"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_FLASHSV2,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(FlashSV2Context),
.init = flashsv2_encode_init,
FF_CODEC_ENCODE_CB(flashsv2_encode_frame),

@ -261,6 +261,7 @@ const FFCodec ff_flashsv_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_FLASHSV,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(FlashSVContext),
.init = flashsv_encode_init,
FF_CODEC_ENCODE_CB(flashsv_encode_frame),

@ -553,6 +553,7 @@ const FFCodec ff_gif_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("GIF (Graphics Interchange Format)"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_GIF,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(GIFContext),
.init = gif_encode_init,
FF_CODEC_ENCODE_CB(gif_encode_frame),

@ -351,9 +351,9 @@ const FFCodec ff_hap_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Vidvox Hap"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_HAP,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
.priv_data_size = sizeof(HapContext),
.p.priv_class = &hapenc_class,
.p.capabilities = AV_CODEC_CAP_SLICE_THREADS,
.init = hap_init,
FF_CODEC_ENCODE_CB(hap_encode),
.close = hap_close,

@ -1054,11 +1054,11 @@ const FFCodec ff_huffyuv_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_HUFFYUV,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(HYuvContext),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.priv_class = &normal_class,
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
@ -1073,11 +1073,11 @@ const FFCodec ff_ffvhuff_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_FFVHUFF,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(HYuvContext),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.priv_class = &ff_class,
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV411P,

@ -1835,6 +1835,7 @@ const FFCodec ff_jpeg2000_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("JPEG 2000"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_JPEG2000,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(Jpeg2000EncoderContext),
.init = j2kenc_init,
FF_CODEC_ENCODE_CB(encode_frame),

@ -156,11 +156,11 @@ const FFCodec ff_zlib_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("LCL (LossLess Codec Library) ZLIB"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_ZLIB,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(LclEncContext),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
};

@ -480,12 +480,13 @@ const FFCodec ff_libfdk_aac_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Fraunhofer FDK AAC"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_AAC,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SMALL_LAST_FRAME,
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
.priv_data_size = sizeof(AACContext),
.init = aac_encode_init,
FF_CODEC_ENCODE_CB(aac_encode_frame),
.close = aac_encode_close,
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.p.priv_class = &aac_enc_class,

@ -205,6 +205,7 @@ const FFCodec ff_libilbc_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("iLBC (Internet Low Bitrate Codec)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_ILBC,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
.priv_data_size = sizeof(ILBCEncContext),
.init = ilbc_encode_init,

@ -294,12 +294,13 @@ const FFCodec ff_libopencore_amrnb_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("OpenCORE AMR-NB (Adaptive Multi-Rate Narrow-Band)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_AMR_NB,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SMALL_LAST_FRAME,
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
.priv_data_size = sizeof(AMRContext),
.init = amr_nb_encode_init,
FF_CODEC_ENCODE_CB(amr_nb_encode_frame),
.close = amr_nb_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SMALL_LAST_FRAME,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.p.priv_class = &amrnb_class,

@ -588,12 +588,13 @@ const FFCodec ff_libopus_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("libopus Opus"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_OPUS,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SMALL_LAST_FRAME,
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
.priv_data_size = sizeof(LibopusEncContext),
.init = libopus_encode_init,
FF_CODEC_ENCODE_CB(libopus_encode),
.close = libopus_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SMALL_LAST_FRAME,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },

@ -353,12 +353,12 @@ const FFCodec ff_libspeex_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("libspeex Speex"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_SPEEX,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
.priv_data_size = sizeof(LibSpeexEncContext),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
#if FF_API_OLD_CHANNEL_LAYOUT

@ -214,12 +214,12 @@ const FFCodec ff_libtwolame_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("libtwolame MP2 (MPEG audio layer 2)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_MP2,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
.priv_data_size = sizeof(TWOLAMEContext),
.init = twolame_encode_init,
FF_CODEC_ENCODE_CB(twolame_encode_frame),
.close = twolame_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY,
.defaults = twolame_defaults,
.p.priv_class = &twolame_class,
.p.sample_fmts = (const enum AVSampleFormat[]) {

@ -146,6 +146,7 @@ const FFCodec ff_libvo_amrwbenc_encoder = {
"(Adaptive Multi-Rate Wide-Band)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_AMR_WB,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.priv_class = &amrwb_class,
.p.wrapper_name = "libvo_amrwbenc",
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,

@ -902,6 +902,7 @@ const FFCodec ff_libxvid_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("libxvidcore MPEG-4 part 2"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_MPEG4,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(struct xvid_context),
.init = xvid_encode_init,
FF_CODEC_ENCODE_CB(xvid_encode_frame),

@ -328,12 +328,12 @@ const FFCodec ff_ljpeg_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_LJPEG,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(LJpegEncContext),
.p.priv_class = &ljpeg_class,
.init = ljpeg_encode_init,
FF_CODEC_ENCODE_CB(ljpeg_encode_frame),
.close = ljpeg_encode_close,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_BGR24 , AV_PIX_FMT_BGRA , AV_PIX_FMT_BGR0,
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,

@ -569,12 +569,12 @@ const FFCodec ff_magicyuv_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("MagicYUV video"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_MAGICYUV,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(MagicYUVContext),
.p.priv_class = &magicyuv_class,
.init = magy_encode_init,
.close = magy_encode_close,
FF_CODEC_ENCODE_CB(magy_encode_frame),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_GRAY8,

@ -2213,11 +2213,12 @@ const FFCodec ff_mlp_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_MLP,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_EXPERIMENTAL,
.priv_data_size = sizeof(MLPEncodeContext),
.init = mlp_encode_init,
FF_CODEC_ENCODE_CB(mlp_encode_frame),
.close = mlp_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
.p.sample_fmts = (const enum AVSampleFormat[]) {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_NONE},
.p.supported_samplerates = (const int[]) {44100, 48000, 88200, 96000, 176400, 192000, 0},
#if FF_API_OLD_CHANNEL_LAYOUT
@ -2233,11 +2234,13 @@ const FFCodec ff_truehd_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("TrueHD"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_TRUEHD,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SMALL_LAST_FRAME |
AV_CODEC_CAP_EXPERIMENTAL,
.priv_data_size = sizeof(MLPEncodeContext),
.init = mlp_encode_init,
FF_CODEC_ENCODE_CB(mlp_encode_frame),
.close = mlp_encode_close,
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
.p.sample_fmts = (const enum AVSampleFormat[]) {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_NONE},
.p.supported_samplerates = (const int[]) {44100, 48000, 88200, 96000, 176400, 192000, 0},
#if FF_API_OLD_CHANNEL_LAYOUT

@ -28,6 +28,7 @@ const FFCodec ff_mp2fixed_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("MP2 fixed point (MPEG audio layer 2)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_MP2,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(MpegAudioContext),
.init = MPA_encode_init,
FF_CODEC_ENCODE_CB(MPA_encode_frame),

@ -29,6 +29,7 @@ const FFCodec ff_mp2_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_MP2,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(MpegAudioContext),
.init = MPA_encode_init,
FF_CODEC_ENCODE_CB(MPA_encode_frame),

@ -730,6 +730,8 @@ const FFCodec ff_opus_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Opus"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_OPUS,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_EXPERIMENTAL,
.defaults = opusenc_defaults,
.p.priv_class = &opusenc_class,
.priv_data_size = sizeof(OpusEncContext),
@ -737,7 +739,6 @@ const FFCodec ff_opus_encoder = {
FF_CODEC_ENCODE_CB(opus_encode_frame),
.close = opus_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.p.capabilities = AV_CODEC_CAP_EXPERIMENTAL | AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
.p.supported_samplerates = (const int []){ 48000, 0 },
#if FF_API_OLD_CHANNEL_LAYOUT
.p.channel_layouts = (const uint64_t []){ AV_CH_LAYOUT_MONO,

@ -197,6 +197,7 @@ const FFCodec ff_pcx_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("PC Paintbrush PCX image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PCX,
.p.capabilities = AV_CODEC_CAP_DR1,
FF_CODEC_ENCODE_CB(pcx_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_RGB24,

@ -1196,11 +1196,11 @@ const FFCodec ff_png_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PNG,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(PNGEncContext),
.init = png_enc_init,
.close = png_enc_close,
FF_CODEC_ENCODE_CB(encode_png),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA,
AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGBA64BE,

@ -945,12 +945,12 @@ const FFCodec ff_prores_aw_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PRORES,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = pix_fmts,
.priv_data_size = sizeof(ProresContext),
.init = prores_encode_init,
.close = prores_encode_close,
FF_CODEC_ENCODE_CB(prores_encode_frame),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.priv_class = &prores_enc_class,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
@ -961,12 +961,12 @@ const FFCodec ff_prores_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PRORES,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = pix_fmts,
.priv_data_size = sizeof(ProresContext),
.init = prores_encode_init,
.close = prores_encode_close,
FF_CODEC_ENCODE_CB(prores_encode_frame),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.priv_class = &prores_enc_class,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,

@ -131,7 +131,7 @@ const FFCodec ff_qoi_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("QOI (Quite OK Image format) image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_QOI,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
FF_CODEC_ENCODE_CB(qoi_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB24,

@ -404,6 +404,7 @@ const FFCodec ff_qtrle_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_QTRLE,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(QtrleEncContext),
.init = qtrle_encode_init,
FF_CODEC_ENCODE_CB(qtrle_encode_frame),

@ -1123,6 +1123,7 @@ const FFCodec ff_roq_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("id RoQ video"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_ROQ,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(RoqEncContext),
.init = roq_encode_init,
FF_CODEC_ENCODE_CB(roq_encode_frame),

@ -849,6 +849,7 @@ const FFCodec ff_rpza_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("QuickTime video (RPZA)"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_RPZA,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(RpzaContext),
.p.priv_class = &rpza_class,
.init = rpza_encode_init,

@ -275,6 +275,7 @@ const FFCodec ff_sgi_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("SGI image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_SGI,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(SgiContext),
.p.priv_class = &sgi_class,
.init = encode_init,

@ -553,6 +553,7 @@ const FFCodec ff_smc_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("QuickTime Graphics (SMC)"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_SMC,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(SMCContext),
.init = smc_encode_init,
FF_CODEC_ENCODE_CB(smc_encode_frame),

@ -1933,6 +1933,7 @@ const FFCodec ff_snow_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Snow"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_SNOW,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(SnowContext),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_frame),

@ -1096,11 +1096,11 @@ const FFCodec ff_sonic_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Sonic"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_SONIC,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_EXPERIMENTAL,
.priv_data_size = sizeof(SonicContext),
.init = sonic_encode_init,
FF_CODEC_ENCODE_CB(sonic_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
.p.capabilities = AV_CODEC_CAP_EXPERIMENTAL,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.close = sonic_encode_close,
};
@ -1112,11 +1112,11 @@ const FFCodec ff_sonic_ls_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Sonic lossless"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_SONIC_LS,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_EXPERIMENTAL,
.priv_data_size = sizeof(SonicContext),
.init = sonic_encode_init,
FF_CODEC_ENCODE_CB(sonic_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
.p.capabilities = AV_CODEC_CAP_EXPERIMENTAL,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.close = sonic_encode_close,
};

@ -213,6 +213,7 @@ const FFCodec ff_sunrast_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Sun Rasterfile image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_SUNRAST,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(SUNRASTContext),
.init = sunrast_encode_init,
FF_CODEC_ENCODE_CB(sunrast_encode_frame),

@ -682,6 +682,7 @@ const FFCodec ff_svq1_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_SVQ1,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(SVQ1EncContext),
.p.priv_class = &svq1enc_class,
.init = svq1_encode_init,

@ -207,6 +207,7 @@ const FFCodec ff_targa_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Truevision Targa image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_TARGA,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(TargaContext),
.p.priv_class = &targa_class,
.init = targa_encode_init,

@ -574,10 +574,10 @@ const FFCodec ff_tiff_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_TIFF,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(TiffEncoderContext),
.init = encode_init,
.close = encode_close,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB48LE, AV_PIX_FMT_PAL8,

@ -204,11 +204,11 @@ const FFCodec ff_tta_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("TTA (True Audio)"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_TTA,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(TTAEncContext),
.init = tta_encode_init,
.close = tta_encode_close,
FF_CODEC_ENCODE_CB(tta_encode_frame),
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S32,

@ -648,12 +648,12 @@ const FFCodec ff_utvideo_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_UTVIDEO,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(UtvideoContext),
.p.priv_class = &utvideo_class,
.init = utvideo_encode_init,
FF_CODEC_ENCODE_CB(utvideo_encode_frame),
.close = utvideo_encode_close,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE

@ -1300,11 +1300,12 @@ const FFCodec ff_vorbis_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Vorbis"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_VORBIS,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_EXPERIMENTAL,
.priv_data_size = sizeof(vorbis_enc_context),
.init = vorbis_encode_init,
FF_CODEC_ENCODE_CB(vorbis_encode_frame),
.close = vorbis_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,

@ -2963,12 +2963,12 @@ const FFCodec ff_wavpack_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("WavPack"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_WAVPACK,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(WavPackEncodeContext),
.p.priv_class = &wavpack_encoder_class,
.init = wavpack_encode_init,
FF_CODEC_ENCODE_CB(wavpack_encode_frame),
.close = wavpack_encode_close,
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32P,

@ -438,6 +438,7 @@ const FFCodec ff_wmav1_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_WMAV1,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(WMACodecContext),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_superframe),
@ -453,6 +454,7 @@ const FFCodec ff_wmav2_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_WMAV2,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(WMACodecContext),
.init = encode_init,
FF_CODEC_ENCODE_CB(encode_superframe),

@ -82,6 +82,7 @@ const FFCodec ff_xbm_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("XBM (X BitMap) image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_XBM,
.p.capabilities = AV_CODEC_CAP_DR1,
FF_CODEC_ENCODE_CB(xbm_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_MONOWHITE,
AV_PIX_FMT_NONE },