ape: decode directly to the user-provided AVFrame
commit 5932e2d7d2
parent da28bb3f4a
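For context, the pattern this commit moves to is sketched below: ape_decode_frame() treats the data pointer it receives as the caller's AVFrame, requests the output buffer on that frame with ff_get_buffer(), and writes the converted samples straight into frame->data[], so the decoder no longer needs an AVFrame of its own. This is a condensed, hypothetical sketch assembled only from the lines in the diff; it assumes it sits inside libavcodec/ape.c (so APEContext and ff_get_buffer() are in scope), shows only the 16-bit output path, and simplifies blockstodecode and the return value.

/* Condensed sketch of the decode-to-user-frame pattern; not the full
 * ape_decode_frame(), error paths and other bit depths omitted. */
static int ape_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;               /* caller-provided output frame */
    APEContext *s  = avctx->priv_data;
    int ret, ch, i;
    int blockstodecode = s->samples;     /* simplified: the real code caps this per call */

    /* allocate the output buffer directly on the caller's frame */
    frame->nb_samples = blockstodecode;
    if ((ret = ff_get_buffer(avctx, frame)) < 0)
        return ret;

    /* convert decoded samples straight into the user frame (16-bit case) */
    for (ch = 0; ch < s->channels; ch++) {
        int16_t *sample16 = (int16_t *)frame->data[ch];
        for (i = 0; i < blockstodecode; i++)
            *sample16++ = s->decoded[ch][i];
    }

    s->samples -= blockstodecode;

    *got_frame_ptr = 1;                  /* the caller's frame now holds the output */
    return avpkt->size;                  /* simplified; the real code returns bytes_used */
}

Because the samples land in the caller's frame, the final *(AVFrame *)data = s->frame struct copy disappears, along with the AVFrame member in APEContext and the avcodec_get_frame_defaults()/coded_frame bookkeeping in ape_decode_init(), as the hunks below show.
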
@@ -129,7 +129,6 @@ typedef struct APEPredictor {
 typedef struct APEContext {
     AVClass *class;      ///< class for AVOptions
     AVCodecContext *avctx;
-    AVFrame frame;
     DSPContext dsp;
     int channels;
     int samples;         ///< samples left to decode in current frame
@@ -235,9 +234,6 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
     ff_dsputil_init(&s->dsp, avctx);
     avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 filter_alloc_fail:
     ape_decode_close(avctx);
@@ -826,6 +822,7 @@ static void ape_unpack_stereo(APEContext *ctx, int count)
 static int ape_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     APEContext *s = avctx->priv_data;
     uint8_t *sample8;
@@ -909,8 +906,8 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
     s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8);
 
     /* get output buffer */
-    s->frame.nb_samples = blockstodecode;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = blockstodecode;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
@@ -932,21 +929,21 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
     switch (s->bps) {
     case 8:
         for (ch = 0; ch < s->channels; ch++) {
-            sample8 = (uint8_t *)s->frame.data[ch];
+            sample8 = (uint8_t *)frame->data[ch];
             for (i = 0; i < blockstodecode; i++)
                 *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
         }
         break;
     case 16:
         for (ch = 0; ch < s->channels; ch++) {
-            sample16 = (int16_t *)s->frame.data[ch];
+            sample16 = (int16_t *)frame->data[ch];
             for (i = 0; i < blockstodecode; i++)
                 *sample16++ = s->decoded[ch][i];
         }
         break;
     case 24:
         for (ch = 0; ch < s->channels; ch++) {
-            sample24 = (int32_t *)s->frame.data[ch];
+            sample24 = (int32_t *)frame->data[ch];
             for (i = 0; i < blockstodecode; i++)
                 *sample24++ = s->decoded[ch][i] << 8;
         }
@@ -955,8 +952,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
 
     s->samples -= blockstodecode;
 
-    *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
+    *got_frame_ptr = 1;
 
     return bytes_used;
 }