lagarith: add YUY2 decoding support
Unlike other variants, for YUY2 we need to use different prediction:
* on line 0 for luma we should left predict starting from the second pixel
* on line 1 we should left predict first 4 pixels for luma and 2 for chroma
* median prediction employed here is taken directly from HuffYUV
parent 58637a0b24
commit 464e9ab011
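The three rules above are easy to misread, so here is a minimal standalone sketch of the per-line reconstruction they describe. It is an illustration only, not lagarith.c itself: the function and helper names (predict_line_yuy2_sketch, median_pred) are invented for this example, and the actual decoder hands the inner loops to dsp.add_hfyu_left_prediction() and dsp.add_hfyu_median_prediction(), as the diff below shows.

#include <stdint.h>

/* HuffYUV median predictor: median of left, top and (left + top - topleft). */
static int median_pred(int left, int top, int topleft)
{
    int grad = (left + top - topleft) & 0xFF; /* wrapped to 8 bits, HuffYUV-style */
    int mn   = left < top ? left : top;
    int mx   = left < top ? top  : left;
    if (grad < mn) return mn;
    if (grad > mx) return mx;
    return grad;
}

/* Reconstruct one plane line in place. buf points at the start of the
 * current line, which still holds residuals; the lines above it have
 * already been reconstructed, so negative indices read decoded pixels. */
static void predict_line_yuy2_sketch(uint8_t *buf, int width, int stride,
                                     int line, int is_luma)
{
    int i, L, TL;

    if (line == 0) {
        /* Top line: plain left prediction. For luma the first sample is
         * skipped, so the accumulator is seeded from the second pixel. */
        if (is_luma) {
            buf++;
            width--;
        }
        L = buf[0];
        for (i = 1; i < width; i++)
            buf[i] = L = (uint8_t)(L + buf[i]);
        return;
    }

    if (line == 1) {
        /* Second line: left-predict a short head (4 luma / 2 chroma
         * samples), seeded from the last pixel of the line above. */
        const int head = is_luma ? 4 : 2;

        L  = buf[width - stride - 1]; /* last pixel of previous line   */
        TL = buf[head  - stride - 1]; /* pixel above the head boundary */
        for (i = 0; i < head; i++)
            buf[i] = L = (uint8_t)(L + buf[i]);
        buf   += head;
        width -= head;
    } else {
        /* Later lines: seed left/top-left from the previous two lines. */
        TL = buf[width - 2 * stride - 1];
        L  = buf[width -     stride - 1];
    }

    /* Remaining samples: median prediction against the line above. */
    for (i = 0; i < width; i++) {
        int T  = buf[i - stride];
        L      = (uint8_t)(median_pred(L, T, TL) + buf[i]);
        TL     = T;
        buf[i] = (uint8_t)L;
    }
}

Decoding a whole plane is then one such call per line, which is what the new branch in lag_decode_arith_plane() below does.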
@@ -269,6 +269,40 @@ static void lag_pred_line(LagarithContext *l, uint8_t *buf,
     }
 }
 
+static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
+                               int width, int stride, int line,
+                               int is_luma)
+{
+    int L, TL;
+
+    if (!line) {
+        if (is_luma) {
+            buf++;
+            width--;
+        }
+        l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1, width - 1, buf[0]);
+        return;
+    }
+    if (line == 1) {
+        const int HEAD = is_luma ? 4 : 2;
+        int i;
+
+        L  = buf[width - stride - 1];
+        TL = buf[HEAD  - stride - 1];
+        for (i = 0; i < HEAD; i++) {
+            L += buf[i];
+            buf[i] = L;
+        }
+        buf   += HEAD;
+        width -= HEAD;
+    } else {
+        TL = buf[width - (2 * stride) - 1];
+        L  = buf[width - stride - 1];
+    }
+    l->dsp.add_hfyu_median_prediction(buf, buf - stride, buf, width,
+                                      &L, &TL);
+}
+
 static int lag_decode_line(LagarithContext *l, lag_rac *rac,
                            uint8_t *dst, int width, int stride,
                            int esc_count)
@@ -432,9 +466,17 @@ static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
         return -1;
     }
 
-    for (i = 0; i < height; i++) {
-        lag_pred_line(l, dst, width, stride, i);
-        dst += stride;
+    if (l->avctx->pix_fmt != PIX_FMT_YUV422P) {
+        for (i = 0; i < height; i++) {
+            lag_pred_line(l, dst, width, stride, i);
+            dst += stride;
+        }
+    } else {
+        for (i = 0; i < height; i++) {
+            lag_pred_line_yuy2(l, dst, width, stride, i,
+                               width == l->avctx->width);
+            dst += stride;
+        }
     }
 
     return 0;
@@ -557,6 +599,32 @@ static int lag_decode_frame(AVCodecContext *avctx,
             srcs[i] += l->rgb_stride;
         }
         break;
+    case FRAME_ARITH_YUY2:
+        avctx->pix_fmt = PIX_FMT_YUV422P;
+
+        if (avctx->get_buffer(avctx, p) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+            return -1;
+        }
+
+        if (offset_ry >= buf_size ||
+            offset_gu >= buf_size ||
+            offset_bv >= buf_size) {
+            av_log(avctx, AV_LOG_ERROR,
+                   "Invalid frame offsets\n");
+            return AVERROR_INVALIDDATA;
+        }
+
+        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
+                               p->linesize[0], buf + offset_ry,
+                               buf_size - offset_ry);
+        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
+                               avctx->height, p->linesize[2],
+                               buf + offset_gu, buf_size - offset_gu);
+        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
+                               avctx->height, p->linesize[1],
+                               buf + offset_bv, buf_size - offset_bv);
+        break;
     case FRAME_ARITH_YV12:
         avctx->pix_fmt = PIX_FMT_YUV420P;
 