mirror of https://git.ffmpeg.org/ffmpeg.git
pixlet: Fix architecture-dependent code and values
The constants used in the decoder relied on floating point precision, and this caused different values to be generated on different architectures. Additionally, on big endian machines the fate test would output bytes in native order, which differs from the order hardcoded in the test.

So, eradicate floating point numbers and use fixed point (32.32) arithmetic everywhere, replacing the constants with precomputed integer values, and force the output pixel format in the fate test.

Signed-off-by: Vittorio Giovara <vittorio.giovara@gmail.com>
commit b44bd7ee7f
parent 808ef43597
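A note on the conversion (not part of the commit itself): each floating point coefficient becomes a 32.32 fixed-point integer by scaling it with 2^32 and truncating. The sketch below, with an illustrative helper name to_fixed_32_32, reproduces the integer constants used in the first filter loop of the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative helper, not part of the patch: scale a coefficient by 2^32
 * and truncate, giving its 32.32 fixed-point representation. */
static int64_t to_fixed_32_32(double x)
{
    return (int64_t)(x * 4294967296.0);
}

int main(void)
{
    /* These match the INT64_C() constants of the first loop in filterfn(). */
    printf("%"PRId64"\n", to_fixed_32_32(0.07576144003329376)); /*  325392907 */
    printf("%"PRId64"\n", to_fixed_32_32(0.8586296626673486));  /* 3687786320 */
    printf("%"PRId64"\n", to_fixed_32_32(0.3535533905932737));  /* 1518500249 */
    return 0;
}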
@@ -39,8 +39,6 @@
 #define H 0
 #define V 1
 
-#define SQR(x) ((x) * (x))
-
 typedef struct SubBand {
     size_t width, height;
     size_t size;
@@ -59,7 +57,7 @@ typedef struct PixletContext {
 
     int16_t *filter[2];
     int16_t *prediction;
-    float   scaling[4][2][NB_LEVELS];
+    int64_t scaling[4][2][NB_LEVELS];
     SubBand band[4][NB_LEVELS * 3 + 1];
 } PixletContext;
 
@@ -364,11 +362,11 @@ static void lowpass_prediction(int16_t *dst, int16_t *pred,
     }
 }
 
-static void filterfn(int16_t *dest, int16_t *tmp, size_t size, float SCALE)
+static void filterfn(int16_t *dest, int16_t *tmp, size_t size, int64_t scale)
 {
     int16_t *low, *high, *ll, *lh, *hl, *hh;
     int hsize, i, j;
-    float value;
+    int64_t value;
 
     hsize = size >> 1;
     low = tmp + 4;
@@ -389,29 +387,29 @@ static void filterfn(int16_t *dest, int16_t *tmp, size_t size, float SCALE)
     }
 
     for (i = 0; i < hsize; i++) {
-        value = low [i + 1] * -0.07576144003329376f +
-                low [i + 0] *  0.8586296626673486f  +
-                low [i - 1] * -0.07576144003329376f +
-                high[i + 0] *  0.3535533905932737f  +
-                high[i - 1] *  0.3535533905932737f;
-        dest[i * 2] = av_clipf(value * SCALE, INT16_MIN, INT16_MAX);
+        value = (int64_t) low [i + 1] * -INT64_C(325392907)  +
+                (int64_t) low [i + 0] *  INT64_C(3687786320) +
+                (int64_t) low [i - 1] * -INT64_C(325392907)  +
+                (int64_t) high[i + 0] *  INT64_C(1518500249) +
+                (int64_t) high[i - 1] *  INT64_C(1518500249);
+        dest[i * 2] = av_clip_int16(((value >> 32) * scale) >> 32);
     }
 
     for (i = 0; i < hsize; i++) {
-        value = low [i + 2] * -0.01515228715813062f +
-                low [i + 1] *  0.3687056777514043f  +
-                low [i + 0] *  0.3687056777514043f  +
-                low [i - 1] * -0.01515228715813062f +
-                high[i + 1] *  0.07071067811865475f +
-                high[i + 0] * -0.8485281374238569f  +
-                high[i - 1] *  0.07071067811865475f;
-        dest[i * 2 + 1] = av_clipf(value * SCALE, INT16_MIN, INT16_MAX);
+        value = (int64_t) low [i + 2] * -INT64_C(65078576)   +
+                (int64_t) low [i + 1] *  INT64_C(1583578880) +
+                (int64_t) low [i + 0] *  INT64_C(1583578880) +
+                (int64_t) low [i - 1] * -INT64_C(65078576)   +
+                (int64_t) high[i + 1] *  INT64_C(303700064)  +
+                (int64_t) high[i + 0] * -INT64_C(3644400640) +
+                (int64_t) high[i - 1] *  INT64_C(303700064);
+        dest[i * 2 + 1] = av_clip_int16(((value >> 32) * scale) >> 32);
     }
 }
 
 static void reconstruction(AVCodecContext *avctx, int16_t *dest,
                            size_t width, size_t height, ptrdiff_t stride,
-                           float *scaling_h, float *scaling_v)
+                           int64_t *scaling_h, int64_t *scaling_v)
 {
     PixletContext *ctx = avctx->priv_data;
     unsigned scaled_width, scaled_height;
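To see how the new integer expression stands in for the old float one, here is a rough sketch with made-up sample and scale values (they are not taken from the codec): each int16 sample multiplied by a coefficient pre-scaled by 2^32 yields an accumulator in 32.32 fixed point, so shifting it down by 32 and multiplying by the 32.32 scale, then shifting by 32 again, approximates the former value * SCALE. The int16 clipping is omitted here.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Made-up neighbour samples standing in for low[]/high[] entries. */
    int16_t lm1 = -37, l0 = 1200, lp1 = -300, hm1 = 15, h0 = 57;
    double  scale_f = 0.25;                              /* made-up scale  */
    int64_t scale_q = (int64_t)(scale_f * 4294967296.0); /* same, in 32.32 */

    /* Old float path of the first filterfn() loop. */
    double vf = lp1 * -0.07576144003329376 +
                l0  *  0.8586296626673486  +
                lm1 * -0.07576144003329376 +
                h0  *  0.3535533905932737  +
                hm1 *  0.3535533905932737;
    double out_f = vf * scale_f;

    /* New fixed-point path: value holds roughly vf scaled by 2^32. */
    int64_t vq = (int64_t)lp1 * -INT64_C(325392907)  +
                 (int64_t)l0  *  INT64_C(3687786320) +
                 (int64_t)lm1 * -INT64_C(325392907)  +
                 (int64_t)h0  *  INT64_C(1518500249) +
                 (int64_t)hm1 *  INT64_C(1518500249);
    int64_t out_q = ((vq >> 32) * scale_q) >> 32;

    printf("float %.3f vs fixed %"PRId64"\n", out_f, out_q);
    return 0;
}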
@@ -423,8 +421,8 @@ static void reconstruction(AVCodecContext *avctx, int16_t *dest,
     tmp = ctx->filter[0];
 
     for (i = 0; i < NB_LEVELS; i++) {
-        float scale_v = scaling_v[i];
-        float scale_h = scaling_h[i];
+        int64_t scale_v = scaling_v[i];
+        int64_t scale_h = scaling_h[i];
         scaled_width  <<= 1;
         scaled_height <<= 1;
 
@@ -457,12 +455,18 @@ static void postprocess_luma(AVFrame *frame, size_t w, size_t h, int depth)
     uint16_t *dsty = (uint16_t *)frame->data[0];
     int16_t  *srcy = (int16_t *)frame->data[0];
     ptrdiff_t stridey = frame->linesize[0] / 2;
-    const float factor = 1.0f / ((1 << depth) - 1);
     int i, j;
 
     for (j = 0; j < h; j++) {
-        for (i = 0; i < w; i++)
-            dsty[i] = SQR(FFMAX(srcy[i], 0) * factor) * 65535;
+        for (i = 0; i < w; i++) {
+            if (srcy[i] <= 0)
+                dsty[i] = 0;
+            else if (srcy[i] > ((1 << depth) - 1))
+                dsty[i] = 65535;
+            else
+                dsty[i] = ((int64_t) srcy[i] * srcy[i] * 65535) /
+                          ((1 << depth) - 1) / ((1 << depth) - 1);
+        }
         dsty += stridey;
         srcy += stridey;
     }
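The luma post-processing maps a sample s in [0, (1 << depth) - 1] onto 16 bits through a squaring curve, roughly (s / max)^2 * 65535. Here is a small before/after sketch using made-up depth and sample values, with the clamping written out as in the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int depth = 12;               /* hypothetical bit depth      */
    const int max   = (1 << depth) - 1;
    int16_t   s     = 1234;             /* made-up decoded luma sample */
    uint16_t  out_flt, out_int;

    /* Old float path: SQR(FFMAX(s, 0) * factor) * 65535. */
    float factor = 1.0f / max;
    float t      = (s > 0 ? s : 0) * factor;
    out_flt = t * t * 65535;

    /* New integer path, identical on every architecture. */
    if (s <= 0)
        out_int = 0;
    else if (s > max)
        out_int = 65535;
    else
        out_int = ((int64_t) s * s * 65535) / max / max;

    printf("float %u vs integer %u\n", (unsigned) out_flt, (unsigned) out_int);
    return 0;
}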
@@ -508,8 +512,8 @@ static int decode_plane(AVCodecContext *avctx, int plane,
         if (!h || !v)
             return AVERROR_INVALIDDATA;
 
-        ctx->scaling[plane][H][i] = 1000000.0f / h;
-        ctx->scaling[plane][V][i] = 1000000.0f / v;
+        ctx->scaling[plane][H][i] = (1000000ULL << 32) / h;
+        ctx->scaling[plane][V][i] = (1000000ULL << 32) / v;
     }
 
     bytestream2_skip(&ctx->gb, 4);
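Likewise for the per-plane scale factors: 1000000 / h used to be stored as a float, and the same quantity is now kept as a 32.32 fixed-point integer, so the later ((value >> 32) * scale) >> 32 step in filterfn() stays entirely in integer arithmetic. A minimal sketch with a made-up h:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned h = 333;                                  /* made-up field value */
    int64_t  scale_q = (int64_t)((1000000ULL << 32) / h);
    double   scale_f = 1000000.0 / h;

    /* Dividing the fixed-point value by 2^32 recovers (approximately)
     * the float scale the decoder used before. */
    printf("%f vs %f\n", scale_f, scale_q / 4294967296.0);
    return 0;
}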
@@ -247,7 +247,7 @@ FATE_SAMPLES_AVCONV-$(call DEMDEC, PAF, PAF_VIDEO) += fate-paf-video
 fate-paf-video: CMD = framecrc -i $(TARGET_SAMPLES)/paf/hod1-partial.paf -pix_fmt rgb24 -an
 
 FATE_SAMPLES_AVCONV-$(call DEMDEC, MOV, PIXLET) += fate-pixlet
-fate-pixlet: CMD = framecrc -i $(TARGET_SAMPLES)/pxlt/pixlet.mov -an
+fate-pixlet: CMD = framecrc -i $(TARGET_SAMPLES)/pxlt/pixlet.mov -an -pix_fmt yuv420p16le
 
 FATE_SAMPLES_AVCONV-$(call DEMDEC, AVI, QPEG) += fate-qpeg
 fate-qpeg: CMD = framecrc -i $(TARGET_SAMPLES)/qpeg/Clock.avi -an -pix_fmt rgb24
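Why the fate test needs a pinned pixel format: framecrc hashes the raw frame bytes, and 16-bit samples are laid out differently in memory on little and big endian hosts, so a checksum over native-order output cannot match on both. A tiny illustration (not decoder code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t sample = 0x1234;
    const uint8_t *p = (const uint8_t *) &sample;

    /* Little endian hosts print "34 12", big endian hosts print "12 34";
     * forcing -pix_fmt yuv420p16le makes the byte order explicit. */
    printf("%02x %02x\n", p[0], p[1]);
    return 0;
}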
@@ -1,2 +1,2 @@
 #tb 0: 1/25
-0, 0, 0, 1, 2764800, 0xd0b6bf48
+0, 0, 0, 1, 2764800, 0xe8eff295