swscale/ppc: VSX-optimize yuv2rgb_full_2
./ffmpeg -f lavfi -i yuvtestsrc=duration=1:size=1200x1440 -sws_flags area \
-s 1200x720 -f null -vframes 100 -pix_fmt $i -nostats \
-cpuflags 0 -v error -

32-bit mul, power8 only. ~4x speedup:

rgb24
52763 UNITS in yuv2packed2, 16384 runs, 0 skips
13453 UNITS in yuv2packed2, 16384 runs, 0 skips
bgr24
53144 UNITS in yuv2packed2, 16384 runs, 0 skips
13616 UNITS in yuv2packed2, 16384 runs, 0 skips
rgba
52796 UNITS in yuv2packed2, 16384 runs, 0 skips
12904 UNITS in yuv2packed2, 16384 runs, 0 skips
bgra
52732 UNITS in yuv2packed2, 16384 runs, 0 skips
13262 UNITS in yuv2packed2, 16384 runs, 0 skips
argb
52661 UNITS in yuv2packed2, 16384 runs, 0 skips
12879 UNITS in yuv2packed2, 16384 runs, 0 skips
abgr
52662 UNITS in yuv2packed2, 16384 runs, 0 skips
12932 UNITS in yuv2packed2, 16384 runs, 0 skips
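(Each pair above is before/after. Working the numbers: 52763/13453 is about 3.9x for rgb24, 3.9x for bgr24, 4.1x for rgba, 4.0x for bgra, and 4.1x for argb and abgr, so the ~4x claim holds across all six formats.)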
This commit is contained in:
parent 50e672bc54
commit 3256e949be
@@ -520,6 +520,148 @@ yuv2NBPSX(16, LE, 0, 16, int32_t)
        break; \
    }

#define SETUP(x, buf0, alpha1, buf1, alpha) { \
    x = vec_ld(0, buf0); \
    tmp = vec_mule(x, alpha1); \
    tmp2 = vec_mulo(x, alpha1); \
    tmp3 = vec_mergeh(tmp, tmp2); \
    tmp4 = vec_mergel(tmp, tmp2); \
\
    x = vec_ld(0, buf1); \
    tmp = vec_mule(x, alpha); \
    tmp2 = vec_mulo(x, alpha); \
    tmp5 = vec_mergeh(tmp, tmp2); \
    tmp6 = vec_mergel(tmp, tmp2); \
\
    tmp3 = vec_add(tmp3, tmp5); \
    tmp4 = vec_add(tmp4, tmp6); \
}
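A note on SETUP, since the even/odd multiply idiom is easy to misread: vec_mule and vec_mulo yield the widened 32-bit products of the even- and odd-numbered int16_t lanes, and the vec_mergeh/vec_mergel pair interleaves those two product vectors back into original lane order. The net effect per 8-lane group is a widened two-tap blend; a scalar sketch of the same computation (annotation only, not part of the patch):

#include <stdint.h>

/* Scalar equivalent of one SETUP invocation (illustrative sketch only):
 * after SETUP, tmp3 holds lanes 0..3 and tmp4 holds lanes 4..7 of out[]. */
static void setup_scalar(int32_t out[8], const int16_t *buf0, int16_t alpha1,
                         const int16_t *buf1, int16_t alpha)
{
    for (int j = 0; j < 8; j++)
        out[j] = (int32_t) buf0[j] * alpha1 + (int32_t) buf1[j] * alpha;
}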

static av_always_inline void
yuv2rgb_full_2_vsx_template(SwsContext *c, const int16_t *buf[2],
                            const int16_t *ubuf[2], const int16_t *vbuf[2],
                            const int16_t *abuf[2], uint8_t *dest, int dstW,
                            int yalpha, int uvalpha, int y,
                            enum AVPixelFormat target, int hasAlpha)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    const int16_t yalpha1 = 4096 - yalpha;
    const int16_t uvalpha1 = 4096 - uvalpha;
    vector int16_t vy, vu, vv, A = vec_splat_s16(0);
    vector int32_t vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32;
    vector int32_t R_l, R_r, G_l, G_r, B_l, B_r;
    vector int32_t tmp, tmp2, tmp3, tmp4, tmp5, tmp6;
    vector uint16_t rd16, gd16, bd16;
    vector uint8_t rd, bd, gd, ad, out0, out1, tmp8;
    const vector int16_t vyalpha1 = vec_splats(yalpha1);
    const vector int16_t vuvalpha1 = vec_splats(uvalpha1);
    const vector int16_t vyalpha = vec_splats((int16_t) yalpha);
    const vector int16_t vuvalpha = vec_splats((int16_t) uvalpha);
    const vector uint16_t zero16 = vec_splat_u16(0);
    const vector int32_t y_offset = vec_splats(c->yuv2rgb_y_offset);
    const vector int32_t y_coeff = vec_splats(c->yuv2rgb_y_coeff);
    const vector int32_t y_add = vec_splats(1 << 21);
    const vector int32_t v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
    const vector int32_t v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
    const vector int32_t u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
    const vector int32_t u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
    const vector int32_t rgbclip = vec_splats(1 << 30);
    const vector int32_t zero32 = vec_splat_s32(0);
    const vector uint32_t shift19 = vec_splats(19U);
    const vector uint32_t shift22 = vec_splats(22U);
    const vector uint32_t shift10 = vec_splat_u32(10);
    const vector int32_t dec128 = vec_splats(128 << 19);
    const vector int32_t add18 = vec_splats(1 << 18);
    int i;

    // Various permutations
    const vector uint8_t perm3rg0 = (vector uint8_t) {0x0, 0x10, 0,
                                                      0x1, 0x11, 0,
                                                      0x2, 0x12, 0,
                                                      0x3, 0x13, 0,
                                                      0x4, 0x14, 0,
                                                      0x5 };
    const vector uint8_t perm3rg1 = (vector uint8_t) {      0x15, 0,
                                                      0x6, 0x16, 0,
                                                      0x7, 0x17, 0 };
    const vector uint8_t perm3tb0 = (vector uint8_t) {0x0, 0x1, 0x10,
                                                      0x3, 0x4, 0x11,
                                                      0x6, 0x7, 0x12,
                                                      0x9, 0xa, 0x13,
                                                      0xc, 0xd, 0x14,
                                                      0xf };
    const vector uint8_t perm3tb1 = (vector uint8_t) { 0x0, 0x15,
                                                      0x2, 0x3, 0x16,
                                                      0x5, 0x6, 0x17 };
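    /* Annotation (not in the patch): vec_perm select indexes 0x00-0x0f pick
     * bytes from the first source operand and 0x10-0x1f from the second.
     * Presumably perm3rg0/perm3rg1 interleave the R and G bytes with a
     * placeholder in every third slot, and perm3tb0/perm3tb1 then pull the
     * B bytes into those slots, producing the packed 3-bytes-per-pixel
     * layout the 24-bit targets need in WRITERGB (which is outside this
     * hunk). */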

    av_assert2(yalpha <= 4096U);
    av_assert2(uvalpha <= 4096U);

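    /* Annotation (not in the patch): a scalar sketch of the per-pixel math
     * this loop vectorizes, modeled on the full-range two-slice C template
     * in libswscale/output.c:
     *
     *   Y = (buf0[i]  * yalpha1  + buf1[i]  * yalpha)                >> 10;
     *   U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 19)) >> 10;
     *   V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 19)) >> 10;
     *   A = (abuf0[i] * yalpha1  + abuf1[i] * yalpha  + (1 << 18))   >> 19;
     *
     *   Y = (Y - y_offset) * y_coeff + (1 << 21);   // y_add rounds the >> 22
     *   R = Y + V * v2r_coeff;
     *   G = Y + V * v2g_coeff + U * u2g_coeff;
     *   B = Y + U * u2b_coeff;
     *
     * WRITERGB (not shown in this hunk) then shifts, clips against rgbclip,
     * and packs the results. */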
    for (i = 0; i < dstW; i += 8) {
        SETUP(vy, &buf0[i], vyalpha1, &buf1[i], vyalpha);
        vy32_l = vec_sra(tmp3, shift10);
        vy32_r = vec_sra(tmp4, shift10);

        SETUP(vu, &ubuf0[i], vuvalpha1, &ubuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vu32_l = vec_sra(tmp3, shift10);
        vu32_r = vec_sra(tmp4, shift10);

        SETUP(vv, &vbuf0[i], vuvalpha1, &vbuf1[i], vuvalpha);
        tmp3 = vec_sub(tmp3, dec128);
        tmp4 = vec_sub(tmp4, dec128);
        vv32_l = vec_sra(tmp3, shift10);
        vv32_r = vec_sra(tmp4, shift10);

        if (hasAlpha) {
            SETUP(A, &abuf0[i], vyalpha1, &abuf1[i], vyalpha);
            tmp3 = vec_add(tmp3, add18);
            tmp4 = vec_add(tmp4, add18);
            tmp3 = vec_sra(tmp3, shift19);
            tmp4 = vec_sra(tmp4, shift19);
            A = vec_packs(tmp3, tmp4);
            ad = vec_packsu(A, (vector int16_t) zero16);
        } else {
            ad = vec_splats((uint8_t) 255);
        }

        vy32_l = vec_sub(vy32_l, y_offset);
        vy32_r = vec_sub(vy32_r, y_offset);
        vy32_l = vec_mul(vy32_l, y_coeff);
        vy32_r = vec_mul(vy32_r, y_coeff);
        vy32_l = vec_add(vy32_l, y_add);
        vy32_r = vec_add(vy32_r, y_add);

        R_l = vec_mul(vv32_l, v2r_coeff);
        R_l = vec_add(R_l, vy32_l);
        R_r = vec_mul(vv32_r, v2r_coeff);
        R_r = vec_add(R_r, vy32_r);
        G_l = vec_mul(vv32_l, v2g_coeff);
        tmp32 = vec_mul(vu32_l, u2g_coeff);
        G_l = vec_add(G_l, vy32_l);
        G_l = vec_add(G_l, tmp32);
        G_r = vec_mul(vv32_r, v2g_coeff);
        tmp32 = vec_mul(vu32_r, u2g_coeff);
        G_r = vec_add(G_r, vy32_r);
        G_r = vec_add(G_r, tmp32);

        B_l = vec_mul(vu32_l, u2b_coeff);
        B_l = vec_add(B_l, vy32_l);
        B_r = vec_mul(vu32_r, u2b_coeff);
        B_r = vec_add(B_r, vy32_r);

        WRITERGB
    }
}

#undef SETUP

static av_always_inline void
yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
                            const int16_t *ubuf[2], const int16_t *vbuf[2],
@@ -835,6 +977,16 @@ yuv2rgb_1_vsx_template(SwsContext *c, const int16_t *buf0,

#undef WRITERGB

#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_vsx(SwsContext *c, const int16_t *buf[2], \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                  const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                  int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_vsx_template(c, buf, ubuf, vbuf, abuf, \
                                    dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
}
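For readability, here is what one of the instantiations below expands to (hand-expanded for illustration, using the bgrx32_full case):

/* YUV2RGBWRAPPERX2(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0) expands to: */
static void yuv2bgrx32_full_2_vsx(SwsContext *c, const int16_t *buf[2],
                                  const int16_t *ubuf[2], const int16_t *vbuf[2],
                                  const int16_t *abuf[2], uint8_t *dest, int dstW,
                                  int yalpha, int uvalpha, int y)
{
    yuv2rgb_full_2_vsx_template(c, buf, ubuf, vbuf, abuf,
                                dest, dstW, yalpha, uvalpha, y,
                                AV_PIX_FMT_BGRA, 0);
}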

#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                  const int16_t *ubuf[2], const int16_t *vbuf[2], \

@@ -861,6 +1013,14 @@ YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)

YUV2RGBWRAPPERX2(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA, 0)
YUV2RGBWRAPPERX2(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA, 0)
YUV2RGBWRAPPERX2(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB, 0)
YUV2RGBWRAPPERX2(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR, 0)

YUV2RGBWRAPPERX2(yuv2, rgb_full, rgb24_full, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPERX2(yuv2, rgb_full, bgr24_full, AV_PIX_FMT_BGR24, 0)

static av_always_inline void
write422(const vector int16_t vy1, const vector int16_t vy2,
         const vector int16_t vu, const vector int16_t vv,
@@ -1233,17 +1393,20 @@ av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
        case AV_PIX_FMT_RGB24:
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                c->yuv2packed1 = yuv2rgb24_full_1_vsx;
                c->yuv2packed2 = yuv2rgb24_full_2_vsx;
            }
            break;
        case AV_PIX_FMT_BGR24:
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                c->yuv2packed1 = yuv2bgr24_full_1_vsx;
                c->yuv2packed2 = yuv2bgr24_full_2_vsx;
            }
            break;
        case AV_PIX_FMT_BGRA:
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2bgrx32_full_1_vsx;
                    c->yuv2packed2 = yuv2bgrx32_full_2_vsx;
                }
            }
            break;

@@ -1251,6 +1414,7 @@ av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2rgbx32_full_1_vsx;
                    c->yuv2packed2 = yuv2rgbx32_full_2_vsx;
                }
            }
            break;

@@ -1258,6 +1422,7 @@ av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xrgb32_full_1_vsx;
                    c->yuv2packed2 = yuv2xrgb32_full_2_vsx;
                }
            }
            break;

@@ -1265,6 +1430,7 @@ av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
            if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
                if (!c->needAlpha) {
                    c->yuv2packed1 = yuv2xbgr32_full_1_vsx;
                    c->yuv2packed2 = yuv2xbgr32_full_2_vsx;
                }
            }
            break;
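A closing note on the dispatch: yuv2packed2 is swscale's two-slice (vertically interpolating) packed-output hook, which is exactly the path the benchmark above measures ("UNITS in yuv2packed2"), while yuv2packed1 is the single-slice variant. Every assignment is gated on HAVE_POWER8 plus the runtime AV_CPU_FLAG_POWER8 check, matching the commit message's "32-bit mul, power8 only". For the four alpha-capable formats the wrappers are instantiated with hasAlpha = 0, so the VSX path writes opaque alpha and is only installed when !c->needAlpha; true alpha output stays on the generic C code.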