From 233066e85adbb3eb4d385369325a2ad44a625df2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Denis-Courmont?=
Date: Wed, 15 May 2024 22:56:42 +0300
Subject: [PATCH] lavc/flacdsp: optimise RVV vector type for lpc32

This is pretty much the same as for lpc16, though it only improves
prediction orders half as large, since the 64-bit products fit half as
many elements per vector register group.

With 128-bit vectors, this gives:

order       C    V old    V new
    1    69.2    181.5     95.5
    2   107.7    180.7     95.2
    3   145.5    180.0    103.5
    4   183.0    179.2    102.7
    5   220.7    178.5    128.0
    6   257.7    194.0    127.5
    7   294.5    193.7    126.7
    8   331.0    193.0    126.5

Larger prediction orders see no significant changes at that size.
---
 libavcodec/riscv/flacdsp_init.c | 15 ++++++++-------
 libavcodec/riscv/flacdsp_rvv.S  | 12 +++++++-----
 2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/libavcodec/riscv/flacdsp_init.c b/libavcodec/riscv/flacdsp_init.c
index 735aec0691..830ae36534 100644
--- a/libavcodec/riscv/flacdsp_init.c
+++ b/libavcodec/riscv/flacdsp_init.c
@@ -71,17 +71,18 @@ av_cold void ff_flacdsp_init_riscv(FLACDSPContext *c, enum AVSampleFormat fmt,
     if ((flags & AV_CPU_FLAG_RVV_I32) && (flags & AV_CPU_FLAG_RVB_ADDR)) {
         int vlenb = ff_get_rv_vlenb();
 
-        if ((flags & AV_CPU_FLAG_RVB_BASIC) && vlenb >= 16)
+        if ((flags & AV_CPU_FLAG_RVB_BASIC) && vlenb >= 16) {
             c->lpc16 = ff_flac_lpc16_rvv;
 # if (__riscv_xlen >= 64)
-        if (flags & AV_CPU_FLAG_RVV_I64) {
-            if (vlenb > 16)
-                c->lpc32 = ff_flac_lpc32_rvv_simple;
-            else
-                c->lpc32 = ff_flac_lpc32_rvv;
-        }
+            if (flags & AV_CPU_FLAG_RVV_I64) {
+                if (vlenb > 16)
+                    c->lpc32 = ff_flac_lpc32_rvv_simple;
+                else
+                    c->lpc32 = ff_flac_lpc32_rvv;
+            }
 # endif
+        }
 
         c->wasted32 = ff_flac_wasted32_rvv;

diff --git a/libavcodec/riscv/flacdsp_rvv.S b/libavcodec/riscv/flacdsp_rvv.S
index e1a20ce8e1..2941928465 100644
--- a/libavcodec/riscv/flacdsp_rvv.S
+++ b/libavcodec/riscv/flacdsp_rvv.S
@@ -76,22 +76,24 @@ func ff_flac_lpc32_rvv, zve64x
         ret
 endfunc
 
-func ff_flac_lpc32_rvv_simple, zve64x
-        vsetivli zero, 1, e64, m1, ta, ma
+func ff_flac_lpc32_rvv_simple, zve64x, zbb
+        vtype_vli t3, a2, t1, e64, ta, ma
+        vntypei t2, t3
+        vsetvl  zero, a2, t3 // e64
         vmv.s.x v0, zero
-        vsetvli zero, a2, e32, m4, ta, ma
+        vsetvl  zero, zero, t2 // e32
         vle32.v v8, (a1)
         sub     a4, a4, a2
         vle32.v v16, (a0)
         sh2add  a0, a2, a0
 1:
         vwmul.vv v24, v8, v16
-        vsetvli zero, zero, e64, m8, ta, ma
+        vsetvl  zero, zero, t3 // e64
         vredsum.vs v24, v24, v0
         lw      t0, (a0)
         addi    a4, a4, -1
         vmv.x.s t1, v24
-        vsetvli zero, zero, e32, m4, ta, ma
+        vsetvl  zero, zero, t2 // e32
         sra     t1, t1, a3
         add     t0, t0, t1
         vslide1down.vx v16, v16, t0
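
As a side note for readers less familiar with this code: the routine patched
above performs in-place FLAC LPC reconstruction. The scalar sketch below shows
what ff_flac_lpc32_rvv_simple computes, assuming the argument layout visible in
the assembly (a0 = decoded samples, a1 = coefficients, a2 = prediction order,
a3 = qlevel, a4 = length); it is an illustrative reference, not the FFmpeg C
code verbatim.

    #include <stdint.h>

    /* Illustrative scalar reference of the lpc32 computation vectorised above.
     * The 32x32 -> 64-bit products and 64-bit sum are why the reduction in the
     * assembly has to run at e64. */
    static void flac_lpc32_ref(int32_t *decoded, const int32_t *coeffs,
                               int pred_order, int qlevel, int len)
    {
        for (int i = pred_order; i < len; i++, decoded++) {
            int64_t sum = 0;

            for (int j = 0; j < pred_order; j++)              /* vwmul.vv     */
                sum += (int64_t)coeffs[j] * decoded[j];       /* + vredsum.vs */
            decoded[pred_order] += (int32_t)(sum >> qlevel);  /* sra, add     */
        }
    }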
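
The point of replacing the fixed vsetivli/vsetvli pairs with vtype_vli/vntypei
and vsetvl is to size the vector register group to the prediction order instead
of always using the largest grouping (m4/m8). The helper below is hypothetical
(pick_lmul_e64 is not part of the patch or of FFmpeg) and only sketches the
assumed selection rule; it is consistent with the benchmark buckets above,
where orders 1-2, 3-4 and 5-8 each land on a different LMUL with 128-bit
vectors.

    /* Hypothetical sketch of the vtype selection: use the smallest power-of-two
     * LMUL such that `order` 64-bit elements fit in one register group.
     * Smaller groups make vredsum.vs and vslide1down.vx cheaper, which is why
     * only small prediction orders speed up. */
    static unsigned pick_lmul_e64(unsigned order, unsigned vlenb)
    {
        unsigned per_reg = vlenb / 8;  /* 64-bit elements per single register */
        unsigned lmul = 1;

        while (lmul < 8 && per_reg * lmul < order)
            lmul *= 2;
        return lmul;                   /* the e32 pass uses half of this LMUL */
    }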