lavc/sbrdsp: R-V V neg_odd_64

With 128-bit vectors, this is mostly pointless but also harmless.
Performance gains should be more noticeable with larger vector sizes.

neg_odd_64_c:       76.2
neg_odd_64_rvv_i64: 74.7
This commit is contained in:
Rémi Denis-Courmont 2023-10-29 18:22:08 +02:00
parent b0aba7dd0c
commit d06fd18f8f
2 changed files with 22 additions and 0 deletions

View File

@ -25,6 +25,7 @@
/* Prototypes for the hand-written RISC-V vector (RVV) SBRDSP kernels;
 * the implementations live in the accompanying assembly file. */
void ff_sbr_sum64x5_rvv(float *z);
float ff_sbr_sum_square_rvv(float (*x)[2], int n);
void ff_sbr_neg_odd_64_rvv(float *x);
/* Install the RVV-optimised SBRDSP routines into the dispatch table when
 * the running CPU advertises the required extensions; otherwise the
 * generic C implementations set up by the caller remain in place. */
av_cold void ff_sbrdsp_init_riscv(SBRDSPContext *c)
{
@ -35,5 +36,9 @@ av_cold void ff_sbrdsp_init_riscv(SBRDSPContext *c)
c->sum64x5 = ff_sbr_sum64x5_rvv;
c->sum_square = ff_sbr_sum_square_rvv;
}
/* The neg_odd_64 kernel loads 64-bit elements and materialises a 64-bit
 * immediate in a GPR, so it is only usable on 64-bit (RV64) builds. */
#if __riscv_xlen >= 64
/* NOTE(review): "flags" is presumably the av_get_cpu_flags() value read in
 * the hunk gap above — confirm against the full file. Requires both the
 * 64-bit-element vector extension (Zve64x) and address bit-manip (Zba). */
if ((flags & AV_CPU_FLAG_RVV_I64) && (flags & AV_CPU_FLAG_RVB_ADDR))
c->neg_odd_64 = ff_sbr_neg_odd_64_rvv;
#endif
#endif
}

View File

@ -67,3 +67,20 @@ func ff_sbr_sum_square_rvv, zve32f
NOHWF fmv.x.w a0, fa0
ret
endfunc
#if __riscv_xlen >= 64 // "li t1, 1 << 63" needs 64-bit GPRs (RV64 only)
// void ff_sbr_neg_odd_64_rvv(float *x)
// Negates every odd-indexed element of a 64-entry float array in place:
// x[1], x[3], ..., x[63]. The array is processed as 32 x 64-bit words;
// each word packs two consecutive floats, and on little-endian RV64 bit 63
// of the word is the IEEE-754 sign bit of the odd-indexed float of the
// pair, so XORing with 1<<63 flips exactly that sign.
// In:     a0 = x (pointer, advanced through the loop)
// Clobb:  a0, a1, t0, t1, v8-v15
// Requires the Zve64x vector extension (64-bit integer elements).
func ff_sbr_neg_odd_64_rvv, zve64x
li a1, 32 // a1 = 64-bit words remaining (64 floats / 2)
li t1, 1 << 63 // sign-bit mask for the high float of each word
1:
// Strip-mined loop: t0 = elements handled this iteration (vl)
vsetvli t0, a1, e64, m8, ta, ma
vle64.v v8, (a0)
sub a1, a1, t0 // words remaining -= vl
vxor.vx v8, v8, t1 // flip sign of the odd-indexed float in each word
vse64.v v8, (a0) // store back in place
sh3add a0, t0, a0 // x += vl * 8 bytes (Zba shift-add)
bnez t0, 1b // vsetvli yields vl == 0 once a1 reaches 0
ret
endfunc
#endif