From 90a779bed6a269d2fe63887d970461418ece72b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Denis-Courmont?=
Date: Sun, 12 Nov 2023 15:38:45 +0200
Subject: [PATCH] lavc/huffyuvdsp: basic R-V V add_hfyu_left_pred_bgr32

Better performance can probably be achieved with a more intricate
unrolled loop, but this is a start:

add_hfyu_left_pred_bgr32_c:       15084.0
add_hfyu_left_pred_bgr32_rvv_i32: 10280.2

This would actually be cleaner with the RISC-V P extension, but that is
not ratified yet (I think?) and usually not supported if V is supported.
---
 libavcodec/riscv/huffyuvdsp_init.c |  6 +++++-
 libavcodec/riscv/huffyuvdsp_rvv.S  | 16 ++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/libavcodec/riscv/huffyuvdsp_init.c b/libavcodec/riscv/huffyuvdsp_init.c
index 115b25881c..b49b3dc097 100644
--- a/libavcodec/riscv/huffyuvdsp_init.c
+++ b/libavcodec/riscv/huffyuvdsp_init.c
@@ -24,6 +24,8 @@
 #include "libavcodec/huffyuvdsp.h"
 
 void ff_add_int16_rvv(uint16_t *dst, const uint16_t *src, unsigned m, int w);
+void ff_add_hfyu_left_pred_bgr32_rvv(uint8_t *dst, const uint8_t *src,
+                                     intptr_t w, uint8_t *left);
 
 av_cold void ff_huffyuvdsp_init_riscv(HuffYUVDSPContext *c,
                                       enum AVPixelFormat pix_fmt)
@@ -31,7 +33,9 @@ av_cold void ff_huffyuvdsp_init_riscv(HuffYUVDSPContext *c,
 #if HAVE_RVV
     int flags = av_get_cpu_flags();
 
-    if ((flags & AV_CPU_FLAG_RVV_I32) && (flags & AV_CPU_FLAG_RVB_ADDR))
+    if ((flags & AV_CPU_FLAG_RVV_I32) && (flags & AV_CPU_FLAG_RVB_ADDR)) {
         c->add_int16 = ff_add_int16_rvv;
+        c->add_hfyu_left_pred_bgr32 = ff_add_hfyu_left_pred_bgr32_rvv;
+    }
 #endif
 }
diff --git a/libavcodec/riscv/huffyuvdsp_rvv.S b/libavcodec/riscv/huffyuvdsp_rvv.S
index f8926fdaea..9c4434907d 100644
--- a/libavcodec/riscv/huffyuvdsp_rvv.S
+++ b/libavcodec/riscv/huffyuvdsp_rvv.S
@@ -35,3 +35,19 @@ func ff_add_int16_rvv, zve32x
 
         ret
 endfunc
+
+func ff_add_hfyu_left_pred_bgr32_rvv, zve32x
+        vsetivli    zero, 4, e8, m1, ta, ma  // 4 x 8-bit elements: one BGRA pixel
+        vle8.v      v8, (a3)                 // v8 = running "left" pixel from *left
+        sh2add      a2, a2, a1               // a2 = src + 4 * w (end of input)
+1:
+        vle8.v      v0, (a1)                 // load next source pixel
+        vadd.vv     v8, v8, v0               // accumulate byte-wise (wraps mod 256)
+        addi        a1, a1, 4
+        vse8.v      v8, (a0)                 // store predicted pixel
+        addi        a0, a0, 4
+        bne         a2, a1, 1b
+
+        vse8.v      v8, (a3)                 // write final pixel back to *left
+        ret
+endfunc
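
For reference, the predictor the new routine implements can be sketched in
plain C as below. This is an illustrative reference only, not FFmpeg's scalar
add_hfyu_left_pred_bgr32_c verbatim (the name add_hfyu_left_pred_bgr32_ref is
mine): each output pixel is the byte-wise sum, modulo 256, of the source pixel
and the previous output pixel, with *left seeding the first pixel and
receiving the last one.

#include <stdint.h>

static void add_hfyu_left_pred_bgr32_ref(uint8_t *dst, const uint8_t *src,
                                         intptr_t w, uint8_t *left)
{
    /* left[0..3] holds the previous output pixel (seed on entry, final
     * pixel on return), mirroring the v8 register in the assembly above. */
    for (intptr_t i = 0; i < w; i++) {
        for (int c = 0; c < 4; c++) {
            left[c] += src[4 * i + c];  /* uint8_t arithmetic wraps mod 256 */
            dst[4 * i + c] = left[c];
        }
    }
}

Since one BGRA pixel is exactly four bytes, the vector version above simply
sets VL to 4 with e8 elements and keeps the running pixel in a register across
the loop, so no per-channel scalar bookkeeping is needed.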