lavc/vp7dsp: add R-V V vp7_idct_dc_add4uv

This is almost the same story as vp7_idct_dc_add4y. We just have to use
strided loads of two 64-bit elements to account for the different data
layout in memory.
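
For reference, each call covers one chroma plane, whose four 4x4 blocks
sit in a 2x2 arrangement inside an 8x8 area, unlike the four
side-by-side luma blocks that add4y covers. One 8-byte row of that area
thus spans two blocks, and rows y and y + 4 lie four lines apart, so a
single strided load of two 64-bit elements fetches one row from the top
block pair and one from the bottom pair. A minimal scalar sketch of the
operation (helper names hypothetical; VP8's (dc + 4) >> 3 rule shown,
VP7 derives the DC differently, see below):

#include <stddef.h>
#include <stdint.h>

static inline uint8_t clip_u8(int v) /* av_clip_uint8() equivalent */
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Apply four DC-only IDCTs to the 2x2 group of 4x4 blocks at dst;
 * block[b][0] is block b's DC coefficient and is cleared afterwards,
 * like the "sh zero, (a1)" in the vector loop. */
static void idct_dc_add4uv_ref(uint8_t *dst, int16_t block[4][16],
                               ptrdiff_t stride)
{
    for (int b = 0; b < 4; b++) {
        int      dc = (block[b][0] + 4) >> 3;       /* VP8 DC rule */
        uint8_t *p  = dst + (b >> 1) * 4 * stride   /* lower block row */
                          + (b & 1) * 4;            /* right block column */

        block[b][0] = 0;
        for (int y = 0; y < 4; y++, p += stride)
            for (int x = 0; x < 4; x++)
                p[x] = clip_u8(p[x] + dc);
    }
}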

T-Head C908:
vp7_idct_dc_add4uv_c:       7.5
vp7_idct_dc_add4uv_rvv_i64: 2.0
vp8_idct_dc_add4uv_c:       6.2
vp8_idct_dc_add4uv_rvv_i32: 2.2 (before)
vp8_idct_dc_add4uv_rvv_i64: 2.0

SpacemiT X60:
vp7_idct_dc_add4uv_c:       6.7
vp7_idct_dc_add4uv_rvv_i64: 2.2
vp8_idct_dc_add4uv_c:       5.7
vp8_idct_dc_add4uv_rvv_i32: 2.5 (before)
vp8_idct_dc_add4uv_rvv_i64: 2.0

Rémi Denis-Courmont    2024-06-02 12:03:33 +03:00
parent 225de53c9d
commit 121fb846b9
4 changed files with 45 additions and 17 deletions

--- a/libavcodec/riscv/vp7dsp_init.c
+++ b/libavcodec/riscv/vp7dsp_init.c

@@ -29,6 +29,7 @@ void ff_vp7_luma_dc_wht_rvv(int16_t block[4][4][16], int16_t dc[16]);
 void ff_vp7_idct_add_rvv(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
 void ff_vp78_idct_dc_add_rvv(uint8_t *, int16_t block[16], ptrdiff_t, int dc);
 void ff_vp7_idct_dc_add4y_rvv(uint8_t *dst, int16_t block[4][16], ptrdiff_t);
+void ff_vp7_idct_dc_add4uv_rvv(uint8_t *dst, int16_t block[4][16], ptrdiff_t);
 
 static void ff_vp7_idct_dc_add_rvv(uint8_t *dst, int16_t block[16],
                                    ptrdiff_t stride)
@@ -51,6 +52,8 @@ av_cold void ff_vp7dsp_init_riscv(VP8DSPContext *c)
 #endif
         c->vp8_idct_dc_add = ff_vp7_idct_dc_add_rvv;
         c->vp8_idct_dc_add4y = ff_vp7_idct_dc_add4y_rvv;
+        if (flags & AV_CPU_FLAG_RVV_I64)
+            c->vp8_idct_dc_add4uv = ff_vp7_idct_dc_add4uv_rvv;
     }
 #endif
 }

--- a/libavcodec/riscv/vp7dsp_rvv.S
+++ b/libavcodec/riscv/vp7dsp_rvv.S

@@ -128,7 +128,8 @@ func ff_vp7_idct_add_rvv, zve32x
 endfunc
 #endif
 
-func ff_vp7_idct_dc_add4y_rvv, zve32x
+.irp type, y, uv
+func ff_vp7_idct_dc_add4\type\()_rvv, zve32x
         li      t0, 32
         vsetivli zero, 4, e16, mf2, ta, ma
         li      t1, 23170
@@ -141,5 +142,6 @@ func ff_vp7_idct_dc_add4y_rvv, zve32x
         vadd.vx v0, v0, t2
         vsetvli zero, zero, e16, mf2, ta, ma
         vnsra.wi v8, v0, 18 # 4x DC
-        tail    ff_vp78_idct_dc_add4y_rvv
+        tail    ff_vp78_idct_dc_add4\type\()_rvv
 endfunc
+.endr
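
The .irp/.endr pair expands the function body twice, with \type set to y
and then uv, so both entry points share the VP7 DC front end and differ
only in which vp78 helper they tail-call. In scalar terms, the DC that
front end computes per block looks like the following (a sketch after
vp7_idct_dc_add_c() in libavcodec/vp7dsp.c; the rounding constant is
what the asm adds through t2):

/* 23170 ~= sqrt(2) * 2^14, so the two multiplications and the final
 * >> 18 scale the coefficient by ~1/8, the same net factor as VP8's
 * (dc + 4) >> 3; 0x20000 is half of 2^18, i.e. the rounding term. */
static inline int vp7_dc(int coeff)
{
    return (23170 * (23170 * coeff >> 14) + 0x20000) >> 18;
}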

--- a/libavcodec/riscv/vp8dsp_init.c
+++ b/libavcodec/riscv/vp8dsp_init.c

@@ -131,9 +131,8 @@ av_cold void ff_vp8dsp_init_riscv(VP8DSPContext *c)
 #endif
         c->vp8_idct_dc_add = ff_vp8_idct_dc_add_rvv;
         c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_rvv;
-        if (flags & AV_CPU_FLAG_RVB_ADDR) {
+        if (flags & AV_CPU_FLAG_RVV_I64)
             c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_rvv;
-        }
     }
 #endif
 }

--- a/libavcodec/riscv/vp8dsp_rvv.S
+++ b/libavcodec/riscv/vp8dsp_rvv.S

@@ -157,6 +157,43 @@ func ff_vp78_idct_dc_add4y_rvv, zve32x
         ret
 endfunc
 
+func ff_vp8_idct_dc_add4uv_rvv, zve32x
+        li      t0, 32
+        vsetivli zero, 4, e16, mf2, ta, ma
+        li      t1, 4 - (128 << 3)
+        vlse16.v v8, (a1), t0
+        vadd.vx v8, v8, t1
+        vsra.vi v8, v8, 3
+        # fall through
+endfunc
+
+.variant_cc ff_vp78_idct_dc_add4uv_rvv
+func ff_vp78_idct_dc_add4uv_rvv, zve64x
+        vsetivli zero, 16, e16, m2, ta, ma
+        vid.v   v4
+        li      a4, 4
+        vsrl.vi v4, v4, 2
+        li      t1, 128
+        vrgather.vv v0, v8, v4 # replicate each DC four times
+        slli    t2, a2, 2
+        vsetivli zero, 2, e64, m1, ta, ma
+1:
+        vlse64.v v8, (a0), t2
+        addi    a4, a4, -1
+        vsetivli zero, 16, e8, m1, ta, ma
+        vwaddu.wv v16, v0, v8
+        sh      zero, (a1)
+        vnclip.wi v8, v16, 0
+        addi    a1, a1, 32
+        vxor.vx v8, v8, t1
+        vsetivli zero, 2, e64, m1, ta, ma
+        vsse64.v v8, (a0), t2
+        add     a0, a0, a2
+        bnez    a4, 1b
+
+        ret
+endfunc
+
 .macro vp8_idct_dc_add
         vlse32.v v0, (a0), a2
         lh      a5, 0(a1)
@@ -179,19 +216,6 @@ endfunc
         addi    a1, a1, 32
 .endm
 
-func ff_vp8_idct_dc_add4uv_rvv, zve32x
-        vsetivli zero, 4, e8, mf4, ta, ma
-        vp8_idct_dc_addy
-        vp8_idct_dc_add
-        addi    a0, a0, -4
-        sh2add  a0, a2, a0
-        addi    a1, a1, 32
-        vp8_idct_dc_addy
-        vp8_idct_dc_add
-
-        ret
-endfunc
-
 .macro bilin_load dst type mn
 .ifc \type,v
         add     t5, a2, a3
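
Two details of the replacement are worth spelling out. First, the
requirement moves from Zba (the old code's sh2add, hence the
AV_CPU_FLAG_RVB_ADDR guard) to 64-bit vector elements (zve64x, hence
AV_CPU_FLAG_RVV_I64). Second, the clamping: the DC is biased by -128
when it is computed (the 4 - (128 << 3) above; the VP7 front end
presumably folds the same bias into its rounding constant), so a single
vnclip.wi signed-saturating narrow plus a XOR with 128 replaces an
explicit [0, 255] clamp. One pixel of that sequence in scalar form
(helper name hypothetical):

#include <stdint.h>

/* dc_biased is the DC minus 128. Clamping the biased sum to
 * [-128, 127] and flipping the sign bit equals clipping px + dc
 * to [0, 255]. */
static inline uint8_t dc_add_pixel(uint8_t px, int dc_biased)
{
    int sum = dc_biased + px;      /* vwaddu.wv: u8 widened onto i16 */
    if (sum < -128) sum = -128;    /* vnclip.wi: signed saturate...  */
    if (sum >  127) sum =  127;    /* ...while narrowing to 8 bits   */
    return (uint8_t)(sum ^ 0x80);  /* vxor.vx v8, v8, t1  (t1 = 128) */
}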