diff --git a/libavcodec/riscv/me_cmp_init.c b/libavcodec/riscv/me_cmp_init.c
index a6ef5addd0..24e78e3eeb 100644
--- a/libavcodec/riscv/me_cmp_init.c
+++ b/libavcodec/riscv/me_cmp_init.c
@@ -48,8 +48,12 @@ int ff_sse4_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
 
 int ff_vsse16_rvv(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h);
 int ff_vsse8_rvv(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h);
+int ff_vsse_intra16_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
+int ff_vsse_intra8_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
 int ff_vsad16_rvv(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h);
 int ff_vsad8_rvv(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h);
+int ff_vsad_intra16_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
+int ff_vsad_intra8_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
 
 av_cold void ff_me_cmp_init_riscv(MECmpContext *c, AVCodecContext *avctx)
 {
@@ -72,8 +76,12 @@ av_cold void ff_me_cmp_init_riscv(MECmpContext *c, AVCodecContext *avctx)
 
         c->vsse[0] = ff_vsse16_rvv;
         c->vsse[1] = ff_vsse8_rvv;
+        c->vsse[4] = ff_vsse_intra16_rvv;
+        c->vsse[5] = ff_vsse_intra8_rvv;
         c->vsad[0] = ff_vsad16_rvv;
         c->vsad[1] = ff_vsad8_rvv;
+        c->vsad[4] = ff_vsad_intra16_rvv;
+        c->vsad[5] = ff_vsad_intra8_rvv;
     }
 #endif
 }
diff --git a/libavcodec/riscv/me_cmp_rvv.S b/libavcodec/riscv/me_cmp_rvv.S
index 25b15c74ce..f32ae6b259 100644
--- a/libavcodec/riscv/me_cmp_rvv.S
+++ b/libavcodec/riscv/me_cmp_rvv.S
@@ -314,6 +314,68 @@ endfunc
         ret
 .endm
 
+.macro vsad_vsse_intra16 type
+        vsetivli        t0, 16, e32, m4, ta, ma
+        addi            a4, a4, -1
+        add             t1, a1, a3
+        vmv.v.x         v24, zero
+        vmv.s.x         v0, zero
+1:
+        vsetvli         zero, zero, e8, m1, tu, ma
+        vle8.v          v4, (a1)
+        vle8.v          v12, (t1)
+        addi            a4, a4, -1
+        vwsubu.vv       v16, v4, v12
+        vsetvli         zero, zero, e16, m2, tu, ma
+
+.ifc \type,abs
+        vabsaddu        v24, v16, v12
+.endif
+.ifc \type,square
+        vwmacc.vv       v24, v16, v16
+.endif
+
+        add             a1, a1, a3
+        add             t1, t1, a3
+        bnez            a4, 1b
+
+        vsetvli         zero, zero, e32, m4, tu, ma
+        vredsum.vs      v0, v24, v0
+        vmv.x.s         a0, v0
+        ret
+.endm
+
+.macro vsad_vsse_intra8 type
+        vsetivli        t0, 8, e32, m2, ta, ma
+        addi            a4, a4, -1
+        add             t1, a1, a3
+        vmv.v.x         v24, zero
+        vmv.s.x         v0, zero
+1:
+        vsetvli         zero, zero, e8, mf2, tu, ma
+        vle8.v          v4, (a1)
+        vle8.v          v12, (t1)
+        addi            a4, a4, -1
+        vwsubu.vv       v16, v4, v12
+        vsetvli         zero, zero, e16, m1, tu, ma
+
+.ifc \type,abs
+        vabsaddu        v24, v16, v12
+.endif
+.ifc \type,square
+        vwmacc.vv       v24, v16, v16
+.endif
+
+        add             a1, a1, a3
+        add             t1, t1, a3
+        bnez            a4, 1b
+
+        vsetvli         zero, zero, e32, m2, tu, ma
+        vredsum.vs      v0, v24, v0
+        vmv.x.s         a0, v0
+        ret
+.endm
+
 func ff_vsse16_rvv, zve32x
         vsad_vsse16 square
 endfunc
@@ -322,6 +384,14 @@ func ff_vsse8_rvv, zve32x
         vsad_vsse8 square
 endfunc
 
+func ff_vsse_intra16_rvv, zve32x
+        vsad_vsse_intra16 square
+endfunc
+
+func ff_vsse_intra8_rvv, zve32x
+        vsad_vsse_intra8 square
+endfunc
+
 func ff_vsad16_rvv, zve32x
         vsad_vsse16 abs
 endfunc
@@ -329,3 +399,11 @@ endfunc
 func ff_vsad8_rvv, zve32x
         vsad_vsse8 abs
 endfunc
+
+func ff_vsad_intra16_rvv, zve32x
+        vsad_vsse_intra16 abs
+endfunc
+
+func ff_vsad_intra8_rvv, zve32x
+        vsad_vsse_intra8 abs
+endfunc
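
Note (not part of the patch): the new intra variants read a single plane and ignore the
second pointer argument (hence "dummy"), scoring each row against the row directly below
it; the vsad_* functions accumulate absolute differences and the vsse_* functions
accumulate squared differences. A minimal scalar sketch of the value the 16-pixel-wide
routines compute (illustrative code and name, not the exact reference in
libavcodec/me_cmp.c):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Vertical SAD of one 16-wide plane over h rows; square the difference
     * instead of taking abs() to get the corresponding vsse score. */
    int vsad_intra16_sketch(const uint8_t *s, ptrdiff_t stride, int h)
    {
        int score = 0;

        for (int y = 0; y < h - 1; y++) {   /* h - 1 row pairs, as in the RVV loop */
            for (int x = 0; x < 16; x++)
                score += abs(s[x] - s[x + stride]);
            s += stride;
        }
        return score;
    }

The assembly mirrors this shape: a4 (h) is decremented once before the loop so the body
runs h - 1 times, v4/v12 hold the current and next rows, vwsubu.vv forms widened 16-bit
differences, and the .ifc blocks select absolute (vsad) or squared (vsse) accumulation
before the final e32 reduction into a0.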