avcodec/la: Add LSX optimization for h264 qpel.

./configure --disable-lasx
ffmpeg -i 1_h264_1080p_30fps_3Mbps.mp4 -f rawvideo -y /dev/null -an
before: 214fps
after:  274fps

Reviewed-by: Shiyou Yin <yinshiyou-hf@loongson.cn>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
Author: yuanhecai, 2023-05-25 15:24:29 +08:00 (committed by Michael Niedermayer)
commit f6077cc666 (parent 8815a7719e)
7 changed files with 2561 additions and 559 deletions

libavcodec/loongarch/Makefile

@@ -31,5 +31,7 @@ LSX-OBJS-$(CONFIG_HEVC_DECODER) += loongarch/hevcdsp_lsx.o \
LSX-OBJS-$(CONFIG_H264DSP) += loongarch/h264idct.o \
loongarch/h264idct_loongarch.o \
loongarch/h264dsp.o
LSX-OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel.o \
loongarch/h264qpel_lsx.o
LSX-OBJS-$(CONFIG_H264CHROMA) += loongarch/h264chroma.o
LSX-OBJS-$(CONFIG_H264PRED) += loongarch/h264intrapred.o

(Diff suppressed because it is too large.)

libavcodec/loongarch/h264qpel_init_loongarch.c

@@ -19,7 +19,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "h264qpel_lasx.h"
#include "h264qpel_loongarch.h"
#include "libavutil/attributes.h"
#include "libavutil/loongarch/cpu.h"
#include "libavcodec/h264qpel.h"
@@ -27,6 +27,77 @@
av_cold void ff_h264qpel_init_loongarch(H264QpelContext *c, int bit_depth)
{
int cpu_flags = av_get_cpu_flags();
if (have_lsx(cpu_flags)) {
if (8 == bit_depth) {
c->put_h264_qpel_pixels_tab[0][0] = ff_put_h264_qpel16_mc00_lsx;
c->put_h264_qpel_pixels_tab[0][1] = ff_put_h264_qpel16_mc10_lsx;
c->put_h264_qpel_pixels_tab[0][2] = ff_put_h264_qpel16_mc20_lsx;
c->put_h264_qpel_pixels_tab[0][3] = ff_put_h264_qpel16_mc30_lsx;
c->put_h264_qpel_pixels_tab[0][4] = ff_put_h264_qpel16_mc01_lsx;
c->put_h264_qpel_pixels_tab[0][5] = ff_put_h264_qpel16_mc11_lsx;
c->put_h264_qpel_pixels_tab[0][6] = ff_put_h264_qpel16_mc21_lsx;
c->put_h264_qpel_pixels_tab[0][7] = ff_put_h264_qpel16_mc31_lsx;
c->put_h264_qpel_pixels_tab[0][8] = ff_put_h264_qpel16_mc02_lsx;
c->put_h264_qpel_pixels_tab[0][9] = ff_put_h264_qpel16_mc12_lsx;
c->put_h264_qpel_pixels_tab[0][10] = ff_put_h264_qpel16_mc22_lsx;
c->put_h264_qpel_pixels_tab[0][11] = ff_put_h264_qpel16_mc32_lsx;
c->put_h264_qpel_pixels_tab[0][12] = ff_put_h264_qpel16_mc03_lsx;
c->put_h264_qpel_pixels_tab[0][13] = ff_put_h264_qpel16_mc13_lsx;
c->put_h264_qpel_pixels_tab[0][14] = ff_put_h264_qpel16_mc23_lsx;
c->put_h264_qpel_pixels_tab[0][15] = ff_put_h264_qpel16_mc33_lsx;
c->avg_h264_qpel_pixels_tab[0][0] = ff_avg_h264_qpel16_mc00_lsx;
c->avg_h264_qpel_pixels_tab[0][1] = ff_avg_h264_qpel16_mc10_lsx;
c->avg_h264_qpel_pixels_tab[0][2] = ff_avg_h264_qpel16_mc20_lsx;
c->avg_h264_qpel_pixels_tab[0][3] = ff_avg_h264_qpel16_mc30_lsx;
c->avg_h264_qpel_pixels_tab[0][4] = ff_avg_h264_qpel16_mc01_lsx;
c->avg_h264_qpel_pixels_tab[0][5] = ff_avg_h264_qpel16_mc11_lsx;
c->avg_h264_qpel_pixels_tab[0][6] = ff_avg_h264_qpel16_mc21_lsx;
c->avg_h264_qpel_pixels_tab[0][7] = ff_avg_h264_qpel16_mc31_lsx;
c->avg_h264_qpel_pixels_tab[0][8] = ff_avg_h264_qpel16_mc02_lsx;
c->avg_h264_qpel_pixels_tab[0][9] = ff_avg_h264_qpel16_mc12_lsx;
c->avg_h264_qpel_pixels_tab[0][10] = ff_avg_h264_qpel16_mc22_lsx;
c->avg_h264_qpel_pixels_tab[0][11] = ff_avg_h264_qpel16_mc32_lsx;
c->avg_h264_qpel_pixels_tab[0][12] = ff_avg_h264_qpel16_mc03_lsx;
c->avg_h264_qpel_pixels_tab[0][13] = ff_avg_h264_qpel16_mc13_lsx;
c->avg_h264_qpel_pixels_tab[0][14] = ff_avg_h264_qpel16_mc23_lsx;
c->avg_h264_qpel_pixels_tab[0][15] = ff_avg_h264_qpel16_mc33_lsx;
c->put_h264_qpel_pixels_tab[1][0] = ff_put_h264_qpel8_mc00_lsx;
c->put_h264_qpel_pixels_tab[1][1] = ff_put_h264_qpel8_mc10_lsx;
c->put_h264_qpel_pixels_tab[1][2] = ff_put_h264_qpel8_mc20_lsx;
c->put_h264_qpel_pixels_tab[1][3] = ff_put_h264_qpel8_mc30_lsx;
c->put_h264_qpel_pixels_tab[1][4] = ff_put_h264_qpel8_mc01_lsx;
c->put_h264_qpel_pixels_tab[1][5] = ff_put_h264_qpel8_mc11_lsx;
c->put_h264_qpel_pixels_tab[1][6] = ff_put_h264_qpel8_mc21_lsx;
c->put_h264_qpel_pixels_tab[1][7] = ff_put_h264_qpel8_mc31_lsx;
c->put_h264_qpel_pixels_tab[1][8] = ff_put_h264_qpel8_mc02_lsx;
c->put_h264_qpel_pixels_tab[1][9] = ff_put_h264_qpel8_mc12_lsx;
c->put_h264_qpel_pixels_tab[1][10] = ff_put_h264_qpel8_mc22_lsx;
c->put_h264_qpel_pixels_tab[1][11] = ff_put_h264_qpel8_mc32_lsx;
c->put_h264_qpel_pixels_tab[1][12] = ff_put_h264_qpel8_mc03_lsx;
c->put_h264_qpel_pixels_tab[1][13] = ff_put_h264_qpel8_mc13_lsx;
c->put_h264_qpel_pixels_tab[1][14] = ff_put_h264_qpel8_mc23_lsx;
c->put_h264_qpel_pixels_tab[1][15] = ff_put_h264_qpel8_mc33_lsx;
c->avg_h264_qpel_pixels_tab[1][0] = ff_avg_h264_qpel8_mc00_lsx;
c->avg_h264_qpel_pixels_tab[1][1] = ff_avg_h264_qpel8_mc10_lsx;
c->avg_h264_qpel_pixels_tab[1][2] = ff_avg_h264_qpel8_mc20_lsx;
c->avg_h264_qpel_pixels_tab[1][3] = ff_avg_h264_qpel8_mc30_lsx;
c->avg_h264_qpel_pixels_tab[1][5] = ff_avg_h264_qpel8_mc11_lsx;
c->avg_h264_qpel_pixels_tab[1][6] = ff_avg_h264_qpel8_mc21_lsx;
c->avg_h264_qpel_pixels_tab[1][7] = ff_avg_h264_qpel8_mc31_lsx;
c->avg_h264_qpel_pixels_tab[1][8] = ff_avg_h264_qpel8_mc02_lsx;
c->avg_h264_qpel_pixels_tab[1][9] = ff_avg_h264_qpel8_mc12_lsx;
c->avg_h264_qpel_pixels_tab[1][10] = ff_avg_h264_qpel8_mc22_lsx;
c->avg_h264_qpel_pixels_tab[1][11] = ff_avg_h264_qpel8_mc32_lsx;
c->avg_h264_qpel_pixels_tab[1][13] = ff_avg_h264_qpel8_mc13_lsx;
c->avg_h264_qpel_pixels_tab[1][14] = ff_avg_h264_qpel8_mc23_lsx;
c->avg_h264_qpel_pixels_tab[1][15] = ff_avg_h264_qpel8_mc33_lsx;
}
}
#if HAVE_LASX
if (have_lasx(cpu_flags)) {
if (8 == bit_depth) {
c->put_h264_qpel_pixels_tab[0][0] = ff_put_h264_qpel16_mc00_lasx;
@@ -95,4 +166,5 @@ av_cold void ff_h264qpel_init_loongarch(H264QpelContext *c, int bit_depth)
c->avg_h264_qpel_pixels_tab[1][15] = ff_avg_h264_qpel8_mc33_lasx;
}
}
#endif
}
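
For reference, the second table index encodes the quarter-pel position: entry (y << 2) | x is ff_*_mcXY, where x is the horizontal and y the vertical quarter-sample offset, and the first index selects the block size (0 for 16x16, 1 for 8x8). A minimal dispatch sketch under those assumptions; the mc_luma wrapper is illustrative and not part of this commit:

/* Illustrative only: how a caller selects one of the functions registered
 * above. Index (y << 2) | x matches the ff_*_mcXY naming, e.g. x=2, y=1
 * gives index 6 -> ff_*_mc21. */
static void mc_luma(const H264QpelContext *c, uint8_t *dst, const uint8_t *src,
                    ptrdiff_t stride, int size_idx, int x, int y)
{
    c->put_h264_qpel_pixels_tab[size_idx][(y << 2) | x](dst, src, stride);
}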

libavcodec/loongarch/h264qpel_lasx.c

@@ -21,7 +21,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "h264qpel_lasx.h"
#include "h264qpel_loongarch.h"
#include "libavutil/loongarch/loongson_intrinsics.h"
#include "libavutil/attributes.h"
@@ -418,157 +418,6 @@ avg_pixels8_8_lsx(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
);
}
/* avg_pixels8_8_lsx : dst = avg(src, dst)
* put_pixels8_l2_8_lsx: dst = avg(src, half) , half stride is 8.
* avg_pixels8_l2_8_lsx: dst = avg(avg(src, half), dst) , half stride is 8.*/
static av_always_inline void
put_pixels8_l2_8_lsx(uint8_t *dst, const uint8_t *src, const uint8_t *half,
ptrdiff_t dstStride, ptrdiff_t srcStride)
{
ptrdiff_t stride_2, stride_3, stride_4;
__asm__ volatile (
/* h0~h7 */
"slli.d %[stride_2], %[srcStride], 1 \n\t"
"add.d %[stride_3], %[stride_2], %[srcStride] \n\t"
"slli.d %[stride_4], %[stride_2], 1 \n\t"
"vld $vr0, %[src], 0 \n\t"
"vldx $vr1, %[src], %[srcStride] \n\t"
"vldx $vr2, %[src], %[stride_2] \n\t"
"vldx $vr3, %[src], %[stride_3] \n\t"
"add.d %[src], %[src], %[stride_4] \n\t"
"vld $vr4, %[src], 0 \n\t"
"vldx $vr5, %[src], %[srcStride] \n\t"
"vldx $vr6, %[src], %[stride_2] \n\t"
"vldx $vr7, %[src], %[stride_3] \n\t"
"vld $vr8, %[half], 0x00 \n\t"
"vld $vr9, %[half], 0x08 \n\t"
"vld $vr10, %[half], 0x10 \n\t"
"vld $vr11, %[half], 0x18 \n\t"
"vld $vr12, %[half], 0x20 \n\t"
"vld $vr13, %[half], 0x28 \n\t"
"vld $vr14, %[half], 0x30 \n\t"
"vld $vr15, %[half], 0x38 \n\t"
"vavgr.bu $vr0, $vr8, $vr0 \n\t"
"vavgr.bu $vr1, $vr9, $vr1 \n\t"
"vavgr.bu $vr2, $vr10, $vr2 \n\t"
"vavgr.bu $vr3, $vr11, $vr3 \n\t"
"vavgr.bu $vr4, $vr12, $vr4 \n\t"
"vavgr.bu $vr5, $vr13, $vr5 \n\t"
"vavgr.bu $vr6, $vr14, $vr6 \n\t"
"vavgr.bu $vr7, $vr15, $vr7 \n\t"
"vstelm.d $vr0, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr1, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr2, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr3, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr4, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr5, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr6, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr7, %[dst], 0, 0 \n\t"
: [dst]"+&r"(dst), [half]"+&r"(half), [src]"+&r"(src),
[stride_2]"=&r"(stride_2), [stride_3]"=&r"(stride_3),
[stride_4]"=&r"(stride_4)
: [srcStride]"r"(srcStride), [dstStride]"r"(dstStride)
: "memory"
);
}
/* avg_pixels8_8_lsx : dst = avg(src, dst)
* put_pixels8_l2_8_lsx: dst = avg(src, half) , half stride is 8.
* avg_pixels8_l2_8_lsx: dst = avg(avg(src, half), dst) , half stride is 8.*/
static av_always_inline void
avg_pixels8_l2_8_lsx(uint8_t *dst, const uint8_t *src, const uint8_t *half,
ptrdiff_t dstStride, ptrdiff_t srcStride)
{
uint8_t *tmp = dst;
ptrdiff_t stride_2, stride_3, stride_4;
__asm__ volatile (
/* h0~h7 */
"slli.d %[stride_2], %[srcStride], 1 \n\t"
"add.d %[stride_3], %[stride_2], %[srcStride] \n\t"
"slli.d %[stride_4], %[stride_2], 1 \n\t"
"vld $vr0, %[src], 0 \n\t"
"vldx $vr1, %[src], %[srcStride] \n\t"
"vldx $vr2, %[src], %[stride_2] \n\t"
"vldx $vr3, %[src], %[stride_3] \n\t"
"add.d %[src], %[src], %[stride_4] \n\t"
"vld $vr4, %[src], 0 \n\t"
"vldx $vr5, %[src], %[srcStride] \n\t"
"vldx $vr6, %[src], %[stride_2] \n\t"
"vldx $vr7, %[src], %[stride_3] \n\t"
"vld $vr8, %[half], 0x00 \n\t"
"vld $vr9, %[half], 0x08 \n\t"
"vld $vr10, %[half], 0x10 \n\t"
"vld $vr11, %[half], 0x18 \n\t"
"vld $vr12, %[half], 0x20 \n\t"
"vld $vr13, %[half], 0x28 \n\t"
"vld $vr14, %[half], 0x30 \n\t"
"vld $vr15, %[half], 0x38 \n\t"
"vavgr.bu $vr0, $vr8, $vr0 \n\t"
"vavgr.bu $vr1, $vr9, $vr1 \n\t"
"vavgr.bu $vr2, $vr10, $vr2 \n\t"
"vavgr.bu $vr3, $vr11, $vr3 \n\t"
"vavgr.bu $vr4, $vr12, $vr4 \n\t"
"vavgr.bu $vr5, $vr13, $vr5 \n\t"
"vavgr.bu $vr6, $vr14, $vr6 \n\t"
"vavgr.bu $vr7, $vr15, $vr7 \n\t"
"slli.d %[stride_2], %[dstStride], 1 \n\t"
"add.d %[stride_3], %[stride_2], %[dstStride] \n\t"
"slli.d %[stride_4], %[stride_2], 1 \n\t"
"vld $vr8, %[tmp], 0 \n\t"
"vldx $vr9, %[tmp], %[dstStride] \n\t"
"vldx $vr10, %[tmp], %[stride_2] \n\t"
"vldx $vr11, %[tmp], %[stride_3] \n\t"
"add.d %[tmp], %[tmp], %[stride_4] \n\t"
"vld $vr12, %[tmp], 0 \n\t"
"vldx $vr13, %[tmp], %[dstStride] \n\t"
"vldx $vr14, %[tmp], %[stride_2] \n\t"
"vldx $vr15, %[tmp], %[stride_3] \n\t"
"vavgr.bu $vr0, $vr8, $vr0 \n\t"
"vavgr.bu $vr1, $vr9, $vr1 \n\t"
"vavgr.bu $vr2, $vr10, $vr2 \n\t"
"vavgr.bu $vr3, $vr11, $vr3 \n\t"
"vavgr.bu $vr4, $vr12, $vr4 \n\t"
"vavgr.bu $vr5, $vr13, $vr5 \n\t"
"vavgr.bu $vr6, $vr14, $vr6 \n\t"
"vavgr.bu $vr7, $vr15, $vr7 \n\t"
"vstelm.d $vr0, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr1, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr2, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr3, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr4, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr5, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr6, %[dst], 0, 0 \n\t"
"add.d %[dst], %[dst], %[dstStride] \n\t"
"vstelm.d $vr7, %[dst], 0, 0 \n\t"
: [dst]"+&r"(dst), [tmp]"+&r"(tmp), [half]"+&r"(half),
[src]"+&r"(src), [stride_2]"=&r"(stride_2),
[stride_3]"=&r"(stride_3), [stride_4]"=&r"(stride_4)
: [dstStride]"r"(dstStride), [srcStride]"r"(srcStride)
: "memory"
);
}
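
In scalar terms the two _l2_ helpers above are rounded byte averages: vavgr.bu computes (a + b + 1) >> 1 per lane. A plain C model of what the assembly computes, assuming the half buffer is packed at stride 8 as the comments state; illustrative, not part of this commit:

#include <stddef.h>
#include <stdint.h>

/* Scalar model of put_pixels8_l2_8_lsx: dst = rounded avg of src and half. */
static void put_pixels8_l2_8_c(uint8_t *dst, const uint8_t *src,
                               const uint8_t *half,
                               ptrdiff_t dstStride, ptrdiff_t srcStride)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = (src[j] + half[8 * i + j] + 1) >> 1;
        src += srcStride;
        dst += dstStride;
    }
}

/* Scalar model of avg_pixels8_l2_8_lsx: average src with half first,
 * then average that result with the existing dst contents. */
static void avg_pixels8_l2_8_c(uint8_t *dst, const uint8_t *src,
                               const uint8_t *half,
                               ptrdiff_t dstStride, ptrdiff_t srcStride)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            int t = (src[j] + half[8 * i + j] + 1) >> 1;
            dst[j] = (dst[j] + t + 1) >> 1;
        }
        src += srcStride;
        dst += dstStride;
    }
}

The 16-pixel variants further down follow the same pattern with 16 columns, 16 rows, and a half buffer packed at stride 16.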
/* put_pixels16_8_lsx: dst = src */
static av_always_inline void
put_pixels16_8_lsx(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
@@ -729,254 +578,6 @@ avg_pixels16_8_lsx(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
);
}
/* avg_pixels16_8_lsx : dst = avg(src, dst)
* put_pixels16_l2_8_lsx: dst = avg(src, half) , half stride is 16.
* avg_pixels16_l2_8_lsx: dst = avg(avg(src, half), dst) , half stride is 16.*/
static av_always_inline void
put_pixels16_l2_8_lsx(uint8_t *dst, const uint8_t *src, uint8_t *half,
ptrdiff_t dstStride, ptrdiff_t srcStride)
{
ptrdiff_t stride_2, stride_3, stride_4;
ptrdiff_t dstride_2, dstride_3, dstride_4;
__asm__ volatile (
"slli.d %[stride_2], %[srcStride], 1 \n\t"
"add.d %[stride_3], %[stride_2], %[srcStride] \n\t"
"slli.d %[stride_4], %[stride_2], 1 \n\t"
"slli.d %[dstride_2], %[dstStride], 1 \n\t"
"add.d %[dstride_3], %[dstride_2], %[dstStride] \n\t"
"slli.d %[dstride_4], %[dstride_2], 1 \n\t"
/* h0~h7 */
"vld $vr0, %[src], 0 \n\t"
"vldx $vr1, %[src], %[srcStride] \n\t"
"vldx $vr2, %[src], %[stride_2] \n\t"
"vldx $vr3, %[src], %[stride_3] \n\t"
"add.d %[src], %[src], %[stride_4] \n\t"
"vld $vr4, %[src], 0 \n\t"
"vldx $vr5, %[src], %[srcStride] \n\t"
"vldx $vr6, %[src], %[stride_2] \n\t"
"vldx $vr7, %[src], %[stride_3] \n\t"
"add.d %[src], %[src], %[stride_4] \n\t"
"vld $vr8, %[half], 0x00 \n\t"
"vld $vr9, %[half], 0x10 \n\t"
"vld $vr10, %[half], 0x20 \n\t"
"vld $vr11, %[half], 0x30 \n\t"
"vld $vr12, %[half], 0x40 \n\t"
"vld $vr13, %[half], 0x50 \n\t"
"vld $vr14, %[half], 0x60 \n\t"
"vld $vr15, %[half], 0x70 \n\t"
"vavgr.bu $vr0, $vr8, $vr0 \n\t"
"vavgr.bu $vr1, $vr9, $vr1 \n\t"
"vavgr.bu $vr2, $vr10, $vr2 \n\t"
"vavgr.bu $vr3, $vr11, $vr3 \n\t"
"vavgr.bu $vr4, $vr12, $vr4 \n\t"
"vavgr.bu $vr5, $vr13, $vr5 \n\t"
"vavgr.bu $vr6, $vr14, $vr6 \n\t"
"vavgr.bu $vr7, $vr15, $vr7 \n\t"
"vst $vr0, %[dst], 0 \n\t"
"vstx $vr1, %[dst], %[dstStride] \n\t"
"vstx $vr2, %[dst], %[dstride_2] \n\t"
"vstx $vr3, %[dst], %[dstride_3] \n\t"
"add.d %[dst], %[dst], %[dstride_4] \n\t"
"vst $vr4, %[dst], 0 \n\t"
"vstx $vr5, %[dst], %[dstStride] \n\t"
"vstx $vr6, %[dst], %[dstride_2] \n\t"
"vstx $vr7, %[dst], %[dstride_3] \n\t"
"add.d %[dst], %[dst], %[dstride_4] \n\t"
/* h8~h15 */
"vld $vr0, %[src], 0 \n\t"
"vldx $vr1, %[src], %[srcStride] \n\t"
"vldx $vr2, %[src], %[stride_2] \n\t"
"vldx $vr3, %[src], %[stride_3] \n\t"
"add.d %[src], %[src], %[stride_4] \n\t"
"vld $vr4, %[src], 0 \n\t"
"vldx $vr5, %[src], %[srcStride] \n\t"
"vldx $vr6, %[src], %[stride_2] \n\t"
"vldx $vr7, %[src], %[stride_3] \n\t"
"vld $vr8, %[half], 0x80 \n\t"
"vld $vr9, %[half], 0x90 \n\t"
"vld $vr10, %[half], 0xa0 \n\t"
"vld $vr11, %[half], 0xb0 \n\t"
"vld $vr12, %[half], 0xc0 \n\t"
"vld $vr13, %[half], 0xd0 \n\t"
"vld $vr14, %[half], 0xe0 \n\t"
"vld $vr15, %[half], 0xf0 \n\t"
"vavgr.bu $vr0, $vr8, $vr0 \n\t"
"vavgr.bu $vr1, $vr9, $vr1 \n\t"
"vavgr.bu $vr2, $vr10, $vr2 \n\t"
"vavgr.bu $vr3, $vr11, $vr3 \n\t"
"vavgr.bu $vr4, $vr12, $vr4 \n\t"
"vavgr.bu $vr5, $vr13, $vr5 \n\t"
"vavgr.bu $vr6, $vr14, $vr6 \n\t"
"vavgr.bu $vr7, $vr15, $vr7 \n\t"
"vst $vr0, %[dst], 0 \n\t"
"vstx $vr1, %[dst], %[dstStride] \n\t"
"vstx $vr2, %[dst], %[dstride_2] \n\t"
"vstx $vr3, %[dst], %[dstride_3] \n\t"
"add.d %[dst], %[dst], %[dstride_4] \n\t"
"vst $vr4, %[dst], 0 \n\t"
"vstx $vr5, %[dst], %[dstStride] \n\t"
"vstx $vr6, %[dst], %[dstride_2] \n\t"
"vstx $vr7, %[dst], %[dstride_3] \n\t"
: [dst]"+&r"(dst), [half]"+&r"(half), [src]"+&r"(src),
[stride_2]"=&r"(stride_2), [stride_3]"=&r"(stride_3),
[stride_4]"=&r"(stride_4), [dstride_2]"=&r"(dstride_2),
[dstride_3]"=&r"(dstride_3), [dstride_4]"=&r"(dstride_4)
: [dstStride]"r"(dstStride), [srcStride]"r"(srcStride)
: "memory"
);
}
/* avg_pixels16_8_lsx : dst = avg(src, dst)
* put_pixels16_l2_8_lsx: dst = avg(src, half) , half stride is 16.
* avg_pixels16_l2_8_lsx: dst = avg(avg(src, half), dst) , half stride is 16.*/
static av_always_inline void
avg_pixels16_l2_8_lsx(uint8_t *dst, const uint8_t *src, uint8_t *half,
ptrdiff_t dstStride, ptrdiff_t srcStride)
{
uint8_t *tmp = dst;
ptrdiff_t stride_2, stride_3, stride_4;
ptrdiff_t dstride_2, dstride_3, dstride_4;
__asm__ volatile (
"slli.d %[stride_2], %[srcStride], 1 \n\t"
"add.d %[stride_3], %[stride_2], %[srcStride] \n\t"
"slli.d %[stride_4], %[stride_2], 1 \n\t"
"slli.d %[dstride_2], %[dstStride], 1 \n\t"
"add.d %[dstride_3], %[dstride_2], %[dstStride] \n\t"
"slli.d %[dstride_4], %[dstride_2], 1 \n\t"
/* h0~h7 */
"vld $vr0, %[src], 0 \n\t"
"vldx $vr1, %[src], %[srcStride] \n\t"
"vldx $vr2, %[src], %[stride_2] \n\t"
"vldx $vr3, %[src], %[stride_3] \n\t"
"add.d %[src], %[src], %[stride_4] \n\t"
"vld $vr4, %[src], 0 \n\t"
"vldx $vr5, %[src], %[srcStride] \n\t"
"vldx $vr6, %[src], %[stride_2] \n\t"
"vldx $vr7, %[src], %[stride_3] \n\t"
"add.d %[src], %[src], %[stride_4] \n\t"
"vld $vr8, %[half], 0x00 \n\t"
"vld $vr9, %[half], 0x10 \n\t"
"vld $vr10, %[half], 0x20 \n\t"
"vld $vr11, %[half], 0x30 \n\t"
"vld $vr12, %[half], 0x40 \n\t"
"vld $vr13, %[half], 0x50 \n\t"
"vld $vr14, %[half], 0x60 \n\t"
"vld $vr15, %[half], 0x70 \n\t"
"vavgr.bu $vr0, $vr8, $vr0 \n\t"
"vavgr.bu $vr1, $vr9, $vr1 \n\t"
"vavgr.bu $vr2, $vr10, $vr2 \n\t"
"vavgr.bu $vr3, $vr11, $vr3 \n\t"
"vavgr.bu $vr4, $vr12, $vr4 \n\t"
"vavgr.bu $vr5, $vr13, $vr5 \n\t"
"vavgr.bu $vr6, $vr14, $vr6 \n\t"
"vavgr.bu $vr7, $vr15, $vr7 \n\t"
"vld $vr8, %[tmp], 0 \n\t"
"vldx $vr9, %[tmp], %[dstStride] \n\t"
"vldx $vr10, %[tmp], %[dstride_2] \n\t"
"vldx $vr11, %[tmp], %[dstride_3] \n\t"
"add.d %[tmp], %[tmp], %[dstride_4] \n\t"
"vld $vr12, %[tmp], 0 \n\t"
"vldx $vr13, %[tmp], %[dstStride] \n\t"
"vldx $vr14, %[tmp], %[dstride_2] \n\t"
"vldx $vr15, %[tmp], %[dstride_3] \n\t"
"add.d %[tmp], %[tmp], %[dstride_4] \n\t"
"vavgr.bu $vr0, $vr8, $vr0 \n\t"
"vavgr.bu $vr1, $vr9, $vr1 \n\t"
"vavgr.bu $vr2, $vr10, $vr2 \n\t"
"vavgr.bu $vr3, $vr11, $vr3 \n\t"
"vavgr.bu $vr4, $vr12, $vr4 \n\t"
"vavgr.bu $vr5, $vr13, $vr5 \n\t"
"vavgr.bu $vr6, $vr14, $vr6 \n\t"
"vavgr.bu $vr7, $vr15, $vr7 \n\t"
"vst $vr0, %[dst], 0 \n\t"
"vstx $vr1, %[dst], %[dstStride] \n\t"
"vstx $vr2, %[dst], %[dstride_2] \n\t"
"vstx $vr3, %[dst], %[dstride_3] \n\t"
"add.d %[dst], %[dst], %[dstride_4] \n\t"
"vst $vr4, %[dst], 0 \n\t"
"vstx $vr5, %[dst], %[dstStride] \n\t"
"vstx $vr6, %[dst], %[dstride_2] \n\t"
"vstx $vr7, %[dst], %[dstride_3] \n\t"
"add.d %[dst], %[dst], %[dstride_4] \n\t"
/* h8~h15 */
"vld $vr0, %[src], 0 \n\t"
"vldx $vr1, %[src], %[srcStride] \n\t"
"vldx $vr2, %[src], %[stride_2] \n\t"
"vldx $vr3, %[src], %[stride_3] \n\t"
"add.d %[src], %[src], %[stride_4] \n\t"
"vld $vr4, %[src], 0 \n\t"
"vldx $vr5, %[src], %[srcStride] \n\t"
"vldx $vr6, %[src], %[stride_2] \n\t"
"vldx $vr7, %[src], %[stride_3] \n\t"
"vld $vr8, %[half], 0x80 \n\t"
"vld $vr9, %[half], 0x90 \n\t"
"vld $vr10, %[half], 0xa0 \n\t"
"vld $vr11, %[half], 0xb0 \n\t"
"vld $vr12, %[half], 0xc0 \n\t"
"vld $vr13, %[half], 0xd0 \n\t"
"vld $vr14, %[half], 0xe0 \n\t"
"vld $vr15, %[half], 0xf0 \n\t"
"vavgr.bu $vr0, $vr8, $vr0 \n\t"
"vavgr.bu $vr1, $vr9, $vr1 \n\t"
"vavgr.bu $vr2, $vr10, $vr2 \n\t"
"vavgr.bu $vr3, $vr11, $vr3 \n\t"
"vavgr.bu $vr4, $vr12, $vr4 \n\t"
"vavgr.bu $vr5, $vr13, $vr5 \n\t"
"vavgr.bu $vr6, $vr14, $vr6 \n\t"
"vavgr.bu $vr7, $vr15, $vr7 \n\t"
"vld $vr8, %[tmp], 0 \n\t"
"vldx $vr9, %[tmp], %[dstStride] \n\t"
"vldx $vr10, %[tmp], %[dstride_2] \n\t"
"vldx $vr11, %[tmp], %[dstride_3] \n\t"
"add.d %[tmp], %[tmp], %[dstride_4] \n\t"
"vld $vr12, %[tmp], 0 \n\t"
"vldx $vr13, %[tmp], %[dstStride] \n\t"
"vldx $vr14, %[tmp], %[dstride_2] \n\t"
"vldx $vr15, %[tmp], %[dstride_3] \n\t"
"vavgr.bu $vr0, $vr8, $vr0 \n\t"
"vavgr.bu $vr1, $vr9, $vr1 \n\t"
"vavgr.bu $vr2, $vr10, $vr2 \n\t"
"vavgr.bu $vr3, $vr11, $vr3 \n\t"
"vavgr.bu $vr4, $vr12, $vr4 \n\t"
"vavgr.bu $vr5, $vr13, $vr5 \n\t"
"vavgr.bu $vr6, $vr14, $vr6 \n\t"
"vavgr.bu $vr7, $vr15, $vr7 \n\t"
"vst $vr0, %[dst], 0 \n\t"
"vstx $vr1, %[dst], %[dstStride] \n\t"
"vstx $vr2, %[dst], %[dstride_2] \n\t"
"vstx $vr3, %[dst], %[dstride_3] \n\t"
"add.d %[dst], %[dst], %[dstride_4] \n\t"
"vst $vr4, %[dst], 0 \n\t"
"vstx $vr5, %[dst], %[dstStride] \n\t"
"vstx $vr6, %[dst], %[dstride_2] \n\t"
"vstx $vr7, %[dst], %[dstride_3] \n\t"
: [dst]"+&r"(dst), [tmp]"+&r"(tmp), [half]"+&r"(half), [src]"+&r"(src),
[stride_2]"=&r"(stride_2), [stride_3]"=&r"(stride_3),
[stride_4]"=&r"(stride_4), [dstride_2]"=&r"(dstride_2),
[dstride_3]"=&r"(dstride_3), [dstride_4]"=&r"(dstride_4)
: [dstStride]"r"(dstStride), [srcStride]"r"(srcStride)
: "memory"
);
}
#define QPEL8_H_LOWPASS(out_v) \
src00 = __lasx_xvld(src, - 2); \
src += srcStride; \

libavcodec/loongarch/h264qpel_lasx.h (deleted)

@@ -1,158 +0,0 @@
/*
* Copyright (c) 2020 Loongson Technology Corporation Limited
* Contributed by Shiyou Yin <yinshiyou-hf@loongson.cn>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_LOONGARCH_H264QPEL_LASX_H
#define AVCODEC_LOONGARCH_H264QPEL_LASX_H
#include <stdint.h>
#include <stddef.h>
#include "libavcodec/h264.h"
void ff_h264_h_lpf_luma_inter_lasx(uint8_t *src, int stride,
int alpha, int beta, int8_t *tc0);
void ff_h264_v_lpf_luma_inter_lasx(uint8_t *src, int stride,
int alpha, int beta, int8_t *tc0);
void ff_put_h264_qpel16_mc00_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc10_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc20_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc30_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc01_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc11_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc21_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc31_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc02_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc12_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc32_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc22_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc03_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc13_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc23_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc33_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc00_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc10_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc20_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc30_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc01_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc11_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc21_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc31_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc02_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc12_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc22_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc32_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc03_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc13_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc23_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc33_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel8_mc00_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc10_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc20_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc30_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc01_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc11_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc21_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc31_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc02_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc12_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc22_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc32_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc03_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc13_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc23_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc33_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc00_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc10_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc20_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc30_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc11_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc21_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc31_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc02_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc12_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc22_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc32_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc13_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc23_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc33_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
#endif // #ifndef AVCODEC_LOONGARCH_H264QPEL_LASX_H

libavcodec/loongarch/h264qpel_loongarch.h (new file)

@@ -0,0 +1,312 @@
/*
* Copyright (c) 2023 Loongson Technology Corporation Limited
* Contributed by Shiyou Yin <yinshiyou-hf@loongson.cn>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_LOONGARCH_H264QPEL_LOONGARCH_H
#define AVCODEC_LOONGARCH_H264QPEL_LOONGARCH_H
#include <stdint.h>
#include <stddef.h>
#include "libavcodec/h264.h"
#include "config.h"
void put_h264_qpel8_hv_lowpass_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dstStride, ptrdiff_t srcStride);
void put_h264_qpel8_h_lowpass_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dstStride, ptrdiff_t srcStride);
void put_h264_qpel8_v_lowpass_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dstStride, ptrdiff_t srcStride);
void put_pixels16_l2_8_lsx(uint8_t *dst, const uint8_t *src, uint8_t *half,
ptrdiff_t dstStride, ptrdiff_t srcStride);
void put_pixels8_l2_8_lsx(uint8_t *dst, const uint8_t *src, const uint8_t *half,
ptrdiff_t dstStride, ptrdiff_t srcStride);
void avg_h264_qpel8_h_lowpass_lsx(uint8_t *dst, const uint8_t *src, int dstStride,
int srcStride);
void avg_h264_qpel8_v_lowpass_lsx(uint8_t *dst, uint8_t *src, int dstStride,
int srcStride);
void avg_pixels16_l2_8_lsx(uint8_t *dst, const uint8_t *src, uint8_t *half,
ptrdiff_t dstStride, ptrdiff_t srcStride);
void avg_h264_qpel8_hv_lowpass_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dstStride, ptrdiff_t srcStride);
void avg_pixels8_l2_8_lsx(uint8_t *dst, const uint8_t *src, const uint8_t *half,
ptrdiff_t dstStride, ptrdiff_t srcStride);
void ff_put_h264_qpel16_mc00_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc10_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc20_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc30_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc01_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc11_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc13_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc31_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc33_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc03_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc02_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc22_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc21_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel16_mc12_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel16_mc32_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel16_mc23_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel16_mc00_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc10_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc30_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc33_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc11_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc31_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc13_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc20_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc02_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel16_mc03_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel16_mc23_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel16_mc21_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel16_mc01_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel16_mc32_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel16_mc12_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel16_mc22_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc03_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc00_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc01_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc30_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc10_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc33_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc13_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc31_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc11_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc32_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc21_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc23_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc12_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc02_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc22_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc20_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc00_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc10_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc20_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc30_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc11_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc21_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc31_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc02_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc12_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc22_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc32_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc13_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc23_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc33_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
#if HAVE_LASX
void ff_h264_h_lpf_luma_inter_lasx(uint8_t *src, int stride,
int alpha, int beta, int8_t *tc0);
void ff_h264_v_lpf_luma_inter_lasx(uint8_t *src, int stride,
int alpha, int beta, int8_t *tc0);
void ff_put_h264_qpel16_mc00_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc10_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc20_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc30_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc01_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc11_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc21_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc31_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc02_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc12_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc32_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc22_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc03_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc13_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc23_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel16_mc33_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc00_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc10_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc20_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc30_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc01_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc11_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc21_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc31_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc02_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc12_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc22_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc32_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc03_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc13_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc23_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel16_mc33_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_put_h264_qpel8_mc00_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc10_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc20_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc30_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc01_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc11_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc21_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc31_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc02_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc12_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc22_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc32_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc03_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc13_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc23_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_put_h264_qpel8_mc33_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride);
void ff_avg_h264_qpel8_mc00_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc10_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc20_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc30_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc11_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc21_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc31_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc02_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc12_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc22_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc32_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc13_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc23_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
void ff_avg_h264_qpel8_mc33_lasx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dst_stride);
#endif
#endif // #ifndef AVCODEC_LOONGARCH_H264QPEL_LOONGARCH_H

libavcodec/loongarch/h264qpel_lsx.c (new file)

@@ -0,0 +1,487 @@
/*
* Loongson LSX optimized h264qpel
*
* Copyright (c) 2023 Loongson Technology Corporation Limited
* Contributed by Hecai Yuan <yuanhecai@loongson.cn>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "h264qpel_loongarch.h"
#include "libavutil/loongarch/loongson_intrinsics.h"
#include "libavutil/attributes.h"
static void put_h264_qpel16_hv_lowpass_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dstStride, ptrdiff_t srcStride)
{
put_h264_qpel8_hv_lowpass_lsx(dst, src, dstStride, srcStride);
put_h264_qpel8_hv_lowpass_lsx(dst + 8, src + 8, dstStride, srcStride);
src += srcStride << 3;
dst += dstStride << 3;
put_h264_qpel8_hv_lowpass_lsx(dst, src, dstStride, srcStride);
put_h264_qpel8_hv_lowpass_lsx(dst + 8, src + 8, dstStride, srcStride);
}
void ff_put_h264_qpel16_mc22_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
put_h264_qpel16_hv_lowpass_lsx(dst, src, stride, stride);
}
static void put_h264_qpel16_h_lowpass_lsx(uint8_t *dst, const uint8_t *src,
int dstStride, int srcStride)
{
put_h264_qpel8_h_lowpass_lsx(dst, src, dstStride, srcStride);
put_h264_qpel8_h_lowpass_lsx(dst+8, src+8, dstStride, srcStride);
src += srcStride << 3;
dst += dstStride << 3;
put_h264_qpel8_h_lowpass_lsx(dst, src, dstStride, srcStride);
put_h264_qpel8_h_lowpass_lsx(dst+8, src+8, dstStride, srcStride);
}
static void put_h264_qpel16_v_lowpass_lsx(uint8_t *dst, const uint8_t *src,
int dstStride, int srcStride)
{
put_h264_qpel8_v_lowpass_lsx(dst, (uint8_t*)src, dstStride, srcStride);
put_h264_qpel8_v_lowpass_lsx(dst+8, (uint8_t*)src+8, dstStride, srcStride);
src += 8*srcStride;
dst += 8*dstStride;
put_h264_qpel8_v_lowpass_lsx(dst, (uint8_t*)src, dstStride, srcStride);
put_h264_qpel8_v_lowpass_lsx(dst+8, (uint8_t*)src+8, dstStride, srcStride);
}
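/* Editorial note: each fractional-position wrapper below builds at most two
 * half-sample planes in a stack buffer, then blends them with a rounded
 * average. halfH and halfV come from the 6-tap horizontal or vertical
 * lowpass, halfHV from the combined filter; e.g. mc21 = avg(halfH, halfHV),
 * mc12 = avg(halfV, halfHV), mc03 = avg(src + stride, halfV). (Some wrappers
 * reuse the name halfH for the vertically filtered plane.) The avg_ variants
 * additionally average the result with the existing dst contents. */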
void ff_put_h264_qpel16_mc21_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[512];
uint8_t *const halfH = temp;
uint8_t *const halfHV = temp + 256;
put_h264_qpel16_h_lowpass_lsx(halfH, src, 16, stride);
put_h264_qpel16_hv_lowpass_lsx(halfHV, src, 16, stride);
put_pixels16_l2_8_lsx(dst, halfH, halfHV, stride, 16);
}
void ff_put_h264_qpel16_mc12_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[512];
uint8_t *const halfHV = temp;
uint8_t *const halfH = temp + 256;
put_h264_qpel16_hv_lowpass_lsx(halfHV, src, 16, stride);
put_h264_qpel16_v_lowpass_lsx(halfH, src, 16, stride);
put_pixels16_l2_8_lsx(dst, halfH, halfHV, stride, 16);
}
void ff_put_h264_qpel16_mc32_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[512];
uint8_t *const halfHV = temp;
uint8_t *const halfH = temp + 256;
put_h264_qpel16_hv_lowpass_lsx(halfHV, src, 16, stride);
put_h264_qpel16_v_lowpass_lsx(halfH, src + 1, 16, stride);
put_pixels16_l2_8_lsx(dst, halfH, halfHV, stride, 16);
}
void ff_put_h264_qpel16_mc23_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[512];
uint8_t *const halfH = temp;
uint8_t *const halfHV = temp + 256;
put_h264_qpel16_h_lowpass_lsx(halfH, src + stride, 16, stride);
put_h264_qpel16_hv_lowpass_lsx(halfHV, src, 16, stride);
put_pixels16_l2_8_lsx(dst, halfH, halfHV, stride, 16);
}
static void avg_h264_qpel16_v_lowpass_lsx(uint8_t *dst, const uint8_t *src,
int dstStride, int srcStride)
{
avg_h264_qpel8_v_lowpass_lsx(dst, (uint8_t*)src, dstStride, srcStride);
avg_h264_qpel8_v_lowpass_lsx(dst+8, (uint8_t*)src+8, dstStride, srcStride);
src += 8*srcStride;
dst += 8*dstStride;
avg_h264_qpel8_v_lowpass_lsx(dst, (uint8_t*)src, dstStride, srcStride);
avg_h264_qpel8_v_lowpass_lsx(dst+8, (uint8_t*)src+8, dstStride, srcStride);
}
void ff_avg_h264_qpel16_mc02_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avg_h264_qpel16_v_lowpass_lsx(dst, src, stride, stride);
}
void ff_avg_h264_qpel16_mc03_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t half[256];
put_h264_qpel16_v_lowpass_lsx(half, src, 16, stride);
avg_pixels16_l2_8_lsx(dst, src + stride, half, stride, stride);
}
void ff_avg_h264_qpel16_mc23_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[512];
uint8_t *const halfH = temp;
uint8_t *const halfHV = temp + 256;
put_h264_qpel16_h_lowpass_lsx(halfH, src + stride, 16, stride);
put_h264_qpel16_hv_lowpass_lsx(halfHV, src, 16, stride);
avg_pixels16_l2_8_lsx(dst, halfH, halfHV, stride, 16);
}
void ff_avg_h264_qpel16_mc21_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[512];
uint8_t *const halfH = temp;
uint8_t *const halfHV = temp + 256;
put_h264_qpel16_h_lowpass_lsx(halfH, src, 16, stride);
put_h264_qpel16_hv_lowpass_lsx(halfHV, src, 16, stride);
avg_pixels16_l2_8_lsx(dst, halfH, halfHV, stride, 16);
}
void ff_avg_h264_qpel16_mc01_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t half[256];
put_h264_qpel16_v_lowpass_lsx(half, src, 16, stride);
avg_pixels16_l2_8_lsx(dst, src, half, stride, stride);
}
void ff_avg_h264_qpel16_mc32_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[512];
uint8_t *const halfHV = temp;
uint8_t *const halfH = temp + 256;
put_h264_qpel16_hv_lowpass_lsx(halfHV, src, 16, stride);
put_h264_qpel16_v_lowpass_lsx(halfH, src + 1, 16, stride);
avg_pixels16_l2_8_lsx(dst, halfH, halfHV, stride, 16);
}
void ff_avg_h264_qpel16_mc12_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[512];
uint8_t *const halfHV = temp;
uint8_t *const halfH = temp + 256;
put_h264_qpel16_hv_lowpass_lsx(halfHV, src, 16, stride);
put_h264_qpel16_v_lowpass_lsx(halfH, src, 16, stride);
avg_pixels16_l2_8_lsx(dst, halfH, halfHV, stride, 16);
}
static void avg_h264_qpel16_hv_lowpass_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t dstStride, ptrdiff_t srcStride)
{
avg_h264_qpel8_hv_lowpass_lsx(dst, src, dstStride, srcStride);
avg_h264_qpel8_hv_lowpass_lsx(dst + 8, src + 8, dstStride, srcStride);
src += srcStride << 3;
dst += dstStride << 3;
avg_h264_qpel8_hv_lowpass_lsx(dst, src, dstStride, srcStride);
avg_h264_qpel8_hv_lowpass_lsx(dst + 8, src + 8, dstStride, srcStride);
}
void ff_avg_h264_qpel16_mc22_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avg_h264_qpel16_hv_lowpass_lsx(dst, src, stride, stride);
}
void ff_put_h264_qpel8_mc03_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t half[64];
put_h264_qpel8_v_lowpass_lsx(half, (uint8_t*)src, 8, stride);
put_pixels8_l2_8_lsx(dst, src + stride, half, stride, stride);
}
void ff_put_h264_qpel8_mc01_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t half[64];
put_h264_qpel8_v_lowpass_lsx(half, (uint8_t*)src, 8, stride);
put_pixels8_l2_8_lsx(dst, src, half, stride, stride);
}
void ff_put_h264_qpel8_mc30_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t half[64];
put_h264_qpel8_h_lowpass_lsx(half, src, 8, stride);
put_pixels8_l2_8_lsx(dst, src+1, half, stride, stride);
}
void ff_put_h264_qpel8_mc10_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t half[64];
put_h264_qpel8_h_lowpass_lsx(half, src, 8, stride);
put_pixels8_l2_8_lsx(dst, src, half, stride, stride);
}
void ff_put_h264_qpel8_mc33_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t halfH[64];
uint8_t halfV[64];
put_h264_qpel8_h_lowpass_lsx(halfH, src + stride, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfV, (uint8_t*)src + 1, 8, stride);
put_pixels8_l2_8_lsx(dst, halfH, halfV, stride, 8);
}
void ff_put_h264_qpel8_mc13_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t halfH[64];
uint8_t halfV[64];
put_h264_qpel8_h_lowpass_lsx(halfH, src + stride, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfV, (uint8_t*)src, 8, stride);
put_pixels8_l2_8_lsx(dst, halfH, halfV, stride, 8);
}
void ff_put_h264_qpel8_mc31_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t halfH[64];
uint8_t halfV[64];
put_h264_qpel8_h_lowpass_lsx(halfH, src, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfV, (uint8_t*)src + 1, 8, stride);
put_pixels8_l2_8_lsx(dst, halfH, halfV, stride, 8);
}
void ff_put_h264_qpel8_mc11_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t halfH[64];
uint8_t halfV[64];
put_h264_qpel8_h_lowpass_lsx(halfH, src, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfV, (uint8_t*)src, 8, stride);
put_pixels8_l2_8_lsx(dst, halfH, halfV, stride, 8);
}
void ff_put_h264_qpel8_mc32_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[128];
uint8_t *const halfHV = temp;
uint8_t *const halfH = temp + 64;
put_h264_qpel8_hv_lowpass_lsx(halfHV, src, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfH, (uint8_t*)src + 1, 8, stride);
put_pixels8_l2_8_lsx(dst, halfH, halfHV, stride, 8);
}
void ff_put_h264_qpel8_mc21_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[128];
uint8_t *const halfH = temp;
uint8_t *const halfHV = temp + 64;
put_h264_qpel8_h_lowpass_lsx(halfH, src, 8, stride);
put_h264_qpel8_hv_lowpass_lsx(halfHV, src, 8, stride);
put_pixels8_l2_8_lsx(dst, halfH, halfHV, stride, 8);
}
void ff_put_h264_qpel8_mc23_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[128];
uint8_t *const halfH = temp;
uint8_t *const halfHV = temp + 64;
put_h264_qpel8_h_lowpass_lsx(halfH, src + stride, 8, stride);
put_h264_qpel8_hv_lowpass_lsx(halfHV, src, 8, stride);
put_pixels8_l2_8_lsx(dst, halfH, halfHV, stride, 8);
}
void ff_put_h264_qpel8_mc12_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[128];
uint8_t *const halfHV = temp;
uint8_t *const halfH = temp + 64;
put_h264_qpel8_hv_lowpass_lsx(halfHV, src, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfH, (uint8_t*)src, 8, stride);
put_pixels8_l2_8_lsx(dst, halfH, halfHV, stride, 8);
}
void ff_put_h264_qpel8_mc02_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
put_h264_qpel8_v_lowpass_lsx(dst, (uint8_t*)src, stride, stride);
}
void ff_put_h264_qpel8_mc22_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
put_h264_qpel8_hv_lowpass_lsx(dst, src, stride, stride);
}
void ff_put_h264_qpel8_mc20_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
put_h264_qpel8_h_lowpass_lsx(dst, src, stride, stride);
}
void ff_avg_h264_qpel8_mc10_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t half[64];
put_h264_qpel8_h_lowpass_lsx(half, src, 8, stride);
avg_pixels8_l2_8_lsx(dst, src, half, stride, stride);
}
void ff_avg_h264_qpel8_mc20_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avg_h264_qpel8_h_lowpass_lsx(dst, src, stride, stride);
}
void ff_avg_h264_qpel8_mc30_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t half[64];
put_h264_qpel8_h_lowpass_lsx(half, src, 8, stride);
avg_pixels8_l2_8_lsx(dst, src+1, half, stride, stride);
}
void ff_avg_h264_qpel8_mc11_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t halfH[64];
uint8_t halfV[64];
put_h264_qpel8_h_lowpass_lsx(halfH, src, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfV, (uint8_t*)src, 8, stride);
avg_pixels8_l2_8_lsx(dst, halfH, halfV, stride, 8);
}
void ff_avg_h264_qpel8_mc21_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[128];
uint8_t *const halfH = temp;
uint8_t *const halfHV = temp + 64;
put_h264_qpel8_h_lowpass_lsx(halfH, src, 8, stride);
put_h264_qpel8_hv_lowpass_lsx(halfHV, src, 8, stride);
avg_pixels8_l2_8_lsx(dst, halfH, halfHV, stride, 8);
}
void ff_avg_h264_qpel8_mc31_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t halfH[64];
uint8_t halfV[64];
put_h264_qpel8_h_lowpass_lsx(halfH, src, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfV, (uint8_t*)src + 1, 8, stride);
avg_pixels8_l2_8_lsx(dst, halfH, halfV, stride, 8);
}
void ff_avg_h264_qpel8_mc02_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avg_h264_qpel8_v_lowpass_lsx(dst, (uint8_t*)src, stride, stride);
}
void ff_avg_h264_qpel8_mc12_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[128];
uint8_t *const halfHV = temp;
uint8_t *const halfH = temp + 64;
put_h264_qpel8_hv_lowpass_lsx(halfHV, src, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfH, (uint8_t*)src, 8, stride);
avg_pixels8_l2_8_lsx(dst, halfH, halfHV, stride, 8);
}
void ff_avg_h264_qpel8_mc22_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avg_h264_qpel8_hv_lowpass_lsx(dst, src, stride, stride);
}
void ff_avg_h264_qpel8_mc32_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[128];
uint8_t *const halfHV = temp;
uint8_t *const halfH = temp + 64;
put_h264_qpel8_hv_lowpass_lsx(halfHV, src, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfH, (uint8_t*)src + 1, 8, stride);
avg_pixels8_l2_8_lsx(dst, halfH, halfHV, stride, 8);
}
void ff_avg_h264_qpel8_mc13_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t halfH[64];
uint8_t halfV[64];
put_h264_qpel8_h_lowpass_lsx(halfH, src + stride, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfV, (uint8_t*)src, 8, stride);
avg_pixels8_l2_8_lsx(dst, halfH, halfV, stride, 8);
}
void ff_avg_h264_qpel8_mc23_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t temp[128];
uint8_t *const halfH = temp;
uint8_t *const halfHV = temp + 64;
put_h264_qpel8_h_lowpass_lsx(halfH, src + stride, 8, stride);
put_h264_qpel8_hv_lowpass_lsx(halfHV, src, 8, stride);
avg_pixels8_l2_8_lsx(dst, halfH, halfHV, stride, 8);
}
void ff_avg_h264_qpel8_mc33_lsx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
uint8_t halfH[64];
uint8_t halfV[64];
put_h264_qpel8_h_lowpass_lsx(halfH, src + stride, 8, stride);
put_h264_qpel8_v_lowpass_lsx(halfV, (uint8_t*)src + 1, 8, stride);
avg_pixels8_l2_8_lsx(dst, halfH, halfV, stride, 8);
}