From c013ca58c5349e34610e94336d0fae6ef7a403a0 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 19 Dec 2014 21:44:57 -0500 Subject: [PATCH 01/16] vp9/x86: save one register in loopfilter surface coverage. --- libavcodec/x86/vp9lpf.asm | 56 +++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 416f08f090..e0f7386a08 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -278,22 +278,22 @@ SECTION .text %endmacro %macro DEFINE_REAL_P7_TO_Q7 0-1 0 -%define P7 dst1q + 2*mstrideq + %1 -%define P6 dst1q + mstrideq + %1 -%define P5 dst1q + %1 -%define P4 dst1q + strideq + %1 -%define P3 dstq + 4*mstrideq + %1 -%define P2 dstq + mstride3q + %1 -%define P1 dstq + 2*mstrideq + %1 -%define P0 dstq + mstrideq + %1 -%define Q0 dstq + %1 -%define Q1 dstq + strideq + %1 -%define Q2 dstq + 2*strideq + %1 -%define Q3 dstq + stride3q + %1 -%define Q4 dstq + 4*strideq + %1 -%define Q5 dst2q + mstrideq + %1 -%define Q6 dst2q + %1 -%define Q7 dst2q + strideq + %1 +%define P7 dstq + 4*mstrideq + %1 +%define P6 dstq + mstride3q + %1 +%define P5 dstq + 2*mstrideq + %1 +%define P4 dstq + mstrideq + %1 +%define P3 dstq + %1 +%define P2 dstq + strideq + %1 +%define P1 dstq + 2* strideq + %1 +%define P0 dstq + stride3q + %1 +%define Q0 dstq + 4* strideq + %1 +%define Q1 dst2q + mstride3q + %1 +%define Q2 dst2q + 2*mstrideq + %1 +%define Q3 dst2q + mstrideq + %1 +%define Q4 dst2q + %1 +%define Q5 dst2q + strideq + %1 +%define Q6 dst2q + 2* strideq + %1 +%define Q7 dst2q + stride3q + %1 %endmacro ; ..............AB -> AAAAAAAABBBBBBBB @@ -308,26 +308,26 @@ SECTION .text %endmacro %macro LOOPFILTER 2 ; %1=v/h %2=size1 - lea mstrideq, [strideq] - neg mstrideq + mov mstrideq, strideq + neg mstrideq - lea stride3q, [strideq+2*strideq] - mov mstride3q, stride3q - neg mstride3q + lea stride3q, [strideq*3] + lea mstride3q, [mstrideq*3] %ifidn %1, h %if %2 > 16 %define movx movh - lea dstq, [dstq + 8*strideq - 4] + lea dstq, [dstq + 4*strideq - 4] %else %define movx movu - lea dstq, [dstq + 8*strideq - 8] ; go from top center (h pos) to center left (v pos) + lea dstq, [dstq + 4*strideq - 8] ; go from top center (h pos) to center left (v pos) %endif + lea dst2q, [dstq + 8*strideq] +%else + lea dstq, [dstq + 4*mstrideq] + lea dst2q, [dstq + 8*strideq] %endif - lea dst1q, [dstq + 2*mstride3q] ; dst1q = &dst[stride * -6] - lea dst2q, [dstq + 2* stride3q] ; dst2q = &dst[stride * +6] - DEFINE_REAL_P7_TO_Q7 %ifidn %1, h @@ -795,9 +795,9 @@ SECTION .text %macro LPF_16_VH 2 INIT_XMM %2 -cglobal vp9_loop_filter_v_%1_16, 5,10,16, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 +cglobal vp9_loop_filter_v_%1_16, 5,10,16, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 LOOPFILTER v, %1 -cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3 +cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 LOOPFILTER h, %1 %endmacro From 8132629bd5875e6993a8b810f12c7dec00844944 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 19 Dec 2014 22:09:30 -0500 Subject: [PATCH 02/16] vp9/x86: make cglobal statement more conservative in register allocation. 
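The cglobal line now requests only as many GPRs as each ABI can actually fill from argument registers: UNIX64 receives all five arguments in registers, WIN64 only the first four, and x86-32 none, so H (and on x86-32 also E and I) is read back from the stack through x86inc's rNm names instead of pinning a register. A minimal sketch of the pattern, assuming x86inc's cglobal/rNm conventions (the function and names below are illustrative, not part of this patch):

    ; hypothetical example: 2 args loaded, 3 GPRs used, 8 xmm regs
    cglobal example_filter, 2, 3, 8, dst, stride, tmp
    %define Ed dword r2m       ; third caller argument, left on the stack
        mov  tmpd, Ed          ; fetch it only where it is actually needed
        RET

Requesting fewer GPRs also lets cglobal skip pushing callee-saved registers that are never touched.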
--- libavcodec/x86/vp9lpf.asm | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index e0f7386a08..c62ac462b0 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -307,7 +307,20 @@ SECTION .text %endif %endmacro -%macro LOOPFILTER 2 ; %1=v/h %2=size1 +%macro LOOPFILTER 3 ; %1=v/h %2=size1 %3=stack +%if UNIX64 +cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 +%else +%if WIN64 +cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3, dst, stride, E, I, mstride, dst2, stride3, mstride3 +%else +cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stride3, mstride3 +%define Ed dword r2m +%define Id dword r3m +%endif +%define Hd dword r4m +%endif + mov mstrideq, strideq neg mstrideq @@ -795,10 +808,8 @@ SECTION .text %macro LPF_16_VH 2 INIT_XMM %2 -cglobal vp9_loop_filter_v_%1_16, 5,10,16, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 - LOOPFILTER v, %1 -cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 - LOOPFILTER h, %1 +LOOPFILTER v, %1, 0 +LOOPFILTER h, %1, 256 %endmacro %macro LPF_16_VH_ALL_OPTS 1 From e59bd089868603c09faa2893fe68b426c07b46b6 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 19 Dec 2014 22:18:42 -0500 Subject: [PATCH 03/16] vp9/x86: slightly simplify 44/48/84/88 h stores. --- libavcodec/x86/vp9lpf.asm | 88 ++++++++++++++++++--------------------- 1 file changed, 40 insertions(+), 48 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index c62ac462b0..64d3b268cf 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -724,34 +724,34 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri SBUTTERFLY bw, 2, 3, 8 SBUTTERFLY wd, 0, 2, 8 SBUTTERFLY wd, 1, 3, 8 - SBUTTERFLY dq, 0, 4, 8 - SBUTTERFLY dq, 1, 5, 8 - SBUTTERFLY dq, 2, 6, 8 - SBUTTERFLY dq, 3, 7, 8 movd [P7], m0 - punpckhqdq m0, m8 - movd [P6], m0 - movd [Q0], m1 - punpckhqdq m1, m9 - movd [Q1], m1 movd [P3], m2 - punpckhqdq m2, m10 - movd [P2], m2 + movd [Q0], m1 movd [Q4], m3 - punpckhqdq m3, m11 + psrldq m0, 4 + psrldq m1, 4 + psrldq m2, 4 + psrldq m3, 4 + movd [P6], m0 + movd [P2], m2 + movd [Q1], m1 movd [Q5], m3 - movd [P5], m4 - punpckhqdq m4, m12 - movd [P4], m4 - movd [Q2], m5 - punpckhqdq m5, m13 - movd [Q3], m5 - movd [P1], m6 - punpckhqdq m6, m14 - movd [P0], m6 - movd [Q6], m7 - punpckhqdq m7, m8 - movd [Q7], m7 + psrldq m0, 4 + psrldq m1, 4 + psrldq m2, 4 + psrldq m3, 4 + movd [P5], m0 + movd [P1], m2 + movd [Q2], m1 + movd [Q6], m3 + psrldq m0, 4 + psrldq m1, 4 + psrldq m2, 4 + psrldq m3, 4 + movd [P4], m0 + movd [P0], m2 + movd [Q3], m1 + movd [Q7], m3 %else ; the following code do a transpose of 8 full lines to 16 half ; lines (high part). 
It is inlined to avoid the need of a staging area @@ -776,30 +776,22 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri SBUTTERFLY dq, 1, 5, 8 SBUTTERFLY dq, 2, 6, 8 SBUTTERFLY dq, 3, 7, 8 - movh [P7], m0 - punpckhqdq m0, m8 - movh [P6], m0 - movh [Q0], m1 - punpckhqdq m1, m9 - movh [Q1], m1 - movh [P3], m2 - punpckhqdq m2, m10 - movh [P2], m2 - movh [Q4], m3 - punpckhqdq m3, m11 - movh [Q5], m3 - movh [P5], m4 - punpckhqdq m4, m12 - movh [P4], m4 - movh [Q2], m5 - punpckhqdq m5, m13 - movh [Q3], m5 - movh [P1], m6 - punpckhqdq m6, m14 - movh [P0], m6 - movh [Q6], m7 - punpckhqdq m7, m8 - movh [Q7], m7 + movh [P7], m0 + movhps [P6], m0 + movh [Q0], m1 + movhps [Q1], m1 + movh [P3], m2 + movhps [P2], m2 + movh [Q4], m3 + movhps [Q5], m3 + movh [P5], m4 + movhps [P4], m4 + movh [Q2], m5 + movhps [Q3], m5 + movh [P1], m6 + movhps [P0], m6 + movh [Q6], m7 + movhps [Q7], m7 %endif %endif From d1c55654e11c6aa1e032c6a9dc7cc13bfce4c5d5 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sat, 20 Dec 2014 11:13:06 -0500 Subject: [PATCH 04/16] vp9/x86: remove unused register from ABSSUB_CMP macro. --- libavcodec/x86/vp9lpf.asm | 42 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 64d3b268cf..44db472806 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -69,9 +69,9 @@ SECTION .text %endmacro ; %1 = abs(%2-%3) <= %4 -%macro ABSSUB_CMP 6-7 [pb_80]; dst, src1, src2, cmp, tmp1, tmp2, [pb_80] - ABSSUB %1, %2, %3, %6 ; dst = abs(src1-src2) - CMP_LTE %1, %4, %6, %7 ; dst <= cmp +%macro ABSSUB_CMP 5-6 [pb_80]; dst, src1, src2, cmp, tmp, [pb_80] + ABSSUB %1, %2, %3, %5 ; dst = abs(src1-src2) + CMP_LTE %1, %4, %5, %6 ; dst <= cmp %endmacro %macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp @@ -438,16 +438,16 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri SWAP 10, 6, 14 SWAP 11, 7, 15 %endif - ABSSUB_CMP m5, m8, m9, m2, m6, m7, m0 ; m5 = abs(p3-p2) <= I - ABSSUB_CMP m1, m9, m10, m2, m6, m7, m0 ; m1 = abs(p2-p1) <= I + ABSSUB_CMP m5, m8, m9, m2, m7, m0 ; m5 = abs(p3-p2) <= I + ABSSUB_CMP m1, m9, m10, m2, m7, m0 ; m1 = abs(p2-p1) <= I pand m5, m1 - ABSSUB_CMP m1, m10, m11, m2, m6, m7, m0 ; m1 = abs(p1-p0) <= I + ABSSUB_CMP m1, m10, m11, m2, m7, m0 ; m1 = abs(p1-p0) <= I pand m5, m1 - ABSSUB_CMP m1, m12, m13, m2, m6, m7, m0 ; m1 = abs(q1-q0) <= I + ABSSUB_CMP m1, m12, m13, m2, m7, m0 ; m1 = abs(q1-q0) <= I pand m5, m1 - ABSSUB_CMP m1, m13, m14, m2, m6, m7, m0 ; m1 = abs(q2-q1) <= I + ABSSUB_CMP m1, m13, m14, m2, m7, m0 ; m1 = abs(q2-q1) <= I pand m5, m1 - ABSSUB_CMP m1, m14, m15, m2, m6, m7, m0 ; m1 = abs(q3-q2) <= I + ABSSUB_CMP m1, m14, m15, m2, m7, m0 ; m1 = abs(q3-q2) <= I pand m5, m1 ABSSUB m1, m11, m12, m7 ; abs(p0-q0) paddusb m1, m1 ; abs(p0-q0) * 2 @@ -465,9 +465,9 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri ; calc flat8in (if not 44_16) and hev masks mova m6, [pb_81] ; [1 1 1 1 ...]
^ 0x80 %if %2 != 44 - ABSSUB_CMP m2, m8, m11, m6, m4, m5 ; abs(p3 - p0) <= 1 + ABSSUB_CMP m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 mova m8, [pb_80] - ABSSUB_CMP m1, m9, m11, m6, m4, m5, m8 ; abs(p2 - p0) <= 1 + ABSSUB_CMP m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 pand m2, m1 ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) %if %2 == 16 @@ -490,9 +490,9 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri por m0, m5 ; hev final value CMP_LTE m4, m6, m5 ; abs(q1 - q0) <= 1 pand m2, m4 ; (flat8in) - ABSSUB_CMP m1, m14, m12, m6, m4, m5, m8 ; abs(q2 - q0) <= 1 + ABSSUB_CMP m1, m14, m12, m6, m5, m8 ; abs(q2 - q0) <= 1 pand m2, m1 - ABSSUB_CMP m1, m15, m12, m6, m4, m5, m8 ; abs(q3 - q0) <= 1 + ABSSUB_CMP m1, m15, m12, m6, m5, m8 ; abs(q3 - q0) <= 1 pand m2, m1 ; flat8in final value %if %2 == 84 || %2 == 48 pand m2, [mask_mix%2] %endif @@ -516,26 +516,26 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri ; calc flat8out mask mova m8, [P7] mova m9, [P6] - ABSSUB_CMP m1, m8, m11, m6, m4, m5 ; abs(p7 - p0) <= 1 - ABSSUB_CMP m7, m9, m11, m6, m4, m5 ; abs(p6 - p0) <= 1 + ABSSUB_CMP m1, m8, m11, m6, m5 ; abs(p7 - p0) <= 1 + ABSSUB_CMP m7, m9, m11, m6, m5 ; abs(p6 - p0) <= 1 pand m1, m7 mova m8, [P5] mova m9, [P4] - ABSSUB_CMP m7, m8, m11, m6, m4, m5 ; abs(p5 - p0) <= 1 + ABSSUB_CMP m7, m8, m11, m6, m5 ; abs(p5 - p0) <= 1 pand m1, m7 - ABSSUB_CMP m7, m9, m11, m6, m4, m5 ; abs(p4 - p0) <= 1 + ABSSUB_CMP m7, m9, m11, m6, m5 ; abs(p4 - p0) <= 1 pand m1, m7 mova m14, [Q4] mova m15, [Q5] - ABSSUB_CMP m7, m14, m12, m6, m4, m5 ; abs(q4 - q0) <= 1 + ABSSUB_CMP m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 pand m1, m7 - ABSSUB_CMP m7, m15, m12, m6, m4, m5 ; abs(q5 - q0) <= 1 + ABSSUB_CMP m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 pand m1, m7 mova m14, [Q6] mova m15, [Q7] - ABSSUB_CMP m7, m14, m12, m6, m4, m5 ; abs(q4 - q0) <= 1 + ABSSUB_CMP m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 pand m1, m7 - ABSSUB_CMP m7, m15, m12, m6, m4, m5 ; abs(q5 - q0) <= 1 + ABSSUB_CMP m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 pand m1, m7 ; flat8out final value %endif From 418c202c6363c617adbb353a63db983a744e0b34 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sun, 21 Dec 2014 19:34:03 -0500 Subject: [PATCH 05/16] vp9/x86: simplify ABSSUB_CMP by inverting the comparison meaning. --- libavcodec/x86/vp9lpf.asm | 103 +++++++++++++++++++------------------- 1 file changed, 52 insertions(+), 51 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 44db472806..613a104cc6 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -35,6 +35,7 @@ pb_40: times 16 db 0x40 pb_81: times 16 db 0x81 pb_f8: times 16 db 0xf8 pb_fe: times 16 db 0xfe +pb_ff: times 16 db 0xff cextern pw_4 cextern pw_8 @@ -58,20 +59,18 @@ SECTION .text por %1, %4 %endmacro -; %1 = %1<=%2 -%macro CMP_LTE 3-4 ; src/dst, cmp, tmp, pb_80 -%if %0 == 4 - pxor %1, %4 +; %1 = %1>%2 +%macro CMP_GT 2-3 ; src/dst, cmp, pb_80 +%if %0 == 3 + pxor %1, %3 %endif - pcmpgtb %3, %2, %1 ; cmp > src? - pcmpeqb %1, %2 ; cmp == src? XXX: avoid this with a -1/+1 well placed? - por %1, %3 ; cmp >= src?
+ pcmpgtb %1, %2 %endmacro -; %1 = abs(%2-%3) <= %4 -%macro ABSSUB_CMP 5-6 [pb_80]; dst, src1, src2, cmp, tmp, [pb_80] +; %1 = abs(%2-%3) > %4 +%macro ABSSUB_GT 5-6 [pb_80]; dst, src1, src2, cmp, tmp, [pb_80] ABSSUB %1, %2, %3, %5 ; dst = abs(src1-src2) - CMP_LTE %1, %4, %5, %6 ; dst <= cmp + CMP_GT %1, %4, %6 ; dst > cmp %endmacro %macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp @@ -438,17 +437,17 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri SWAP 10, 6, 14 SWAP 11, 7, 15 %endif - ABSSUB_CMP m5, m8, m9, m2, m7, m0 ; m5 = abs(p3-p2) <= I - ABSSUB_CMP m1, m9, m10, m2, m7, m0 ; m1 = abs(p2-p1) <= I - pand m5, m1 - ABSSUB_CMP m1, m10, m11, m2, m7, m0 ; m1 = abs(p1-p0) <= I - pand m5, m1 - ABSSUB_CMP m1, m12, m13, m2, m7, m0 ; m1 = abs(q1-q0) <= I - pand m5, m1 - ABSSUB_CMP m1, m13, m14, m2, m7, m0 ; m1 = abs(q2-q1) <= I - pand m5, m1 - ABSSUB_CMP m1, m14, m15, m2, m7, m0 ; m1 = abs(q3-q2) <= I - pand m5, m1 + ABSSUB_GT m5, m8, m9, m2, m7, m0 ; m5 = abs(p3-p2) <= I + ABSSUB_GT m1, m9, m10, m2, m7, m0 ; m1 = abs(p2-p1) <= I + por m5, m1 + ABSSUB_GT m1, m10, m11, m2, m7, m0 ; m1 = abs(p1-p0) <= I + por m5, m1 + ABSSUB_GT m1, m12, m13, m2, m7, m0 ; m1 = abs(q1-q0) <= I + por m5, m1 + ABSSUB_GT m1, m13, m14, m2, m7, m0 ; m1 = abs(q2-q1) <= I + por m5, m1 + ABSSUB_GT m1, m14, m15, m2, m7, m0 ; m1 = abs(q3-q2) <= I + por m5, m1 ABSSUB m1, m11, m12, m7 ; abs(p0-q0) paddusb m1, m1 ; abs(p0-q0) * 2 ABSSUB m2, m10, m13, m7 ; abs(p1-q1) @@ -456,19 +455,19 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri psrlq m2, 1 ; abs(p1-q1)/2 paddusb m1, m2 ; abs(p0-q0)*2 + abs(p1-q1)/2 pxor m1, m0 - pcmpgtb m4, m3, m1 ; E > X? - pcmpeqb m3, m1 ; E == X? - por m3, m4 ; E >= X? - pand m3, m5 ; fm final value + pcmpgtb m1, m3 + por m1, m5 ; fm final value + SWAP 1, 3 + pxor m3, [pb_ff] ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3) ; calc flat8in (if not 44_16) and hev masks mova m6, [pb_81] ; [1 1 1 1 ...] 
^ 0x80 %if %2 != 44 - ABSSUB_CMP m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 + ABSSUB_GT m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 mova m8, [pb_80] - ABSSUB_CMP m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 - pand m2, m1 + ABSSUB_GT m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 + por m2, m1 ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) %if %2 == 16 %if cpuflag(ssse3) pxor m7, m8 pxor m4, m8 pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) - CMP_LTE m4, m6, m5 ; abs(p1 - p0) <= 1 - pand m2, m4 ; (flat8in) + CMP_GT m4, m6 ; abs(p1 - p0) <= 1 + por m2, m4 ; (flat8in) ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) pxor m4, m8 pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) por m0, m5 ; hev final value - CMP_LTE m4, m6, m5 ; abs(q1 - q0) <= 1 - pand m2, m4 ; (flat8in) - ABSSUB_CMP m1, m14, m12, m6, m5, m8 ; abs(q2 - q0) <= 1 - pand m2, m1 - ABSSUB_CMP m1, m15, m12, m6, m5, m8 ; abs(q3 - q0) <= 1 - pand m2, m1 ; flat8in final value + CMP_GT m4, m6 ; abs(q1 - q0) <= 1 + por m2, m4 ; (flat8in) + ABSSUB_GT m1, m14, m12, m6, m5, m8 ; abs(q2 - q0) <= 1 + por m2, m1 + ABSSUB_GT m1, m15, m12, m6, m5, m8 ; abs(q3 - q0) <= 1 + por m2, m1 ; flat8in final value + pxor m2, [pb_ff] %if %2 == 84 || %2 == 48 pand m2, [mask_mix%2] %endif @@ -516,27 +516,28 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri ; calc flat8out mask mova m8, [P7] mova m9, [P6] - ABSSUB_CMP m1, m8, m11, m6, m5 ; abs(p7 - p0) <= 1 - ABSSUB_CMP m7, m9, m11, m6, m5 ; abs(p6 - p0) <= 1 - pand m1, m7 + ABSSUB_GT m1, m8, m11, m6, m5 ; abs(p7 - p0) <= 1 + ABSSUB_GT m7, m9, m11, m6, m5 ; abs(p6 - p0) <= 1 + por m1, m7 mova m8, [P5] mova m9, [P4] - ABSSUB_CMP m7, m8, m11, m6, m5 ; abs(p5 - p0) <= 1 - pand m1, m7 - ABSSUB_CMP m7, m9, m11, m6, m5 ; abs(p4 - p0) <= 1 - pand m1, m7 + ABSSUB_GT m7, m8, m11, m6, m5 ; abs(p5 - p0) <= 1 + por m1, m7 + ABSSUB_GT m7, m9, m11, m6, m5 ; abs(p4 - p0) <= 1 + por m1, m7 mova m14, [Q4] mova m15, [Q5] - ABSSUB_CMP m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 - pand m1, m7 - ABSSUB_CMP m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 - pand m1, m7 + ABSSUB_GT m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 + por m1, m7 + ABSSUB_GT m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 + por m1, m7 mova m14, [Q6] mova m15, [Q7] - ABSSUB_CMP m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 - pand m1, m7 - ABSSUB_CMP m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 - pand m1, m7 ; flat8out final value + ABSSUB_GT m7, m14, m12, m6, m5 ; abs(q4 - q0) <= 1 + por m1, m7 + ABSSUB_GT m7, m15, m12, m6, m5 ; abs(q5 - q0) <= 1 + por m1, m7 ; flat8out final value + pxor m1, [pb_ff] %endif From e42409479f09761a5d613b3fdba72062d909f0ee Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Wed, 24 Dec 2014 14:22:19 -0500 Subject: [PATCH 06/16] vp9/x86: move variable assigned inside macro branch. The value is not used outside the branch. --- libavcodec/x86/vp9lpf.asm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 613a104cc6..73b965ada5 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -462,8 +462,8 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3) ; calc flat8in (if not 44_16) and hev masks - mova m6, [pb_81] ; [1 1 1 1 ...] ^ 0x80 %if %2 != 44 + mova m6, [pb_81] ; [1 1 1 1 ...]
^ 0x80 ABSSUB_GT m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 mova m8, [pb_80] ABSSUB_GT m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 From 8ea2194ebb5786d2e25e61bedc28e004b2d760be Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Wed, 24 Dec 2014 14:17:28 -0500 Subject: [PATCH 07/16] vp9/x86: store unpacked intermediates for filter6/14 on stack. filter16 goes from 508 to 482 (h) or 346 to 314 (v) cycles; filter88 goes from 240 to 238 (h) or 174 to 165 (v) cycles, measured on TOS. --- libavcodec/x86/vp9lpf.asm | 151 ++++++++++++++++++++------------------ 1 file changed, 79 insertions(+), 72 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 73b965ada5..15d415942e 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -79,39 +79,42 @@ SECTION .text por %1, %4 ; new&mask | old&~mask %endmacro -%macro FILTER_SUBx2_ADDx2 8 ; %1=dst %2=h/l %3=cache %4=sub1 %5=sub2 %6=add1 %7=add2 %8=rshift - punpck%2bw %3, %4, m0 - psubw %1, %3 - punpck%2bw %3, %5, m0 - psubw %1, %3 - punpck%2bw %3, %6, m0 - paddw %1, %3 - punpck%2bw %3, %7, m0 +%macro FILTER_SUBx2_ADDx2 9-10 "" ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1 %8=add2 %9=rshift, [unpack] + psubw %3, [rsp+%4+%5*32] + psubw %3, [rsp+%4+%6*32] + paddw %3, [rsp+%4+%7*32] +%ifnidn %10, "" + punpck%2bw %1, %10, m0 + mova [rsp+%4+%8*32], %1 paddw %3, %1 - psraw %1, %3, %8 +%else + paddw %3, [rsp+%4+%8*32] +%endif + psraw %1, %3, %9 %endmacro -%macro FILTER_INIT 8 ; tmp1, tmp2, cacheL, cacheH, dstp, filterid, mask, source - FILTER%6_INIT %1, l, %3 - FILTER%6_INIT %2, h, %4 +; FIXME interleave l/h better (for instruction pairing) +%macro FILTER_INIT 9 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, filterid, mask, source + FILTER%7_INIT %1, l, %3, %6 + 0 + FILTER%7_INIT %2, h, %4, %6 + 16 packuswb %1, %2 - MASK_APPLY %1, %8, %7, %2 + MASK_APPLY %1, %9, %8, %2 mova %5, %1 %endmacro -%macro FILTER_UPDATE 11-14 ; tmp1, tmp2, cacheL, cacheH, dstp, -, -, +, +, rshift, mask, [source], [preload reg + value] -%if %0 == 13 ; no source + preload - mova %12, %13 -%elif %0 == 14 ; source + preload - mova %13, %14 + +%macro FILTER_UPDATE 12-15 "", "" ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, -, -, +, +, rshift, mask, [source], [unpack] +; FIXME interleave this properly with the subx2/addx2 +%if %0 == 15 + mova %14, %15 %endif - FILTER_SUBx2_ADDx2 %1, l, %3, %6, %7, %8, %9, %10 - FILTER_SUBx2_ADDx2 %2, h, %4, %6, %7, %8, %9, %10 + FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14 + FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14 packuswb %1, %2 -%if %0 == 12 || %0 == 14 - MASK_APPLY %1, %12, %11, %2 +%ifnidn %13, "" + MASK_APPLY %1, %13, %12, %2 %else - MASK_APPLY %1, %5, %11, %2 + MASK_APPLY %1, %5, %12, %2 %endif mova %5, %1 %endmacro @@ -151,44 +154,48 @@ SECTION .text paddusb %1, %4 ; add the negatives %endmacro -%macro FILTER6_INIT 3 ; %1=dst %2=h/l %3=cache +%macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off punpck%2bw %1, m14, m0 ; p3: B->W + mova [rsp+%4+0*32], %1 paddw %3, %1, %1 ; p3*2 paddw %3, %1 ; p3*3 punpck%2bw %1, m15, m0 ; p2: B->W + mova [rsp+%4+1*32], %1 paddw %3, %1 ; p3*3 + p2 paddw %3, %1 ; p3*3 + p2*2 punpck%2bw %1, m10, m0 ; p1: B->W + mova [rsp+%4+2*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 punpck%2bw %1, m11, m0 ; p0: B->W + mova [rsp+%4+3*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 punpck%2bw %1, m12, m0 ; q0: B->W + mova [rsp+%4+4*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 + q0 paddw %3, [pw_4] ; p3*3 + p2*2 + p1 + p0 + q0 + 4 
psraw %1, %3, 3 ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3 %endmacro -%macro FILTER14_INIT 3 ; %1=dst %2=h/l %3=cache +%macro FILTER14_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off punpck%2bw %1, m2, m0 ; p7: B->W + mova [rsp+%4+ 8*32], %1 psllw %3, %1, 3 ; p7*8 psubw %3, %1 ; p7*7 punpck%2bw %1, m3, m0 ; p6: B->W + mova [rsp+%4+ 9*32], %1 paddw %3, %1 ; p7*7 + p6 paddw %3, %1 ; p7*7 + p6*2 punpck%2bw %1, m8, m0 ; p5: B->W + mova [rsp+%4+10*32], %1 paddw %3, %1 ; p7*7 + p6*2 + p5 punpck%2bw %1, m9, m0 ; p4: B->W + mova [rsp+%4+11*32], %1 paddw %3, %1 ; p7*7 + p6*2 + p5 + p4 - punpck%2bw %1, m14, m0 ; p3: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + p4 + p3 - punpck%2bw %1, m15, m0 ; p2: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p2 - punpck%2bw %1, m10, m0 ; p1: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p1 - punpck%2bw %1, m11, m0 ; p0: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p0 - punpck%2bw %1, m12, m0 ; q0: B->W - paddw %3, %1 ; p7*7 + p6*2 + p5 + .. + p0 + q0 + paddw %3, [rsp+%4+ 0*32] ; p7*7 + p6*2 + p5 + p4 + p3 + paddw %3, [rsp+%4+ 1*32] ; p7*7 + p6*2 + p5 + .. + p2 + paddw %3, [rsp+%4+ 2*32] ; p7*7 + p6*2 + p5 + .. + p1 + paddw %3, [rsp+%4+ 3*32] ; p7*7 + p6*2 + p5 + .. + p0 + paddw %3, [rsp+%4+ 4*32] ; p7*7 + p6*2 + p5 + .. + p0 + q0 paddw %3, [pw_8] ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8 psraw %1, %3, 4 ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4 %endmacro @@ -306,14 +313,14 @@ SECTION .text %endif %endmacro -%macro LOOPFILTER 3 ; %1=v/h %2=size1 %3=stack +%macro LOOPFILTER 4 ; %1=v/h %2=size1 %3+%4=stack %if UNIX64 -cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 +cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3 + %4, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 %else %if WIN64 -cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3, dst, stride, E, I, mstride, dst2, stride3, mstride3 +cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3 + %4, dst, stride, E, I, mstride, dst2, stride3, mstride3 %else -cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stride3, mstride3 +cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, stride3, mstride3 %define Ed dword r2m %define Id dword r3m %endif @@ -630,12 +637,12 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri mova m15, [P2] mova m8, [Q2] mova m9, [Q3] - FILTER_INIT m4, m5, m6, m7, [P2], 6, m3, m15 ; [p2] - FILTER_UPDATE m6, m7, m4, m5, [P1], m14, m15, m10, m13, 3, m3 ; [p1] -p3 -p2 +p1 +q1 - FILTER_UPDATE m4, m5, m6, m7, [P0], m14, m10, m11, m8, 3, m3 ; [p0] -p3 -p1 +p0 +q2 - FILTER_UPDATE m6, m7, m4, m5, [Q0], m14, m11, m12, m9, 3, m3 ; [q0] -p3 -p0 +q0 +q3 - FILTER_UPDATE m4, m5, m6, m7, [Q1], m15, m12, m13, m9, 3, m3 ; [q1] -p2 -q0 +q1 +q3 - FILTER_UPDATE m6, m7, m4, m5, [Q2], m10, m13, m8, m9, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 + FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m15 ; [p2] + FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", m13 ; [p1] -p3 -p2 +p1 +q1 + FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m8 ; [p0] -p3 -p1 +p0 +q2 + FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", m9 ; [q0] -p3 -p0 +q0 +q3 + FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, "" ; [q1] -p2 -q0 +q1 +q3 + FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 %endif ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) @@ -665,20 +672,20 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, 
mstride, dst2, stri mova m3, [P6] mova m8, [P5] mova m9, [P4] - FILTER_INIT m4, m5, m6, m7, [P6], 14, m1, m3 - FILTER_UPDATE m6, m7, m4, m5, [P5], m2, m3, m8, m13, 4, m1, m8 ; [p5] -p7 -p6 +p5 +q1 - FILTER_UPDATE m4, m5, m6, m7, [P4], m2, m8, m9, m13, 4, m1, m9, m13, [Q2] ; [p4] -p7 -p5 +p4 +q2 - FILTER_UPDATE m6, m7, m4, m5, [P3], m2, m9, m14, m13, 4, m1, m14, m13, [Q3] ; [p3] -p7 -p4 +p3 +q3 - FILTER_UPDATE m4, m5, m6, m7, [P2], m2, m14, m15, m13, 4, m1, m13, [Q4] ; [p2] -p7 -p3 +p2 +q4 - FILTER_UPDATE m6, m7, m4, m5, [P1], m2, m15, m10, m13, 4, m1, m13, [Q5] ; [p1] -p7 -p2 +p1 +q5 - FILTER_UPDATE m4, m5, m6, m7, [P0], m2, m10, m11, m13, 4, m1, m13, [Q6] ; [p0] -p7 -p1 +p0 +q6 - FILTER_UPDATE m6, m7, m4, m5, [Q0], m2, m11, m12, m13, 4, m1, m13, [Q7] ; [q0] -p7 -p0 +q0 +q7 - FILTER_UPDATE m4, m5, m6, m7, [Q1], m3, m12, m2, m13, 4, m1, m2, [Q1] ; [q1] -p6 -q0 +q1 +q7 - FILTER_UPDATE m6, m7, m4, m5, [Q2], m8, m2, m3, m13, 4, m1, m3, [Q2] ; [q2] -p5 -q1 +q2 +q7 - FILTER_UPDATE m4, m5, m6, m7, [Q3], m9, m3, m8, m13, 4, m1, m8, m8, [Q3] ; [q3] -p4 -q2 +q3 +q7 - FILTER_UPDATE m6, m7, m4, m5, [Q4], m14, m8, m9, m13, 4, m1, m9, m9, [Q4] ; [q4] -p3 -q3 +q4 +q7 - FILTER_UPDATE m4, m5, m6, m7, [Q5], m15, m9, m14, m13, 4, m1, m14, m14, [Q5] ; [q5] -p2 -q4 +q5 +q7 - FILTER_UPDATE m6, m7, m4, m5, [Q6], m10, m14, m15, m13, 4, m1, m15, m15, [Q6] ; [q6] -p1 -q5 +q6 +q7 + FILTER_INIT m4, m5, m6, m7, [P6], %4, 14, m1, m3 ; [p6] + FILTER_UPDATE m4, m5, m6, m7, [P5], %4, 8, 9, 10, 5, 4, m1, m8 ; [p5] -p7 -p6 +p5 +q1 + FILTER_UPDATE m4, m5, m6, m7, [P4], %4, 8, 10, 11, 6, 4, m1, m9 ; [p4] -p7 -p5 +p4 +q2 + FILTER_UPDATE m4, m5, m6, m7, [P3], %4, 8, 11, 0, 7, 4, m1, m14 ; [p3] -p7 -p4 +p3 +q3 + FILTER_UPDATE m4, m5, m6, m7, [P2], %4, 8, 0, 1, 12, 4, m1, "", m8, [Q4] ; [p2] -p7 -p3 +p2 +q4 + FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 8, 1, 2, 13, 4, m1, "", m9, [Q5] ; [p1] -p7 -p2 +p1 +q5 + FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 8, 2, 3, 14, 4, m1, "", m14, [Q6] ; [p0] -p7 -p1 +p0 +q6 + FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 8, 3, 4, 15, 4, m1, "", m15, [Q7] ; [q0] -p7 -p0 +q0 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 9, 4, 5, 15, 4, m1, "" ; [q1] -p6 -q0 +q1 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 10, 5, 6, 15, 4, m1, "" ; [q2] -p5 -q1 +q2 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q3], %4, 11, 6, 7, 15, 4, m1, "" ; [q3] -p4 -q2 +q3 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q4], %4, 0, 7, 12, 15, 4, m1, m8 ; [q4] -p3 -q3 +q4 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q5], %4, 1, 12, 13, 15, 4, m1, m9 ; [q5] -p2 -q4 +q5 +q7 + FILTER_UPDATE m4, m5, m6, m7, [Q6], %4, 2, 13, 14, 15, 4, m1, m14 ; [q6] -p1 -q5 +q6 +q7 %endif %ifidn %1, h @@ -799,22 +806,22 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3, dst, stride, mstride, dst2, stri RET %endmacro -%macro LPF_16_VH 2 -INIT_XMM %2 -LOOPFILTER v, %1, 0 -LOOPFILTER h, %1, 256 +%macro LPF_16_VH 3 +INIT_XMM %3 +LOOPFILTER v, %1, %2, 0 +LOOPFILTER h, %1, %2, 256 %endmacro -%macro LPF_16_VH_ALL_OPTS 1 -LPF_16_VH %1, sse2 -LPF_16_VH %1, ssse3 -LPF_16_VH %1, avx +%macro LPF_16_VH_ALL_OPTS 2 +LPF_16_VH %1, %2, sse2 +LPF_16_VH %1, %2, ssse3 +LPF_16_VH %1, %2, avx %endmacro -LPF_16_VH_ALL_OPTS 16 -LPF_16_VH_ALL_OPTS 44 -LPF_16_VH_ALL_OPTS 48 -LPF_16_VH_ALL_OPTS 84 -LPF_16_VH_ALL_OPTS 88 +LPF_16_VH_ALL_OPTS 16, 512 +LPF_16_VH_ALL_OPTS 44, 0 +LPF_16_VH_ALL_OPTS 48, 256 +LPF_16_VH_ALL_OPTS 84, 256 +LPF_16_VH_ALL_OPTS 88, 256 %endif ; x86-64 From 7f80c3344cc0ddeae216392b2e981b36a51f4cdd Mon Sep 17 00:00:00 2001 From: "Ronald S. 
Bultje" Date: Sat, 27 Dec 2014 14:47:07 -0500 Subject: [PATCH 08/16] vp8/x86: save one register in SIGN_ADD/SUB. --- libavcodec/x86/vp9lpf.asm | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 15d415942e..25b5591c96 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -141,17 +141,17 @@ SECTION .text %endmacro ; clip_u8(u8 + i8) -%macro SIGN_ADD 5 ; dst, u8, i8, tmp1, tmp2 - EXTRACT_POS_NEG %3, %4, %5 - psubusb %1, %2, %4 ; sub the negatives - paddusb %1, %5 ; add the positives +%macro SIGN_ADD 4 ; dst, u8, i8, tmp1 + EXTRACT_POS_NEG %3, %4, %1 + paddusb %1, %2 ; add the positives + psubusb %1, %4 ; sub the negatives %endmacro ; clip_u8(u8 - i8) -%macro SIGN_SUB 5 ; dst, u8, i8, tmp1, tmp2 - EXTRACT_POS_NEG %3, %4, %5 - psubusb %1, %2, %5 ; sub the positives - paddusb %1, %4 ; add the negatives +%macro SIGN_SUB 4 ; dst, u8, i8, tmp1 + EXTRACT_POS_NEG %3, %1, %4 + paddusb %1, %2 ; add the negatives + psubusb %1, %4 ; sub the positives %endmacro %macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off @@ -577,8 +577,8 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, paddsb m4, [pb_3] ; m4: f2 = clip(f + 3, 127) mova m14, [pb_10] ; will be reused in filter4() SRSHIFT3B_2X m6, m4, m14, m7 ; f1 and f2 sign byte shift by 3 - SIGN_SUB m7, m12, m6, m5, m9 ; m7 = q0 - f1 - SIGN_ADD m8, m11, m4, m5, m9 ; m8 = p0 + f2 + SIGN_SUB m7, m12, m6, m5 ; m7 = q0 - f1 + SIGN_ADD m8, m11, m4, m5 ; m8 = p0 + f2 %if %2 != 44 pandn m6, m2, m3 ; ~mask(in) & mask(fm) pand m6, m0 ; (~mask(in) & mask(fm)) & mask(hev) @@ -606,18 +606,18 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, %define q0tmp m2 pandn m0, m3 %endif - SIGN_SUB q0tmp, m12, m6, m4, m14 ; q0 - f1 + SIGN_SUB q0tmp, m12, m6, m4 ; q0 - f1 MASK_APPLY q0tmp, m7, m0, m5 ; filter4(q0) & mask mova [Q0], q0tmp - SIGN_ADD p0tmp, m11, m15, m4, m14 ; p0 + f2 + SIGN_ADD p0tmp, m11, m15, m4 ; p0 + f2 MASK_APPLY p0tmp, m8, m0, m5 ; filter4(p0) & mask mova [P0], p0tmp paddb m6, [pb_80] ; pxor m8, m8 ; f=(f1+1)>>1 pavgb m6, m8 ; psubb m6, [pb_40] ; - SIGN_ADD m7, m10, m6, m8, m9 ; p1 + f - SIGN_SUB m4, m13, m6, m8, m9 ; q1 - f + SIGN_ADD m7, m10, m6, m8 ; p1 + f + SIGN_SUB m4, m13, m6, m8 ; q1 - f MASK_APPLY m7, m10, m0, m14 ; m7 = filter4(p1) MASK_APPLY m4, m13, m0, m14 ; m4 = filter4(q1) mova [P1], m7 From 75f8e520897f58fb509238c7548bfa9f960db899 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sat, 27 Dec 2014 15:08:48 -0500 Subject: [PATCH 09/16] vp9/x86: make filter_44_v work on 32-bit. 
--- libavcodec/x86/vp9dsp_init.c | 4 +- libavcodec/x86/vp9lpf.asm | 154 ++++++++++++++++++++++------------- 2 files changed, 99 insertions(+), 59 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 722b525426..f3c8dc6262 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -346,7 +346,9 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \ dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \ dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \ - dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ + } \ + dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ + if (ARCH_X86_64) { \ dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \ dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \ dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 25b5591c96..e4730d67bb 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -22,8 +22,6 @@ %include "libavutil/x86/x86util.asm" -%if ARCH_X86_64 - SECTION_RODATA cextern pb_3 @@ -54,8 +52,15 @@ SECTION .text ; %1 = abs(%2-%3) %macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp +%if ARCH_X86_64 psubusb %1, %3, %2 psubusb %4, %2, %3 +%else + mova %1, %3 + mova %4, %2 + psubusb %1, %2 + psubusb %4, %3 +%endif por %1, %4 %endmacro @@ -427,6 +432,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, mova m0, [pb_80] pxor m2, m0 pxor m3, m0 +%if ARCH_X86_64 %ifidn %1, v mova m8, [P3] mova m9, [P2] @@ -444,20 +450,38 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, SWAP 10, 6, 14 SWAP 11, 7, 15 %endif - ABSSUB_GT m5, m8, m9, m2, m7, m0 ; m5 = abs(p3-p2) <= I - ABSSUB_GT m1, m9, m10, m2, m7, m0 ; m1 = abs(p2-p1) <= I +%define rp3 m8 +%define rp2 m9 +%define rp1 m10 +%define rp0 m11 +%define rq0 m12 +%define rq1 m13 +%define rq2 m14 +%define rq3 m15 +%else +%define rp3 [P3] +%define rp2 [P2] +%define rp1 [P1] +%define rp0 [P0] +%define rq0 [Q0] +%define rq1 [Q1] +%define rq2 [Q2] +%define rq3 [Q3] +%endif + ABSSUB_GT m5, rp3, rp2, m2, m7, m0 ; m5 = abs(p3-p2) <= I + ABSSUB_GT m1, rp2, rp1, m2, m7, m0 ; m1 = abs(p2-p1) <= I por m5, m1 - ABSSUB_GT m1, m10, m11, m2, m7, m0 ; m1 = abs(p1-p0) <= I + ABSSUB_GT m1, rp1, rp0, m2, m7, m0 ; m1 = abs(p1-p0) <= I por m5, m1 - ABSSUB_GT m1, m12, m13, m2, m7, m0 ; m1 = abs(q1-q0) <= I + ABSSUB_GT m1, rq0, rq1, m2, m7, m0 ; m1 = abs(q1-q0) <= I por m5, m1 - ABSSUB_GT m1, m13, m14, m2, m7, m0 ; m1 = abs(q2-q1) <= I + ABSSUB_GT m1, rq1, rq2, m2, m7, m0 ; m1 = abs(q2-q1) <= I por m5, m1 - ABSSUB_GT m1, m14, m15, m2, m7, m0 ; m1 = abs(q3-q2) <= I + ABSSUB_GT m1, rq2, rq3, m2, m7, m0 ; m1 = abs(q3-q2) <= I por m5, m1 - ABSSUB m1, m11, m12, m7 ; abs(p0-q0) + ABSSUB m1, rp0, rq0, m7 ; abs(p0-q0) paddusb m1, m1 ; abs(p0-q0) * 2 - ABSSUB m2, m10, m13, m7 ; abs(p1-q1) + ABSSUB m2, rp1, rq1, m7 ; abs(p1-q1) pand m2, [pb_fe] ; drop lsb so shift can work psrlq m2, 1 ; abs(p1-q1)/2 paddusb m1, m2 ; abs(p0-q0)*2 + abs(p1-q1)/2 @@ -509,10 +533,10 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, movd m7, Hd SPLATB_MIX m7 pxor m7, m6 - ABSSUB m4, m10, m11, m1 ; abs(p1 - p0) + ABSSUB m4, rp1, rp0, m1 ; abs(p1 - p0) pxor m4, m6 pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) - ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) + ABSSUB 
m4, rq1, rq0, m1 ; abs(q1 - q0) pxor m4, m6 pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) por m0, m5 ; hev final value @@ -563,66 +587,74 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, ; filter2() %if %2 != 44 mova m6, [pb_80] ; already in m6 if 44_16 + SWAP 2, 15 + SWAP 1, 8 %endif - pxor m15, m12, m6 ; q0 ^ 0x80 - pxor m14, m11, m6 ; p0 ^ 0x80 - psubsb m15, m14 ; (signed) q0 - p0 - pxor m4, m10, m6 ; p1 ^ 0x80 - pxor m5, m13, m6 ; q1 ^ 0x80 + pxor m2, m6, rq0 ; q0 ^ 0x80 + pxor m4, m6, rp0 ; p0 ^ 0x80 + psubsb m2, m4 ; (signed) q0 - p0 + pxor m4, m6, rp1 ; p1 ^ 0x80 + pxor m5, m6, rq1 ; q1 ^ 0x80 psubsb m4, m5 ; (signed) p1 - q1 - paddsb m4, m15 ; (q0 - p0) + (p1 - q1) - paddsb m4, m15 ; 2*(q0 - p0) + (p1 - q1) - paddsb m4, m15 ; 3*(q0 - p0) + (p1 - q1) + paddsb m4, m2 ; (q0 - p0) + (p1 - q1) + paddsb m4, m2 ; 2*(q0 - p0) + (p1 - q1) + paddsb m4, m2 ; 3*(q0 - p0) + (p1 - q1) paddsb m6, m4, [pb_4] ; m6: f1 = clip(f + 4, 127) paddsb m4, [pb_3] ; m4: f2 = clip(f + 3, 127) +%if ARCH_X86_64 mova m14, [pb_10] ; will be reused in filter4() - SRSHIFT3B_2X m6, m4, m14, m7 ; f1 and f2 sign byte shift by 3 - SIGN_SUB m7, m12, m6, m5 ; m7 = q0 - f1 - SIGN_ADD m8, m11, m4, m5 ; m8 = p0 + f2 +%define rb10 m14 +%else +%define rb10 [pb_10] +%endif + SRSHIFT3B_2X m6, m4, rb10, m7 ; f1 and f2 sign byte shift by 3 + SIGN_SUB m7, rq0, m6, m5 ; m7 = q0 - f1 + SIGN_ADD m1, rp0, m4, m5 ; m1 = p0 + f2 %if %2 != 44 - pandn m6, m2, m3 ; ~mask(in) & mask(fm) + pandn m6, m15, m3 ; ~mask(in) & mask(fm) pand m6, m0 ; (~mask(in) & mask(fm)) & mask(hev) %else pand m6, m3, m0 %endif - MASK_APPLY m7, m12, m6, m5 ; m7 = filter2(q0) & mask / we write it in filter4() - MASK_APPLY m8, m11, m6, m5 ; m8 = filter2(p0) & mask / we write it in filter4() + MASK_APPLY m7, rq0, m6, m5 ; m7 = filter2(q0) & mask / we write it in filter4() + MASK_APPLY m1, rp0, m6, m5 ; m1 = filter2(p0) & mask / we write it in filter4() - ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0) + ; (m0: hev, m1: p0', m2: q0-p0, m3: fm, m7: q0', [m8: flat8out], m10..13: p1 p0 q0 q1, m14: pb_10, [m15: flat8in], ) ; filter4() - mova m4, m15 - paddsb m15, m4 ; 2 * (q0 - p0) - paddsb m15, m4 ; 3 * (q0 - p0) - paddsb m6, m15, [pb_4] ; m6: f1 = clip(f + 4, 127) - paddsb m15, [pb_3] ; m15: f2 = clip(f + 3, 127) - SRSHIFT3B_2X m6, m15, m14, m9 ; f1 and f2 sign byte shift by 3 + mova m4, m2 + paddsb m2, m4 ; 2 * (q0 - p0) + paddsb m2, m4 ; 3 * (q0 - p0) + paddsb m6, m2, [pb_4] ; m6: f1 = clip(f + 4, 127) + paddsb m2, [pb_3] ; m2: f2 = clip(f + 3, 127) + SRSHIFT3B_2X m6, m2, rb10, m4 ; f1 and f2 sign byte shift by 3 %if %2 != 44 -%define p0tmp m7 -%define q0tmp m9 - pandn m5, m2, m3 ; ~mask(in) & mask(fm) + pandn m5, m15, m3 ; ~mask(in) & mask(fm) pandn m0, m5 ; ~mask(hev) & (~mask(in) & mask(fm)) %else -%define p0tmp m1 -%define q0tmp m2 pandn m0, m3 %endif - SIGN_SUB q0tmp, m12, m6, m4 ; q0 - f1 - MASK_APPLY q0tmp, m7, m0, m5 ; filter4(q0) & mask - mova [Q0], q0tmp - SIGN_ADD p0tmp, m11, m15, m4 ; p0 + f2 - MASK_APPLY p0tmp, m8, m0, m5 ; filter4(p0) & mask - mova [P0], p0tmp + SIGN_SUB m5, rq0, m6, m4 ; q0 - f1 + MASK_APPLY m5, m7, m0, m4 ; filter4(q0) & mask + mova [Q0], m5 + SIGN_ADD m7, rp0, m2, m4 ; p0 + f2 + MASK_APPLY m7, m1, m0, m4 ; filter4(p0) & mask + mova [P0], m7 paddb m6, [pb_80] ; - pxor m8, m8 ; f=(f1+1)>>1 - pavgb m6, m8 ; + pxor m1, m1 ; f=(f1+1)>>1 + pavgb m6, m1 ; psubb m6, [pb_40] ; - SIGN_ADD m7, m10, m6, m8 ; p1 + f - SIGN_SUB m4, m13, 
m6, m8 ; q1 - f - MASK_APPLY m7, m10, m0, m14 ; m7 = filter4(p1) - MASK_APPLY m4, m13, m0, m14 ; m4 = filter4(q1) - mova [P1], m7 + SIGN_ADD m1, rp1, m6, m2 ; p1 + f + SIGN_SUB m4, rq1, m6, m2 ; q1 - f + MASK_APPLY m1, rp1, m0, m2 ; m1 = filter4(p1) + MASK_APPLY m4, rq1, m0, m2 ; m4 = filter4(q1) + mova [P1], m1 mova [Q1], m4 +%if %2 != 44 +SWAP 1, 8 +SWAP 2, 15 +%endif + ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) ; filter6() %if %2 != 44 @@ -725,13 +757,15 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, movu [Q6], m14 movu [Q7], m15 %elif %2 == 44 - SWAP 0, 7 ; m0 = p1 + SWAP 0, 1 ; m0 = p1 + SWAP 1, 7 ; m1 = p0 + SWAP 2, 5 ; m2 = q0 SWAP 3, 4 ; m3 = q1 DEFINE_REAL_P7_TO_Q7 2 - SBUTTERFLY bw, 0, 1, 8 - SBUTTERFLY bw, 2, 3, 8 - SBUTTERFLY wd, 0, 2, 8 - SBUTTERFLY wd, 1, 3, 8 + SBUTTERFLY bw, 0, 1, 4 + SBUTTERFLY bw, 2, 3, 4 + SBUTTERFLY wd, 0, 2, 4 + SBUTTERFLY wd, 1, 3, 4 movd [P7], m0 movd [P3], m2 movd [Q0], m1 @@ -809,7 +843,9 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, %macro LPF_16_VH 3 INIT_XMM %3 LOOPFILTER v, %1, %2, 0 +%if ARCH_X86_64 LOOPFILTER h, %1, %2, 256 +%endif %endmacro %macro LPF_16_VH_ALL_OPTS 2 @@ -818,10 +854,12 @@ LPF_16_VH %1, %2, ssse3 LPF_16_VH %1, %2, avx %endmacro +%if ARCH_X86_64 LPF_16_VH_ALL_OPTS 16, 512 +%endif LPF_16_VH_ALL_OPTS 44, 0 +%if ARCH_X86_64 LPF_16_VH_ALL_OPTS 48, 256 LPF_16_VH_ALL_OPTS 84, 256 LPF_16_VH_ALL_OPTS 88, 256 - -%endif ; x86-64 +%endif From 6433a9133f418c59c5f60eff0f8a2187b623a2f1 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Sat, 27 Dec 2014 15:12:01 -0500 Subject: [PATCH 10/16] vp9/x86: make filter_88_v work on 32-bit. --- libavcodec/x86/vp9dsp_init.c | 2 +- libavcodec/x86/vp9lpf.asm | 155 ++++++++++++++++++++++++----------- 2 files changed, 109 insertions(+), 48 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index f3c8dc6262..5d375aa9dc 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -354,8 +354,8 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \ dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \ - dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \ } \ + dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \ } while (0) #define init_ipred(sz, opt, t, e) \ diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index e4730d67bb..1e65d8eaca 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -50,6 +50,22 @@ mask_mix48: times 8 db 0x00 SECTION .text +%macro SCRATCH 3 +%if ARCH_X86_64 + SWAP %1, %2 +%else + mova [%3], m%1 +%endif +%endmacro + +%macro UNSCRATCH 3 +%if ARCH_X86_64 + SWAP %1, %2 +%else + mova m%1, [%3] +%endif +%endmacro + ; %1 = abs(%2-%3) %macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp %if ARCH_X86_64 @@ -84,12 +100,26 @@ SECTION .text por %1, %4 ; new&mask | old&~mask %endmacro -%macro FILTER_SUBx2_ADDx2 9-10 "" ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1 %8=add2 %9=rshift, [unpack] +%macro UNPACK 4 +%if ARCH_X86_64 + punpck%1bw %2, %3, %4 +%else + mova %2, %3 + punpck%1bw %2, %4 +%endif +%endmacro + +%macro FILTER_SUBx2_ADDx2 11 ; %1=dst %2=h/l %3=cache %4=stack_off %5=sub1 %6=sub2 %7=add1 + ; %8=add2 %9=rshift, [unpack], [unpack_is_mem_on_x86_32] psubw %3, [rsp+%4+%5*32] psubw %3, [rsp+%4+%6*32] paddw %3, 
[rsp+%4+%7*32] %ifnidn %10, "" +%if %11 == 0 punpck%2bw %1, %10, m0 +%else + UNPACK %2, %1, %10, m0 +%endif mova [rsp+%4+%8*32], %1 paddw %3, %1 %else @@ -108,13 +138,14 @@ SECTION .text %endmacro -%macro FILTER_UPDATE 12-15 "", "" ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, -, -, +, +, rshift, mask, [source], [unpack] +%macro FILTER_UPDATE 12-16 "", "", "", 0 ; tmp1, tmp2, cacheL, cacheH, dstp, stack_off, -, -, +, +, rshift, + ; mask, [source], [unpack + src], [unpack_is_mem_on_x86_32] ; FIXME interleave this properly with the subx2/addx2 -%if %0 == 15 +%ifnidn %15, "" mova %14, %15 %endif - FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14 - FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14 + FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14, %16 + FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14, %16 packuswb %1, %2 %ifnidn %13, "" MASK_APPLY %1, %13, %12, %2 @@ -160,21 +191,21 @@ SECTION .text %endmacro %macro FILTER6_INIT 4 ; %1=dst %2=h/l %3=cache, %4=stack_off - punpck%2bw %1, m14, m0 ; p3: B->W + UNPACK %2, %1, rp3, m0 ; p3: B->W mova [rsp+%4+0*32], %1 paddw %3, %1, %1 ; p3*2 paddw %3, %1 ; p3*3 - punpck%2bw %1, m15, m0 ; p2: B->W + punpck%2bw %1, m2, m0 ; p2: B->W mova [rsp+%4+1*32], %1 paddw %3, %1 ; p3*3 + p2 paddw %3, %1 ; p3*3 + p2*2 - punpck%2bw %1, m10, m0 ; p1: B->W + UNPACK %2, %1, rp1, m0 ; p1: B->W mova [rsp+%4+2*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 - punpck%2bw %1, m11, m0 ; p0: B->W + UNPACK %2, %1, rp0, m0 ; p0: B->W mova [rsp+%4+3*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 - punpck%2bw %1, m12, m0 ; q0: B->W + UNPACK %2, %1, rq0, m0 ; q0: B->W mova [rsp+%4+4*32], %1 paddw %3, %1 ; p3*3 + p2*2 + p1 + p0 + q0 paddw %3, [pw_4] ; p3*3 + p2*2 + p1 + p0 + q0 + 4 @@ -318,14 +349,14 @@ SECTION .text %endif %endmacro -%macro LOOPFILTER 4 ; %1=v/h %2=size1 %3+%4=stack +%macro LOOPFILTER 5 ; %1=v/h %2=size1 %3+%4=stack, %5=32bit stack only %if UNIX64 cglobal vp9_loop_filter_%1_%2_16, 5, 9, 16, %3 + %4, dst, stride, E, I, H, mstride, dst2, stride3, mstride3 %else %if WIN64 cglobal vp9_loop_filter_%1_%2_16, 4, 8, 16, %3 + %4, dst, stride, E, I, mstride, dst2, stride3, mstride3 %else -cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, stride3, mstride3 +cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride, dst2, stride3, mstride3 %define Ed dword r2m %define Id dword r3m %endif @@ -495,11 +526,16 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, ; calc flat8in (if not 44_16) and hev masks %if %2 != 44 mova m6, [pb_81] ; [1 1 1 1 ...] 
^ 0x80 - ABSSUB_GT m2, m8, m11, m6, m5 ; abs(p3 - p0) <= 1 + ABSSUB_GT m2, rp3, rp0, m6, m5 ; abs(p3 - p0) <= 1 +%if ARCH_X86_64 mova m8, [pb_80] - ABSSUB_GT m1, m9, m11, m6, m5, m8 ; abs(p2 - p0) <= 1 +%define rb80 m8 +%else +%define rb80 [pb_80] +%endif + ABSSUB_GT m1, rp2, rp0, m6, m5, rb80 ; abs(p2 - p0) <= 1 por m2, m1 - ABSSUB m4, m10, m11, m5 ; abs(p1 - p0) + ABSSUB m4, rp1, rp0, m5 ; abs(p1 - p0) %if %2 == 16 %if cpuflag(ssse3) pxor m0, m0 @@ -509,20 +545,20 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, movd m7, Hd SPLATB_MIX m7 %endif - pxor m7, m8 - pxor m4, m8 + pxor m7, rb80 + pxor m4, rb80 pcmpgtb m0, m4, m7 ; abs(p1 - p0) > H (1/2 hev condition) CMP_GT m4, m6 ; abs(p1 - p0) <= 1 por m2, m4 ; (flat8in) - ABSSUB m4, m13, m12, m1 ; abs(q1 - q0) - pxor m4, m8 + ABSSUB m4, rq1, rq0, m1 ; abs(q1 - q0) + pxor m4, rb80 pcmpgtb m5, m4, m7 ; abs(q1 - q0) > H (2/2 hev condition) por m0, m5 ; hev final value CMP_GT m4, m6 ; abs(q1 - q0) <= 1 por m2, m4 ; (flat8in) - ABSSUB_GT m1, m14, m12, m6, m5, m8 ; abs(q2 - q0) <= 1 + ABSSUB_GT m1, rq2, rq0, m6, m5, rb80 ; abs(q2 - q0) <= 1 por m2, m1 - ABSSUB_GT m1, m15, m12, m6, m5, m8 ; abs(q3 - q0) <= 1 + ABSSUB_GT m1, rq3, rq0, m6, m5, rb80 ; abs(q3 - q0) <= 1 por m2, m1 ; flat8in final value pxor m2, [pb_ff] %if %2 == 84 || %2 == 48 @@ -587,8 +623,10 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, ; filter2() %if %2 != 44 mova m6, [pb_80] ; already in m6 if 44_16 - SWAP 2, 15 + SCRATCH 2, 15, rsp+%3+%4 +%if %2 == 16 SWAP 1, 8 +%endif %endif pxor m2, m6, rq0 ; q0 ^ 0x80 pxor m4, m6, rp0 ; p0 ^ 0x80 @@ -611,7 +649,12 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, SIGN_SUB m7, rq0, m6, m5 ; m7 = q0 - f1 SIGN_ADD m1, rp0, m4, m5 ; m1 = p0 + f2 %if %2 != 44 +%if ARCH_X86_64 pandn m6, m15, m3 ; ~mask(in) & mask(fm) +%else + mova m6, [rsp+%3+%4] + pandn m6, m3 +%endif pand m6, m0 ; (~mask(in) & mask(fm)) & mask(hev) %else pand m6, m3, m0 @@ -628,7 +671,12 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, paddsb m2, [pb_3] ; m2: f2 = clip(f + 3, 127) SRSHIFT3B_2X m6, m2, rb10, m4 ; f1 and f2 sign byte shift by 3 %if %2 != 44 +%if ARCH_X86_64 pandn m5, m15, m3 ; ~mask(in) & mask(fm) +%else + mova m5, [rsp+%3+%4] + pandn m5, m3 +%endif pandn m0, m5 ; ~mask(hev) & (~mask(in) & mask(fm)) %else pandn m0, m3 @@ -650,31 +698,44 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4, dst, stride, mstride, dst2, mova [P1], m1 mova [Q1], m4 -%if %2 != 44 -SWAP 1, 8 -SWAP 2, 15 -%endif - ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1) ; filter6() %if %2 != 44 pxor m0, m0 %if %2 > 16 - pand m3, m2 +%if ARCH_X86_64 + pand m3, m15 %else - pand m2, m3 ; mask(fm) & mask(in) - pandn m3, m1, m2 ; ~mask(out) & (mask(fm) & mask(in)) + pand m3, [rsp+%3+%4] %endif +%else + pand m15, m3 ; mask(fm) & mask(in) + pandn m3, m8, m15 ; ~mask(out) & (mask(fm) & mask(in)) +%endif +%if ARCH_X86_64 mova m14, [P3] - mova m15, [P2] - mova m8, [Q2] mova m9, [Q3] - FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m15 ; [p2] - FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", m13 ; [p1] -p3 -p2 +p1 +q1 - FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m8 ; [p0] -p3 -p1 +p0 +q2 - FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", m9 ; [q0] -p3 -p0 +q0 +q3 +%define rp3 m14 +%define rq3 m9 +%else +%define rp3 [P3] +%define rq3 [Q3] +%endif + mova m2, [P2] + mova m1, [Q2] + FILTER_INIT m4, m5, m6, m7, 
[P2], %4, 6, m3, m2 ; [p2] + FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", rq1, "", 1 ; [p1] -p3 -p2 +p1 +q1 + FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m1 ; [p0] -p3 -p1 +p0 +q2 + FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", rq3, "", 1 ; [q0] -p3 -p0 +q0 +q3 FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, "" ; [q1] -p2 -q0 +q1 +q3 - FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m8 ; [q2] -p1 -q1 +q2 +q3 + FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m1 ; [q2] -p1 -q1 +q2 +q3 +%endif + +%if %2 != 44 +%if %2 == 16 +SWAP 1, 8 +%endif +SWAP 2, 15 %endif ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2) @@ -840,26 +901,26 @@ SWAP 2, 15 RET %endmacro -%macro LPF_16_VH 3 -INIT_XMM %3 -LOOPFILTER v, %1, %2, 0 +%macro LPF_16_VH 4 +INIT_XMM %4 +LOOPFILTER v, %1, %2, 0, %3 %if ARCH_X86_64 -LOOPFILTER h, %1, %2, 256 +LOOPFILTER h, %1, %2, 256, %3 %endif %endmacro -%macro LPF_16_VH_ALL_OPTS 2 -LPF_16_VH %1, %2, sse2 -LPF_16_VH %1, %2, ssse3 -LPF_16_VH %1, %2, avx +%macro LPF_16_VH_ALL_OPTS 2-3 0 +LPF_16_VH %1, %2, %3, sse2 +LPF_16_VH %1, %2, %3, ssse3 +LPF_16_VH %1, %2, %3, avx %endmacro %if ARCH_X86_64 LPF_16_VH_ALL_OPTS 16, 512 %endif -LPF_16_VH_ALL_OPTS 44, 0 +LPF_16_VH_ALL_OPTS 44, 0, 0 %if ARCH_X86_64 LPF_16_VH_ALL_OPTS 48, 256 LPF_16_VH_ALL_OPTS 84, 256 -LPF_16_VH_ALL_OPTS 88, 256 %endif +LPF_16_VH_ALL_OPTS 88, 256, 16 From 0cc9c23ea171e1b7515334032a5c684472e91bc4 Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 26 Dec 2014 12:10:26 -0500 Subject: [PATCH 11/16] vp9/x86: make filter_48/84_v work on 32-bit. --- libavcodec/x86/vp9dsp_init.c | 8 ++++++-- libavcodec/x86/vp9lpf.asm | 8 +++----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c index 5d375aa9dc..f3a8de3626 100644 --- a/libavcodec/x86/vp9dsp_init.c +++ b/libavcodec/x86/vp9dsp_init.c @@ -350,9 +350,13 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp) dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \ if (ARCH_X86_64) { \ dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \ - dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \ + } \ + dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \ + if (ARCH_X86_64) { \ dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \ - dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \ + } \ + dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \ + if (ARCH_X86_64) { \ dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \ } \ dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \ diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index 1e65d8eaca..94939d8505 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -918,9 +918,7 @@ LPF_16_VH %1, %2, %3, avx %if ARCH_X86_64 LPF_16_VH_ALL_OPTS 16, 512 %endif -LPF_16_VH_ALL_OPTS 44, 0, 0 -%if ARCH_X86_64 -LPF_16_VH_ALL_OPTS 48, 256 -LPF_16_VH_ALL_OPTS 84, 256 -%endif +LPF_16_VH_ALL_OPTS 44, 0, 0 +LPF_16_VH_ALL_OPTS 48, 256, 16 +LPF_16_VH_ALL_OPTS 84, 256, 16 LPF_16_VH_ALL_OPTS 88, 256, 16 From 047088b8c6cf9a2da8385194fb6af748c8583fbf Mon Sep 17 00:00:00 2001 From: "Ronald S. Bultje" Date: Fri, 26 Dec 2014 14:05:23 -0500 Subject: [PATCH 12/16] vp9/x86: make filter_16_v work on 32-bit. 
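The wide filter touches p7..q7, twice what fits in eight xmm registers, so the same register-or-memory aliasing is extended to the outer rows and the unpack step gains a two-operand fallback. Roughly (a commented restatement of the UNPACK macro introduced earlier in this series):

    %macro UNPACK 4 ; %1=l/h %2=dst %3=src (reg or mem) %4=zero reg
    %if ARCH_X86_64
        punpck%1bw %2, %3, %4  ; three-operand form, source stays put
    %else
        mova       %2, %3      ; copy first ...
        punpck%1bw %2, %4      ; ... then interleave in place
    %endif
    %endmacro

On 32-bit the cached-row names (rq4s and friends) collapse to "", which makes FILTER_UPDATE skip the register cache and unpack straight from memory.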
---
 libavcodec/x86/vp9dsp_init.c |   4 +-
 libavcodec/x86/vp9lpf.asm    | 135 +++++++++++++++++++++++++----------
 2 files changed, 99 insertions(+), 40 deletions(-)

diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index f3a8de3626..6e0498647d 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -344,7 +344,9 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
 #define init_lpf(opt) do { \
     if (ARCH_X86_64) { \
         dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \
-        dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
+    } \
+    dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
+    if (ARCH_X86_64) { \
         dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
     } \
     dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \
diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm
index 94939d8505..43e7ef56da 100644
--- a/libavcodec/x86/vp9lpf.asm
+++ b/libavcodec/x86/vp9lpf.asm
@@ -142,7 +142,9 @@ SECTION .text
 ; mask, [source], [unpack + src], [unpack_is_mem_on_x86_32]
 ; FIXME interleave this properly with the subx2/addx2
 %ifnidn %15, ""
+%if %16 == 0 || ARCH_X86_64
     mova %14, %15
+%endif
 %endif
     FILTER_SUBx2_ADDx2 %1, l, %3, %6 + 0, %7, %8, %9, %10, %11, %14, %16
     FILTER_SUBx2_ADDx2 %2, h, %4, %6 + 16, %7, %8, %9, %10, %11, %14, %16
@@ -195,7 +197,7 @@ SECTION .text
     mova [rsp+%4+0*32], %1
     paddw %3, %1, %1                 ; p3*2
     paddw %3, %1                     ; p3*3
-    punpck%2bw %1, m2, m0            ; p2: B->W
+    punpck%2bw %1, m1, m0            ; p2: B->W
     mova [rsp+%4+1*32], %1
     paddw %3, %1                     ; p3*3 + p2
     paddw %3, %1                     ; p3*3 + p2*2
@@ -221,10 +223,10 @@ SECTION .text
     mova [rsp+%4+ 9*32], %1
     paddw %3, %1                     ; p7*7 + p6
     paddw %3, %1                     ; p7*7 + p6*2
-    punpck%2bw %1, m8, m0            ; p5: B->W
+    UNPACK %2, %1, rp5, m0           ; p5: B->W
     mova [rsp+%4+10*32], %1
     paddw %3, %1                     ; p7*7 + p6*2 + p5
-    punpck%2bw %1, m9, m0            ; p4: B->W
+    UNPACK %2, %1, rp4, m0           ; p4: B->W
     mova [rsp+%4+11*32], %1
     paddw %3, %1                     ; p7*7 + p6*2 + p5 + p4
     paddw %3, [rsp+%4+ 0*32]         ; p7*7 + p6*2 + p5 + p4 + p3
@@ -581,28 +583,56 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
 %if %2 == 16
 ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3)
 ; calc flat8out mask
+%if ARCH_X86_64
     mova m8, [P7]
     mova m9, [P6]
-    ABSSUB_GT m1, m8, m11, m6, m5    ; abs(p7 - p0) <= 1
-    ABSSUB_GT m7, m9, m11, m6, m5    ; abs(p6 - p0) <= 1
+%define rp7 m8
+%define rp6 m9
+%else
+%define rp7 [P7]
+%define rp6 [P6]
+%endif
+    ABSSUB_GT m1, rp7, rp0, m6, m5   ; abs(p7 - p0) <= 1
+    ABSSUB_GT m7, rp6, rp0, m6, m5   ; abs(p6 - p0) <= 1
     por m1, m7
+%if ARCH_X86_64
     mova m8, [P5]
    mova m9, [P4]
-    ABSSUB_GT m7, m8, m11, m6, m5    ; abs(p5 - p0) <= 1
+%define rp5 m8
+%define rp4 m9
+%else
+%define rp5 [P5]
+%define rp4 [P4]
+%endif
+    ABSSUB_GT m7, rp5, rp0, m6, m5   ; abs(p5 - p0) <= 1
     por m1, m7
-    ABSSUB_GT m7, m9, m11, m6, m5    ; abs(p4 - p0) <= 1
+    ABSSUB_GT m7, rp4, rp0, m6, m5   ; abs(p4 - p0) <= 1
     por m1, m7
+%if ARCH_X86_64
     mova m14, [Q4]
     mova m15, [Q5]
-    ABSSUB_GT m7, m14, m12, m6, m5   ; abs(q4 - q0) <= 1
+%define rq4 m14
+%define rq5 m15
+%else
+%define rq4 [Q4]
+%define rq5 [Q5]
+%endif
+    ABSSUB_GT m7, rq4, rq0, m6, m5   ; abs(q4 - q0) <= 1
     por m1, m7
-    ABSSUB_GT m7, m15, m12, m6, m5   ; abs(q5 - q0) <= 1
+    ABSSUB_GT m7, rq5, rq0, m6, m5   ; abs(q5 - q0) <= 1
     por m1, m7
+%if ARCH_X86_64
     mova m14, [Q6]
     mova m15, [Q7]
-    ABSSUB_GT m7, m14, m12, m6, m5   ; abs(q4 - q0) <= 1
+%define rq6 m14
+%define rq7 m15
+%else
+%define rq6 [Q6]
+%define rq7 [Q7]
+%endif
+    ABSSUB_GT m7, rq6, rq0, m6, m5   ; abs(q6 - q0) <= 1
     por m1, m7
-    ABSSUB_GT m7, m15, m12, m6, m5   ; abs(q5 - q0) <= 1
+    ABSSUB_GT m7, rq7, rq0, m6, m5   ; abs(q7 - q0) <= 1
     por m1, m7                       ; flat8out final value
     pxor m1, [pb_ff]
 %endif
@@ -625,7 +655,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     mova m6, [pb_80]                 ; already in m6 if 44_16
     SCRATCH 2, 15, rsp+%3+%4
 %if %2 == 16
-    SWAP 1, 8
+    SCRATCH 1, 8, rsp+%3+%4+16
 %endif
 %endif
     pxor m2, m6, rq0                 ; q0 ^ 0x80
@@ -698,19 +728,24 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     mova [P1], m1
     mova [Q1], m4
+%if %2 != 44
+    UNSCRATCH 2, 15, rsp+%3+%4
+%endif
+
 ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1)
 ; filter6()
 %if %2 != 44
     pxor m0, m0
 %if %2 > 16
+    pand m3, m2
+%else
+    pand m2, m3                      ; mask(fm) & mask(in)
 %if ARCH_X86_64
-    pand m3, m15
+    pandn m3, m8, m2                 ; ~mask(out) & (mask(fm) & mask(in))
 %else
-    pand m3, [rsp+%3+%4]
+    mova m3, [rsp+%3+%4+16]
+    pandn m3, m2
 %endif
-%else
-    pand m15, m3                     ; mask(fm) & mask(in)
-    pandn m3, m8, m15                ; ~mask(out) & (mask(fm) & mask(in))
 %endif
 %if ARCH_X86_64
     mova m14, [P3]
@@ -721,21 +756,18 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
 %define rp3 [P3]
 %define rq3 [Q3]
 %endif
-    mova m2, [P2]
+    mova m1, [P2]
+    FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m1                           ; [p2]
     mova m1, [Q2]
-    FILTER_INIT m4, m5, m6, m7, [P2], %4, 6, m3, m2 ; [p2]
     FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 0, 1, 2, 5, 3, m3, "", rq1, "", 1 ; [p1] -p3 -p2 +p1 +q1
-    FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m1 ; [p0] -p3 -p1 +p0 +q2
+    FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 0, 2, 3, 6, 3, m3, "", m1         ; [p0] -p3 -p1 +p0 +q2
     FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 0, 3, 4, 7, 3, m3, "", rq3, "", 1 ; [q0] -p3 -p0 +q0 +q3
-    FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, "" ; [q1] -p2 -q0 +q1 +q3
-    FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m1 ; [q2] -p1 -q1 +q2 +q3
+    FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 1, 4, 5, 7, 3, m3, ""             ; [q1] -p2 -q0 +q1 +q3
+    FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 2, 5, 6, 7, 3, m3, m1             ; [q2] -p1 -q1 +q2 +q3
 %endif
-%if %2 != 44
 %if %2 == 16
-SWAP 1, 8
-%endif
-SWAP 2, 15
+    UNSCRATCH 1, 8, rsp+%3+%4+16
 %endif
 
 ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2)
@@ -763,22 +795,49 @@ SWAP 2, 15
     pand m1, m2                      ; mask(out) & (mask(fm) & mask(in))
     mova m2, [P7]
     mova m3, [P6]
+%if ARCH_X86_64
     mova m8, [P5]
     mova m9, [P4]
+%define rp5 m8
+%define rp4 m9
+%define rp5s m8
+%define rp4s m9
+%define rp3s m14
+%define rq4 m8
+%define rq5 m9
+%define rq6 m14
+%define rq7 m15
+%define rq4s m8
+%define rq5s m9
+%define rq6s m14
+%else
+%define rp5 [P5]
+%define rp4 [P4]
+%define rp5s ""
+%define rp4s ""
+%define rp3s ""
+%define rq4 [Q4]
+%define rq5 [Q5]
+%define rq6 [Q6]
+%define rq7 [Q7]
+%define rq4s ""
+%define rq5s ""
+%define rq6s ""
+%endif
     FILTER_INIT m4, m5, m6, m7, [P6], %4, 14, m1, m3                          ; [p6]
-    FILTER_UPDATE m4, m5, m6, m7, [P5], %4, 8, 9, 10, 5, 4, m1, m8            ; [p5] -p7 -p6 +p5 +q1
-    FILTER_UPDATE m4, m5, m6, m7, [P4], %4, 8, 10, 11, 6, 4, m1, m9           ; [p4] -p7 -p5 +p4 +q2
-    FILTER_UPDATE m4, m5, m6, m7, [P3], %4, 8, 11, 0, 7, 4, m1, m14           ; [p3] -p7 -p4 +p3 +q3
-    FILTER_UPDATE m4, m5, m6, m7, [P2], %4, 8, 0, 1, 12, 4, m1, "", m8, [Q4]  ; [p2] -p7 -p3 +p2 +q4
-    FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 8, 1, 2, 13, 4, m1, "", m9, [Q5]  ; [p1] -p7 -p2 +p1 +q5
-    FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 8, 2, 3, 14, 4, m1, "", m14, [Q6] ; [p0] -p7 -p1 +p0 +q6
-    FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 8, 3, 4, 15, 4, m1, "", m15, [Q7] ; [q0] -p7 -p0 +q0 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [P5], %4, 8, 9, 10, 5, 4, m1, rp5s          ; [p5] -p7 -p6 +p5 +q1
+    FILTER_UPDATE m4, m5, m6, m7, [P4], %4, 8, 10, 11, 6, 4, m1, rp4s         ; [p4] -p7 -p5 +p4 +q2
+    FILTER_UPDATE m4, m5, m6, m7, [P3], %4, 8, 11, 0, 7, 4, m1, rp3s          ; [p3] -p7 -p4 +p3 +q3
+    FILTER_UPDATE m4, m5, m6, m7, [P2], %4, 8, 0, 1, 12, 4, m1, "", rq4, [Q4], 1 ; [p2] -p7 -p3 +p2 +q4
+    FILTER_UPDATE m4, m5, m6, m7, [P1], %4, 8, 1, 2, 13, 4, m1, "", rq5, [Q5], 1 ; [p1] -p7 -p2 +p1 +q5
+    FILTER_UPDATE m4, m5, m6, m7, [P0], %4, 8, 2, 3, 14, 4, m1, "", rq6, [Q6], 1 ; [p0] -p7 -p1 +p0 +q6
+    FILTER_UPDATE m4, m5, m6, m7, [Q0], %4, 8, 3, 4, 15, 4, m1, "", rq7, [Q7], 1 ; [q0] -p7 -p0 +q0 +q7
     FILTER_UPDATE m4, m5, m6, m7, [Q1], %4, 9, 4, 5, 15, 4, m1, ""            ; [q1] -p6 -q0 +q1 +q7
     FILTER_UPDATE m4, m5, m6, m7, [Q2], %4, 10, 5, 6, 15, 4, m1, ""           ; [q2] -p5 -q1 +q2 +q7
     FILTER_UPDATE m4, m5, m6, m7, [Q3], %4, 11, 6, 7, 15, 4, m1, ""           ; [q3] -p4 -q2 +q3 +q7
-    FILTER_UPDATE m4, m5, m6, m7, [Q4], %4, 0, 7, 12, 15, 4, m1, m8           ; [q4] -p3 -q3 +q4 +q7
-    FILTER_UPDATE m4, m5, m6, m7, [Q5], %4, 1, 12, 13, 15, 4, m1, m9          ; [q5] -p2 -q4 +q5 +q7
-    FILTER_UPDATE m4, m5, m6, m7, [Q6], %4, 2, 13, 14, 15, 4, m1, m14         ; [q6] -p1 -q5 +q6 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [Q4], %4, 0, 7, 12, 15, 4, m1, rq4s         ; [q4] -p3 -q3 +q4 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [Q5], %4, 1, 12, 13, 15, 4, m1, rq5s        ; [q5] -p2 -q4 +q5 +q7
+    FILTER_UPDATE m4, m5, m6, m7, [Q6], %4, 2, 13, 14, 15, 4, m1, rq6s        ; [q6] -p1 -q5 +q6 +q7
 %endif
 
 %ifidn %1, h
@@ -915,9 +974,7 @@ LPF_16_VH %1, %2, %3, ssse3
 LPF_16_VH %1, %2, %3, avx
 %endmacro
 
-%if ARCH_X86_64
-LPF_16_VH_ALL_OPTS 16, 512
-%endif
+LPF_16_VH_ALL_OPTS 16, 512, 32
 LPF_16_VH_ALL_OPTS 44, 0, 0
 LPF_16_VH_ALL_OPTS 48, 256, 16
 LPF_16_VH_ALL_OPTS 84, 256, 16

From 8a1cff1c355d031b63e0183c9820dff55b783d61 Mon Sep 17 00:00:00 2001
From: "Ronald S. Bultje"
Date: Fri, 26 Dec 2014 14:48:01 -0500
Subject: [PATCH 13/16] vp9/x86: make filter_44_h work on 32-bit.
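
Only eight XMM registers (xmm0-xmm7) are architecturally visible on x86-32,
so the horizontal 44 path cannot keep all sixteen input rows live while
transposing. The load transpose is therefore rebuilt from pairwise punpcklbw
interleaves plus an 8x8 word transpose, with intermediates parked on the
stack. As a minimal illustration of the unpack-based butterfly this relies
on (a hedged C intrinsics sketch, not FFmpeg code; names and data layout are
invented for the example), an 8x8 byte transpose looks like:

    #include <emmintrin.h>

    /* Transpose eight 8-byte rows; r[i] holds row i in its low qword on
     * entry and column i on return. Each unpack round doubles the element
     * width, so three rounds (bytes, words, dwords) finish the job, the
     * same ladder the SBUTTERFLY macros build in the assembly. */
    static void transpose8x8_u8(__m128i r[8])
    {
        __m128i a0 = _mm_unpacklo_epi8(r[0], r[1]);
        __m128i a1 = _mm_unpacklo_epi8(r[2], r[3]);
        __m128i a2 = _mm_unpacklo_epi8(r[4], r[5]);
        __m128i a3 = _mm_unpacklo_epi8(r[6], r[7]);
        __m128i b0 = _mm_unpacklo_epi16(a0, a1);
        __m128i b1 = _mm_unpackhi_epi16(a0, a1);
        __m128i b2 = _mm_unpacklo_epi16(a2, a3);
        __m128i b3 = _mm_unpackhi_epi16(a2, a3);
        __m128i c0 = _mm_unpacklo_epi32(b0, b2);  /* columns 0 and 1 */
        __m128i c1 = _mm_unpackhi_epi32(b0, b2);  /* columns 2 and 3 */
        __m128i c2 = _mm_unpacklo_epi32(b1, b3);  /* columns 4 and 5 */
        __m128i c3 = _mm_unpackhi_epi32(b1, b3);  /* columns 6 and 7 */
        r[0] = c0; r[1] = _mm_srli_si128(c0, 8);
        r[2] = c1; r[3] = _mm_srli_si128(c1, 8);
        r[4] = c2; r[5] = _mm_srli_si128(c2, 8);
        r[6] = c3; r[7] = _mm_srli_si128(c3, 8);
    }

In C the compiler spills whatever does not fit in eight registers; the
assembly below has to do that by hand, which is what the SWAPs and [rsp]
round trips in this patch are for.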
---
 libavcodec/x86/vp9dsp_init.c |   4 +-
 libavcodec/x86/vp9lpf.asm    | 138 +++++++++++++++++++----------
 2 files changed, 77 insertions(+), 65 deletions(-)

diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 6e0498647d..37b5e0fc2e 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -346,9 +346,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
         dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \
     } \
     dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
-    if (ARCH_X86_64) { \
-        dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
-    } \
+    dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
     dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \
     if (ARCH_X86_64) { \
         dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \
diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm
index 43e7ef56da..deec530265 100644
--- a/libavcodec/x86/vp9lpf.asm
+++ b/libavcodec/x86/vp9lpf.asm
@@ -289,38 +289,6 @@ SECTION .text
     SWAP %12, %14
 %endmacro
 
-; transpose 16 half lines (high part) to 8 full centered lines
-%macro TRANSPOSE16x8B 16
-    punpcklbw m%1, m%2
-    punpcklbw m%3, m%4
-    punpcklbw m%5, m%6
-    punpcklbw m%7, m%8
-    punpcklbw m%9, m%10
-    punpcklbw m%11, m%12
-    punpcklbw m%13, m%14
-    punpcklbw m%15, m%16
-    SBUTTERFLY wd, %1, %3, %2
-    SBUTTERFLY wd, %5, %7, %2
-    SBUTTERFLY wd, %9, %11, %2
-    SBUTTERFLY wd, %13, %15, %2
-    SBUTTERFLY dq, %1, %5, %2
-    SBUTTERFLY dq, %3, %7, %2
-    SBUTTERFLY dq, %9, %13, %2
-    SBUTTERFLY dq, %11, %15, %2
-    SBUTTERFLY qdq, %1, %9, %2
-    SBUTTERFLY qdq, %3, %11, %2
-    SBUTTERFLY qdq, %5, %13, %2
-    SBUTTERFLY qdq, %7, %15, %2
-    SWAP %5, %1
-    SWAP %6, %9
-    SWAP %7, %1
-    SWAP %8, %13
-    SWAP %9, %3
-    SWAP %10, %11
-    SWAP %11, %1
-    SWAP %12, %15
-%endmacro
-
 %macro DEFINE_REAL_P7_TO_Q7 0-1 0
 %define P7 dstq + 4*mstrideq + %1
 %define P6 dstq + mstride3q + %1
@@ -396,6 +364,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     movx m5, [P2]
     movx m6, [P1]
     movx m7, [P0]
+%if ARCH_X86_64
     movx m8, [Q0]
     movx m9, [Q1]
     movx m10, [Q2]
@@ -404,32 +373,67 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     movx m13, [Q5]
     movx m14, [Q6]
     movx m15, [Q7]
-%define P7 rsp + 0
-%define P6 rsp + 16
-%define P5 rsp + 32
-%define P4 rsp + 48
-%define P3 rsp + 64
-%define P2 rsp + 80
-%define P1 rsp + 96
-%define P0 rsp + 112
-%define Q0 rsp + 128
-%define Q1 rsp + 144
-%define Q2 rsp + 160
-%define Q3 rsp + 176
+%if %2 == 16
+    TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
+%define P7 rsp + 128
+%define P6 rsp + 144
+%define P5 rsp + 160
+%define P4 rsp + 176
 %define Q4 rsp + 192
 %define Q5 rsp + 208
 %define Q6 rsp + 224
 %define Q7 rsp + 240
-
-%if %2 == 16
-    TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
     mova [P7], m0
     mova [P6], m1
     mova [P5], m2
     mova [P4], m3
 %else
-    TRANSPOSE16x8B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+    ; 8x16 transpose
+    punpcklbw m0, m1
+    punpcklbw m2, m3
+    punpcklbw m4, m5
+    punpcklbw m6, m7
+    punpcklbw m8, m9
+    punpcklbw m10, m11
+    punpcklbw m12, m13
+    punpcklbw m14, m15
+    TRANSPOSE8x8W 0, 2, 4, 6, 8, 10, 12, 14, 15
+    SWAP 0, 4
+    SWAP 2, 5
+    SWAP 0, 6
+    SWAP 0, 7
+    SWAP 10, 9
+    SWAP 12, 10
+    SWAP 14, 11
 %endif
+%else ; x86-32
+    punpcklbw m0, m1
+    punpcklbw m2, m3
+    punpcklbw m4, m5
+    punpcklbw m6, m7
+    movx m1, [Q0]
+    movx m3, [Q1]
+    movx m5, [Q2]
+    movx m7, [Q3]
+    punpcklbw m1, m3
+    punpcklbw m5, m7
+    movx m3, [Q4]
+    movx m7, [Q5]
+    punpcklbw m3, m7
+    mova [rsp], m3
+    movx m3, [Q6]
+    movx m7, [Q7]
+    punpcklbw m3, m7
+%endif
+%define P3 rsp + 0
+%define P2 rsp + 16
+%define P1 rsp + 32
+%define P0 rsp + 48
+%define Q0 rsp + 64
+%define Q1 rsp + 80
+%define Q2 rsp + 96
+%define Q3 rsp + 112
+%if ARCH_X86_64
     mova [P3], m4
     mova [P2], m5
     mova [P1], m6
@@ -444,7 +448,17 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     mova [Q6], m14
     mova [Q7], m15
 %endif
+%else ; x86-32
+    TRANSPOSE8x8W 0, 2, 4, 6, 1, 5, 7, 3, [rsp], [Q0], 1
+    mova [P3], m0
+    mova [P2], m2
+    mova [P1], m4
+    mova [P0], m6
+    mova [Q1], m5
+    mova [Q2], m7
+    mova [Q3], m3
 %endif
+%endif ; %1 == h
 
 ; calc fm mask
 %if %2 == 16
@@ -960,22 +974,22 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     RET
 %endmacro
 
-%macro LPF_16_VH 4
-INIT_XMM %4
-LOOPFILTER v, %1, %2, 0, %3
-%if ARCH_X86_64
-LOOPFILTER h, %1, %2, 256, %3
+%macro LPF_16_VH 5
+INIT_XMM %5
+LOOPFILTER v, %1, %2, 0, %4
+%if ARCH_X86_64 || %1 == 44
+LOOPFILTER h, %1, %2, %3, %4
 %endif
 %endmacro
 
-%macro LPF_16_VH_ALL_OPTS 2-3 0
-LPF_16_VH %1, %2, %3, sse2
-LPF_16_VH %1, %2, %3, ssse3
-LPF_16_VH %1, %2, %3, avx
+%macro LPF_16_VH_ALL_OPTS 4
+LPF_16_VH %1, %2, %3, %4, sse2
+LPF_16_VH %1, %2, %3, %4, ssse3
+LPF_16_VH %1, %2, %3, %4, avx
 %endmacro
 
-LPF_16_VH_ALL_OPTS 16, 512, 32
-LPF_16_VH_ALL_OPTS 44, 0, 0
-LPF_16_VH_ALL_OPTS 48, 256, 16
-LPF_16_VH_ALL_OPTS 84, 256, 16
-LPF_16_VH_ALL_OPTS 88, 256, 16
+LPF_16_VH_ALL_OPTS 16, 512, 256, 32
+LPF_16_VH_ALL_OPTS 44, 0, 128, 0
+LPF_16_VH_ALL_OPTS 48, 256, 128, 16
+LPF_16_VH_ALL_OPTS 84, 256, 128, 16
+LPF_16_VH_ALL_OPTS 88, 256, 128, 16

From b26bc3520f9ae0f025d2b3787a3fa33febae24af Mon Sep 17 00:00:00 2001
From: "Ronald S. Bultje"
Date: Fri, 26 Dec 2014 15:15:50 -0500
Subject: [PATCH 14/16] vp9/x86: make filter_48/84/88_h work on 32-bit.
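
The horizontal 48/84/88 store path transposes the filtered pixels back with
a bw/wd/dq butterfly and then scatters each XMM register across two output
rows with movh/movhps pairs. On x86-32 the butterfly no longer fits in eight
registers, so two intermediates take a round trip through [rsp+64] and
[rsp+96] in the middle of the sequence. A hedged C intrinsics sketch of the
split-store idiom follows (illustrative only; the storel-plus-shift pair has
the same effect as the movh/movhps pair in the assembly):

    #include <emmintrin.h>
    #include <stdint.h>

    /* After the butterfly each register holds two transposed 8-pixel rows:
     * write the low qword to one row and the high qword to the next. */
    static inline void store_row_pair(uint8_t *lo, uint8_t *hi, __m128i v)
    {
        _mm_storel_epi64((__m128i *)lo, v);                     /* movh   */
        _mm_storel_epi64((__m128i *)hi, _mm_srli_si128(v, 8));  /* movhps */
    }

The explicit stack spills are the part a compiler would normally insert on
its own; writing them by hand keeps the register budget at eight without
changing the data flow of the transpose.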
---
 libavcodec/x86/vp9dsp_init.c | 12 ++-----
 libavcodec/x86/vp9lpf.asm    | 62 ++++++++++++++++++++++++++----------
 2 files changed, 48 insertions(+), 26 deletions(-)

diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 37b5e0fc2e..7bb31ff742 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -348,17 +348,11 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
     dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
     dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
     dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \
-    if (ARCH_X86_64) { \
-        dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \
-    } \
+    dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_##opt; \
     dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_##opt; \
-    if (ARCH_X86_64) { \
-        dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \
-    } \
+    dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_##opt; \
     dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_##opt; \
-    if (ARCH_X86_64) { \
-        dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \
-    } \
+    dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_##opt; \
     dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_##opt; \
 } while (0)
 
diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm
index deec530265..838b78530e 100644
--- a/libavcodec/x86/vp9lpf.asm
+++ b/libavcodec/x86/vp9lpf.asm
@@ -937,9 +937,12 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     mova m3, [P0]
     mova m4, [Q0]
     mova m5, [Q1]
+%if ARCH_X86_64
     mova m6, [Q2]
+%endif
     mova m7, [Q3]
     DEFINE_REAL_P7_TO_Q7
+%if ARCH_X86_64
     SBUTTERFLY bw, 0, 1, 8
     SBUTTERFLY bw, 2, 3, 8
     SBUTTERFLY bw, 4, 5, 8
@@ -952,22 +955,47 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     SBUTTERFLY dq, 1, 5, 8
     SBUTTERFLY dq, 2, 6, 8
     SBUTTERFLY dq, 3, 7, 8
-    movh [P7], m0
-    movhps [P6], m0
-    movh [Q0], m1
-    movhps [Q1], m1
-    movh [P3], m2
-    movhps [P2], m2
-    movh [Q4], m3
-    movhps [Q5], m3
-    movh [P5], m4
-    movhps [P4], m4
-    movh [Q2], m5
-    movhps [Q3], m5
-    movh [P1], m6
-    movhps [P0], m6
-    movh [Q6], m7
-    movhps [Q7], m7
+%else
+    SBUTTERFLY bw, 0, 1, 6
+    mova [rsp+64], m1
+    mova m6, [rsp+96]
+    SBUTTERFLY bw, 2, 3, 1
+    SBUTTERFLY bw, 4, 5, 1
+    SBUTTERFLY bw, 6, 7, 1
+    SBUTTERFLY wd, 0, 2, 1
+    mova [rsp+96], m2
+    mova m1, [rsp+64]
+    SBUTTERFLY wd, 1, 3, 2
+    SBUTTERFLY wd, 4, 6, 2
+    SBUTTERFLY wd, 5, 7, 2
+    SBUTTERFLY dq, 0, 4, 2
+    SBUTTERFLY dq, 1, 5, 2
+    movh [Q0], m1
+    movhps [Q1], m1
+    mova m2, [rsp+96]
+    SBUTTERFLY dq, 2, 6, 1
+    SBUTTERFLY dq, 3, 7, 1
+%endif
+    SWAP 3, 6
+    SWAP 1, 4
+    movh [P7], m0
+    movhps [P6], m0
+    movh [P5], m1
+    movhps [P4], m1
+    movh [P3], m2
+    movhps [P2], m2
+    movh [P1], m3
+    movhps [P0], m3
+%if ARCH_X86_64
+    movh [Q0], m4
+    movhps [Q1], m4
+%endif
+    movh [Q2], m5
+    movhps [Q3], m5
+    movh [Q4], m6
+    movhps [Q5], m6
+    movh [Q6], m7
+    movhps [Q7], m7
 %endif
 %endif
 
@@ -977,7 +1005,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
 %macro LPF_16_VH 5
 INIT_XMM %5
 LOOPFILTER v, %1, %2, 0, %4
-%if ARCH_X86_64 || %1 == 44
+%if ARCH_X86_64 || %1 != 16
 LOOPFILTER h, %1, %2, %3, %4
 %endif
 %endmacro

From afd8c464b7eaa79b902079be295763469eecbb46 Mon Sep 17 00:00:00 2001
From: "Ronald S. Bultje"
Date: Fri, 26 Dec 2014 17:50:38 -0500
Subject: [PATCH 15/16] vp9/x86: make filter_16_h work on 32-bit.
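
The 16-wide horizontal filter needs a full 16x16 byte transpose on the way
in and out, which on 32-bit is done as 8x8 sub-blocks: two TRANSPOSE8x8B
passes per side, staged through the 256-byte scratch area that
DEFINE_TRANSPOSED_P7_TO_Q7 addresses (P3..Q3 at rsp+0..112, P7..Q7 at
rsp+128..240). A hedged scalar C model of that blocking, with an invented
layout matching the 16-byte row pitch of the scratch:

    #include <stdint.h>
    #include <stddef.h>

    /* 16x16 byte transpose done as four 8x8 sub-transposes into a 256-byte
     * scratch buffer: scratch[col * 16 + row] = src[row][col]. Each 8x8
     * block corresponds to what one TRANSPOSE8x8B invocation handles. */
    static void transpose16x16_blocked(uint8_t scratch[256],
                                       const uint8_t *src, ptrdiff_t stride)
    {
        for (int by = 0; by < 16; by += 8)
            for (int bx = 0; bx < 16; bx += 8)
                for (int y = 0; y < 8; y++)
                    for (int x = 0; x < 8; x++)
                        scratch[(bx + x) * 16 + (by + y)] =
                            src[(by + y) * stride + (bx + x)];
    }

Blocking keeps each pass within the eight available registers; the second
pass lands 8 bytes further into each scratch row, which is what the offset
argument of DEFINE_TRANSPOSED_P7_TO_Q7 8 selects.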
---
 libavcodec/x86/vp9dsp_init.c |   4 +-
 libavcodec/x86/vp9lpf.asm    | 191 ++++++++++++++++++++++++++++-------
 2 files changed, 154 insertions(+), 41 deletions(-)

diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 7bb31ff742..3a306428de 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -342,9 +342,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
     init_subpel2(4, idx, 4, type, opt)
 
 #define init_lpf(opt) do { \
-    if (ARCH_X86_64) { \
-        dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \
-    } \
+    dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_##opt; \
     dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_##opt; \
     dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_##opt; \
     dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_##opt; \
diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm
index 838b78530e..b5f8c0d46d 100644
--- a/libavcodec/x86/vp9lpf.asm
+++ b/libavcodec/x86/vp9lpf.asm
@@ -289,6 +289,30 @@ SECTION .text
     SWAP %12, %14
 %endmacro
 
+%macro TRANSPOSE8x8B 13
+    SBUTTERFLY bw, %1, %2, %7
+    movdq%10 m%7, %9
+    movdqa %11, m%2
+    SBUTTERFLY bw, %3, %4, %2
+    SBUTTERFLY bw, %5, %6, %2
+    SBUTTERFLY bw, %7, %8, %2
+    SBUTTERFLY wd, %1, %3, %2
+    movdqa m%2, %11
+    movdqa %11, m%3
+    SBUTTERFLY wd, %2, %4, %3
+    SBUTTERFLY wd, %5, %7, %3
+    SBUTTERFLY wd, %6, %8, %3
+    SBUTTERFLY dq, %1, %5, %3
+    SBUTTERFLY dq, %2, %6, %3
+    movdqa m%3, %11
+    movh %12, m%2
+    movhps %13, m%2
+    SBUTTERFLY dq, %3, %7, %2
+    SBUTTERFLY dq, %4, %8, %2
+    SWAP %2, %5
+    SWAP %4, %7
+%endmacro
+
 %macro DEFINE_REAL_P7_TO_Q7 0-1 0
 %define P7 dstq + 4*mstrideq + %1
 %define P6 dstq + mstride3q + %1
@@ -308,6 +332,25 @@ SECTION .text
 %define Q7 dst2q + stride3q + %1
 %endmacro
 
+%macro DEFINE_TRANSPOSED_P7_TO_Q7 0-1 0
+%define P3 rsp + 0 + %1
+%define P2 rsp + 16 + %1
+%define P1 rsp + 32 + %1
+%define P0 rsp + 48 + %1
+%define Q0 rsp + 64 + %1
+%define Q1 rsp + 80 + %1
+%define Q2 rsp + 96 + %1
+%define Q3 rsp + 112 + %1
+%define P7 rsp + 128 + %1
+%define P6 rsp + 144 + %1
+%define P5 rsp + 160 + %1
+%define P4 rsp + 176 + %1
+%define Q4 rsp + 192 + %1
+%define Q5 rsp + 208 + %1
+%define Q6 rsp + 224 + %1
+%define Q7 rsp + 240 + %1
+%endmacro
+
 ; ..............AB -> AAAAAAAABBBBBBBB
 %macro SPLATB_MIX 1-2 [mask_mix]
 %if cpuflag(ssse3)
@@ -362,7 +405,9 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     movx m3, [P4]
     movx m4, [P3]
     movx m5, [P2]
+%if ARCH_X86_64 || %2 != 16
     movx m6, [P1]
+%endif
     movx m7, [P0]
 %if ARCH_X86_64
     movx m8, [Q0]
@@ -373,21 +418,14 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     movx m13, [Q5]
     movx m14, [Q6]
     movx m15, [Q7]
+    DEFINE_TRANSPOSED_P7_TO_Q7
 %if %2 == 16
     TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
-%define P7 rsp + 128
-%define P6 rsp + 144
-%define P5 rsp + 160
-%define P4 rsp + 176
-%define Q4 rsp + 192
-%define Q5 rsp + 208
-%define Q6 rsp + 224
-%define Q7 rsp + 240
     mova [P7], m0
     mova [P6], m1
     mova [P5], m2
     mova [P4], m3
-%else
+%else ; %2 == 44/48/84/88
     ; 8x16 transpose
     punpcklbw m0, m1
     punpcklbw m2, m3
     punpcklbw m4, m5
     punpcklbw m6, m7
     punpcklbw m8, m9
     punpcklbw m10, m11
     punpcklbw m12, m13
     punpcklbw m14, m15
     TRANSPOSE8x8W 0, 2, 4, 6, 8, 10, 12, 14, 15
     SWAP 0, 4
     SWAP 2, 5
     SWAP 0, 6
     SWAP 0, 7
     SWAP 10, 9
     SWAP 12, 10
     SWAP 14, 11
-%endif
+%endif ; %2
+    mova [P3], m4
+    mova [P2], m5
+    mova [P1], m6
+    mova [P0], m7
+    mova [Q0], m8
+    mova [Q1], m9
+    mova [Q2], m10
+    mova [Q3], m11
+%if %2 == 16
+    mova [Q4], m12
+    mova [Q5], m13
+    mova [Q6], m14
+    mova [Q7], m15
+%endif ; %2
 %else ; x86-32
+%if %2 == 16
+    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [P1], u, [rsp+%3+%4], [rsp+64], [rsp+80]
+    DEFINE_TRANSPOSED_P7_TO_Q7
+    movh [P7], m0
+    movh [P5], m1
+    movh [P3], m2
+    movh [P1], m3
+    movh [Q2], m5
+    movh [Q4], m6
+    movh [Q6], m7
+    movhps [P6], m0
+    movhps [P4], m1
+    movhps [P2], m2
+    movhps [P0], m3
+    movhps [Q3], m5
+    movhps [Q5], m6
+    movhps [Q7], m7
+    DEFINE_REAL_P7_TO_Q7
+    movx m0, [Q0]
+    movx m1, [Q1]
+    movx m2, [Q2]
+    movx m3, [Q3]
+    movx m4, [Q4]
+    movx m5, [Q5]
+    movx m7, [Q7]
+    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [Q6], u, [rsp+%3+%4], [rsp+72], [rsp+88]
+    DEFINE_TRANSPOSED_P7_TO_Q7 8
+    movh [P7], m0
+    movh [P5], m1
+    movh [P3], m2
+    movh [P1], m3
+    movh [Q2], m5
+    movh [Q4], m6
+    movh [Q6], m7
+    movhps [P6], m0
+    movhps [P4], m1
+    movhps [P2], m2
+    movhps [P0], m3
+    movhps [Q3], m5
+    movhps [Q5], m6
+    movhps [Q7], m7
+    DEFINE_TRANSPOSED_P7_TO_Q7
+%else ; %2 == 44/48/84/88
     punpcklbw m0, m1
     punpcklbw m2, m3
     punpcklbw m4, m5
     punpcklbw m6, m7
@@ -424,31 +519,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     movx m3, [Q6]
     movx m7, [Q7]
     punpcklbw m3, m7
-%endif
-%define P3 rsp + 0
-%define P2 rsp + 16
-%define P1 rsp + 32
-%define P0 rsp + 48
-%define Q0 rsp + 64
-%define Q1 rsp + 80
-%define Q2 rsp + 96
-%define Q3 rsp + 112
-%if ARCH_X86_64
-    mova [P3], m4
-    mova [P2], m5
-    mova [P1], m6
-    mova [P0], m7
-    mova [Q0], m8
-    mova [Q1], m9
-    mova [Q2], m10
-    mova [Q3], m11
-%if %2 == 16
-    mova [Q4], m12
-    mova [Q5], m13
-    mova [Q6], m14
-    mova [Q7], m15
-%endif
-%else ; x86-32
+    DEFINE_TRANSPOSED_P7_TO_Q7
     TRANSPOSE8x8W 0, 2, 4, 6, 1, 5, 7, 3, [rsp], [Q0], 1
     mova [P3], m0
     mova [P2], m2
@@ -457,7 +528,8 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     mova [Q1], m5
     mova [Q2], m7
     mova [Q3], m3
-%endif
+%endif ; %2
+%endif ; x86-32/64
 %endif ; %1 == h
 
 ; calc fm mask
@@ -862,8 +934,11 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     mova m3, [P4]
     mova m4, [P3]
     mova m5, [P2]
+%if ARCH_X86_64
     mova m6, [P1]
+%endif
     mova m7, [P0]
+%if ARCH_X86_64
     mova m8, [Q0]
     mova m9, [Q1]
     mova m10, [Q2]
@@ -890,6 +965,48 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
     movu [Q5], m13
     movu [Q6], m14
     movu [Q7], m15
+%else
+    DEFINE_REAL_P7_TO_Q7
+    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+32], a, [rsp+%3+%4], [Q0], [Q1]
+    movh [P7], m0
+    movh [P5], m1
+    movh [P3], m2
+    movh [P1], m3
+    movh [Q2], m5
+    movh [Q4], m6
+    movh [Q6], m7
+    movhps [P6], m0
+    movhps [P4], m1
+    movhps [P2], m2
+    movhps [P0], m3
+    movhps [Q3], m5
+    movhps [Q5], m6
+    movhps [Q7], m7
+    DEFINE_TRANSPOSED_P7_TO_Q7
+    mova m0, [Q0]
+    mova m1, [Q1]
+    mova m2, [Q2]
+    mova m3, [Q3]
+    mova m4, [Q4]
+    mova m5, [Q5]
+    mova m7, [Q7]
+    DEFINE_REAL_P7_TO_Q7 8
+    TRANSPOSE8x8B 0, 1, 2, 3, 4, 5, 6, 7, [rsp+224], a, [rsp+%3+%4], [Q0], [Q1]
+    movh [P7], m0
+    movh [P5], m1
+    movh [P3], m2
+    movh [P1], m3
+    movh [Q2], m5
+    movh [Q4], m6
+    movh [Q6], m7
+    movhps [P6], m0
+    movhps [P4], m1
+    movhps [P2], m2
+    movhps [P0], m3
+    movhps [Q3], m5
+    movhps [Q5], m6
+    movhps [Q7], m7
+%endif
 %elif %2 == 44
     SWAP 0, 1 ; m0 = p1
     SWAP 1, 7 ; m1 = p0
@@ -1005,9 +1122,7 @@ cglobal vp9_loop_filter_%1_%2_16, 2, 6, 16, %3 + %4 + %5, dst, stride, mstride,
 %macro LPF_16_VH 5
 INIT_XMM %5
 LOOPFILTER v, %1, %2, 0, %4
-%if ARCH_X86_64 || %1 != 16
 LOOPFILTER h, %1, %2, %3, %4
-%endif
 %endmacro
 
 %macro LPF_16_VH_ALL_OPTS 4

From 3aefca68cae603aac77a826de20d94ce24c7ec8f Mon Sep 17 00:00:00 2001
From: "Ronald S. Bultje"
Bultje" Date: Sat, 27 Dec 2014 14:55:04 -0500 Subject: [PATCH 16/16] vp9/x86: add myself to copyright holders for loopfilter assembly. --- libavcodec/x86/vp9lpf.asm | 1 + 1 file changed, 1 insertion(+) diff --git a/libavcodec/x86/vp9lpf.asm b/libavcodec/x86/vp9lpf.asm index b5f8c0d46d..2c4fe214da 100644 --- a/libavcodec/x86/vp9lpf.asm +++ b/libavcodec/x86/vp9lpf.asm @@ -2,6 +2,7 @@ ;* VP9 loop filter SIMD optimizations ;* ;* Copyright (C) 2013-2014 Clément Bœsch +;* Copyright (C) 2014 Ronald S. Bultje ;* ;* This file is part of FFmpeg. ;*