;*****************************************************************************
;* SSE2-optimized HEVC deblocking code
;*****************************************************************************
;* Copyright (C) 2013 VTT
;*
;* Authors: Seppo Tomperi <seppo.tomperi@vtt.fi>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

pw_pixel_max: times 8 dw ((1 << 10)-1)
pw_m1:        times 8 dw -1
pw_m2:        times 8 dw -2
pd_1:         times 4 dd  1

cextern pw_4
cextern pw_8

SECTION .text

INIT_XMM sse2

; expands to [base],...,[base+7*stride]
%define PASS8ROWS(base, base3, stride, stride3) \
    [base], [base+stride], [base+stride*2], [base3], \
    [base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]
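; all callers below pass base3 = base + 3*stride and stride3 = 3*stride, so the
; eight operands end up covering [base] ... [base+7*stride] without needing a
; separate 7*stride register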

; in: 8 rows of 4 bytes in %1..%8
; out: 4 rows of 8 words in m0..m3
%macro TRANSPOSE4x8B_LOAD 8
    movd m0, %1
    movd m2, %2
    movd m1, %3
    movd m3, %4

    punpcklbw m0, m2
    punpcklbw m1, m3
    punpcklwd m0, m1

    movd m4, %5
    movd m6, %6
    movd m5, %7
    movd m3, %8

    punpcklbw m4, m6
    punpcklbw m5, m3
    punpcklwd m4, m5

    punpckhdq m2, m0, m4
    punpckldq m0, m4

    pxor m5, m5
    punpckhbw m1, m0, m5
    punpcklbw m0, m5
    punpckhbw m3, m2, m5
    punpcklbw m2, m5
%endmacro

; in: 4 rows of 8 words in m0..m3
; out: 8 rows of 4 bytes in %1..%8
%macro TRANSPOSE8x4B_STORE 8
    packuswb m0, m0
    packuswb m1, m1
    packuswb m2, m2
    packuswb m3, m3

    punpcklbw m0, m1
    punpcklbw m2, m3

    punpckhwd m6, m0, m2
    punpcklwd m0, m2

    movd %1, m0
    pshufd m0, m0, 0x39
    movd %2, m0
    pshufd m0, m0, 0x39
    movd %3, m0
    pshufd m0, m0, 0x39
    movd %4, m0

    movd %5, m6
    pshufd m6, m6, 0x39
    movd %6, m6
    pshufd m6, m6, 0x39
    movd %7, m6
    pshufd m6, m6, 0x39
    movd %8, m6
%endmacro

; in: 8 rows of 4 words in %1..%8
; out: 4 rows of 8 words in m0..m3
%macro TRANSPOSE4x8W_LOAD 8
    movq m0, %1
    movq m2, %2
    movq m1, %3
    movq m3, %4

    punpcklwd m0, m2
    punpcklwd m1, m3
    punpckhdq m2, m0, m1
    punpckldq m0, m1

    movq m4, %5
    movq m6, %6
    movq m5, %7
    movq m3, %8

    punpcklwd m4, m6
    punpcklwd m5, m3
    punpckhdq m6, m4, m5
    punpckldq m4, m5

    punpckhqdq m1, m0, m4
    punpcklqdq m0, m4
    punpckhqdq m3, m2, m6
    punpcklqdq m2, m6
%endmacro

; in: 4 rows of 8 words in m0..m3
; out: 8 rows of 4 words in %1..%8
%macro TRANSPOSE8x4W_STORE 8
    pxor m5, m5; zeros reg
    CLIPW m0, m5, [pw_pixel_max]
    CLIPW m1, m5, [pw_pixel_max]
    CLIPW m2, m5, [pw_pixel_max]
    CLIPW m3, m5, [pw_pixel_max]

    punpckhwd m4, m0, m1
    punpcklwd m0, m1
    punpckhwd m5, m2, m3
    punpcklwd m2, m3
    punpckhdq m6, m0, m2
    punpckldq m0, m2

    movq %1, m0
    movhps %2, m0
    movq %3, m6
    movhps %4, m6

    punpckhdq m6, m4, m5
    punpckldq m4, m5

    movq %5, m4
    movhps %6, m4
    movq %7, m6
    movhps %8, m6
%endmacro

; in: 8 rows of 8 bytes in %1..%8
; out: 8 rows of 8 words in m0..m7
%macro TRANSPOSE8x8B_LOAD 8
    movq m7, %1
    movq m2, %2
    movq m1, %3
    movq m3, %4

    punpcklbw m7, m2
    punpcklbw m1, m3
    punpcklwd m3, m7, m1
    punpckhwd m7, m1

    movq m4, %5
    movq m6, %6
    movq m5, %7
    movq m15, %8

    punpcklbw m4, m6
    punpcklbw m5, m15
    punpcklwd m9, m4, m5
    punpckhwd m4, m5

    punpckldq m1, m3, m9; 0, 1
    punpckhdq m3, m9; 2, 3

    punpckldq m5, m7, m4; 4, 5
    punpckhdq m7, m4; 6, 7

    pxor m13, m13

    punpcklbw m0, m1, m13; 0 in 16 bit
    punpckhbw m1, m13; 1 in 16 bit

    punpcklbw m2, m3, m13; 2
    punpckhbw m3, m13; 3

    punpcklbw m4, m5, m13; 4
    punpckhbw m5, m13; 5

    punpcklbw m6, m7, m13; 6
    punpckhbw m7, m13; 7
%endmacro

; in: 8 rows of 8 words in m0..m7
; out: 8 rows of 8 bytes in %1..%8
%macro TRANSPOSE8x8B_STORE 8
    packuswb m0, m0
    packuswb m1, m1
    packuswb m2, m2
    packuswb m3, m3
    packuswb m4, m4
    packuswb m5, m5
    packuswb m6, m6
    packuswb m7, m7

    punpcklbw m0, m1
    punpcklbw m2, m3

    punpckhwd m8, m0, m2
    punpcklwd m0, m2

    punpcklbw m4, m5
    punpcklbw m6, m7

    punpckhwd m9, m4, m6
    punpcklwd m4, m6

    punpckhdq m10, m0, m4; 2, 3
    punpckldq m0, m4; 0, 1

    punpckldq m11, m8, m9; 4, 5
    punpckhdq m8, m9; 6, 7
    movq %1, m0
    movhps %2, m0
    movq %3, m10
    movhps %4, m10
    movq %5, m11
    movhps %6, m11
    movq %7, m8
    movhps %8, m8
%endmacro

; in: 8 rows of 8 words in %1..%8
; out: 8 rows of 8 words in m0..m7
%macro TRANSPOSE8x8W_LOAD 8
    movdqu m0, %1
    movdqu m1, %2
    movdqu m2, %3
    movdqu m3, %4
    movdqu m4, %5
    movdqu m5, %6
    movdqu m6, %7
    movdqu m7, %8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%endmacro

; in: 8 rows of 8 words in m0..m7
; out: 8 rows of 8 words in %1..%8
%macro TRANSPOSE8x8W_STORE 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8

    pxor m8, m8
    CLIPW m0, m8, [pw_pixel_max]
    CLIPW m1, m8, [pw_pixel_max]
    CLIPW m2, m8, [pw_pixel_max]
    CLIPW m3, m8, [pw_pixel_max]
    CLIPW m4, m8, [pw_pixel_max]
    CLIPW m5, m8, [pw_pixel_max]
    CLIPW m6, m8, [pw_pixel_max]
    CLIPW m7, m8, [pw_pixel_max]

    movdqu %1, m0
    movdqu %2, m1
    movdqu %3, m2
    movdqu %4, m3
    movdqu %5, m4
    movdqu %6, m5
    movdqu %7, m6
    movdqu %8, m7
%endmacro
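; The ...B transpose macros serve the 8-bit entry points (bytes are widened to
; words for the filter and packed back with saturation on store); the ...W
; macros serve the 10-bit entry points and instead clip to pw_pixel_max.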

; in: %2 clobbered
; out: %1
; mask in m11
; clobbers m10
%macro MASKED_COPY 2
    pand %2, m11 ; and mask
    pandn m10, m11, %1; and -mask
    por %2, m10
    mova %1, %2
%endmacro

; in: %2 clobbered
; out: %1
; mask in %3, will be clobbered
%macro MASKED_COPY2 3
    pand %2, %3 ; and mask
    pandn %3, %1; and -mask
    por %2, %3
    mova %1, %2
%endmacro
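; Both macros are a per-lane select; roughly, for each 16-bit lane:
;     dst = (new & mask) | (dst & ~mask)
; so lanes whose mask is all ones take the filtered value and the rest keep the
; original pixels.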

ALIGN 16
; input in m0 ... m3 and tcs in tc (r2). Output in m1 and m2
%macro CHROMA_DEBLOCK_BODY 1
    psubw m4, m2, m1; q0 - p0
    psubw m5, m0, m3; p1 - q1
    psllw m4, 2; << 2
    paddw m5, m4;

    ;tc calculations
    movd m6, [tcq]; tc0
    punpcklwd m6, m6
    movd m4, [tcq+4]; tc1
    punpcklwd m4, m4
    shufps m6, m4, 0; tc0, tc1
    pmullw m4, m6, [pw_m1]; -tc0, -tc1
    ;end tc calculations

    paddw m5, [pw_4]; +4
    psraw m5, 3; >> 3

%if %1 > 8
    psllw m4, %1-8; << (BIT_DEPTH - 8)
    psllw m6, %1-8; << (BIT_DEPTH - 8)
%endif
    pmaxsw m5, m4
    pminsw m5, m6
    paddw m1, m5; p0 + delta0
    psubw m2, m5; q0 - delta0
%endmacro
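; Scalar sketch of the macro above (the normal chroma filter from the HEVC
; spec), per pixel column:
;     delta = av_clip(((q0 - p0) * 4 + p1 - q1 + 4) >> 3, -tc, tc);
;     p0 += delta; /* result in m1 */
;     q0 -= delta; /* result in m2 */
; final clipping to the pixel range is left to the callers.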

; input in m0 ... m7, betas in r2, tcs in r3. Output in m1...m6
%macro LUMA_DEBLOCK_BODY 2
    psllw m9, m2, 1; *2
    psubw m10, m1, m9
    paddw m10, m3
    ABS1 m10, m11 ; 0dp0, 0dp3 , 1dp0, 1dp3

    psllw m9, m5, 1; *2
    psubw m11, m6, m9
    paddw m11, m4
    ABS1 m11, m13 ; 0dq0, 0dq3 , 1dq0, 1dq3

    ;beta calculations
    mov r11, [betaq];
%if %1 > 8
    shl r11, %1 - 8
%endif
    movd m13, r11d; beta0
    add betaq, 4;
    punpcklwd m13, m13
    mov r12, [betaq];
%if %1 > 8
    shl r12, %1 - 8
%endif
    movd m14, r12d; beta1
    punpcklwd m14, m14
    pshufd m13, m14, 0; beta0, beta1
    ;end beta calculations

    paddw m9, m10, m11; 0d0, 0d3 , 1d0, 1d3

    pshufhw m14, m9, 0x0f ;0b00001111; 0d3 0d3 0d0 0d0 in high
    pshuflw m14, m14, 0x0f ;0b00001111; 1d3 1d3 1d0 1d0 in low

    pshufhw m9, m9, 0xf0 ;0b11110000; 0d0 0d0 0d3 0d3
    pshuflw m9, m9, 0xf0 ;0b11110000; 1d0 1d0 1d3 1d3

    paddw m14, m9; 0d0+0d3, 1d0+1d3

    ;compare
    pcmpgtw m15, m13, m14; beta0, beta1
    movmskps r13, m15 ;filtering mask 0d0 + 0d3 < beta0 (bit 2 or 3) , 1d0 + 1d3 < beta1 (bit 0 or 1)
    cmp r13, 0
    je .bypassluma

    ;weak / strong decision compare to beta_2
    psraw m15, m13, 2; beta >> 2
    psllw m8, m9, 1;
    pcmpgtw m15, m8; (d0 << 1) < beta_2, (d3 << 1) < beta_2
    movmskps r14, m15;
    ;end weak / strong decision
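    ; this is only the first of the strong-filter conditions from the spec;
    ; per sampled line the strong path also requires
    ;     abs(p3 - p0) + abs(q3 - q0) < (beta >> 3)   and   abs(p0 - q0) < tc25
    ; which are checked in the beta_3 / tc25 comparisons below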

    ; weak filter nd_p/q calculation
    pshufd m8, m10, 0x31
    psrld m8, 16
    paddw m8, m10
    movd r7d, m8
    and r7, 0xffff; 1dp0 + 1dp3
    pshufd m8, m8, 0x4E
    movd r8d, m8
    and r8, 0xffff; 0dp0 + 0dp3

    pshufd m8, m11, 0x31
    psrld m8, 16
    paddw m8, m11
    movd r9d, m8
    and r9, 0xffff; 1dq0 + 1dq3
    pshufd m8, m8, 0x4E
    movd r10d, m8
    and r10, 0xffff; 0dq0 + 0dq3
    ; end calc for weak filter

    ; filtering mask
    mov r2, r13
    shr r2, 3
    movd m15, r2d
    and r13, 1
    movd m11, r13d
    shufps m11, m15, 0
    shl r2, 1
    or r13, r2

    pcmpeqd m11, [pd_1]; filtering mask

    ;decide between strong and weak filtering
    ;tc25 calculations
    mov r2d, [tcq];
%if %1 > 8
    shl r2, %1 - 8
%endif
    movd m8, r2d; tc0
    add tcq, 4;
    mov r3d, [tcq];
%if %1 > 8
    shl r3, %1 - 8
%endif
    movd m9, r3d; tc1
    add r2d, r3d; tc0 + tc1
    jz .bypassluma
    punpcklwd m8, m8
    punpcklwd m9, m9
    shufps m8, m9, 0; tc0, tc1
    mova m9, m8
    psllw m8, 2; tc << 2
    pavgw m8, m9; tc25 = ((tc * 5 + 1) >> 1)
    ;end tc25 calculations
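    ; pavgw computes (a + b + 1) >> 1, so averaging (tc << 2) with tc yields
    ; (5 * tc + 1) >> 1, the spec's tc25 threshold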

    ;----beta_3 comparison-----
    psubw m12, m0, m3; p3 - p0
    ABS1 m12, m14; abs(p3 - p0)

    psubw m15, m7, m4; q3 - q0
    ABS1 m15, m14; abs(q3 - q0)

    paddw m12, m15; abs(p3 - p0) + abs(q3 - q0)

    pshufhw m12, m12, 0xf0 ;0b11110000;
    pshuflw m12, m12, 0xf0 ;0b11110000;

    psraw m13, 3; beta >> 3
    pcmpgtw m13, m12;
    movmskps r2, m13;
    and r14, r2; strong mask , beta_2 and beta_3 comparisons
    ;----beta_3 comparison end-----

    ;----tc25 comparison---
    psubw m12, m3, m4; p0 - q0
    ABS1 m12, m14; abs(p0 - q0)

    pshufhw m12, m12, 0xf0 ;0b11110000;
    pshuflw m12, m12, 0xf0 ;0b11110000;

    pcmpgtw m8, m12; tc25 comparisons
    movmskps r2, m8;
    and r14, r2; strong mask, beta_2, beta_3 and tc25 comparisons
    ;----tc25 comparison end---

    mov r2, r14;
    shr r2, 1;
    and r14, r2; strong mask, bits 2 and 0

    pmullw m14, m9, [pw_m2]; -tc * 2
    paddw m9, m9

    and r14, 5; 0b101
    mov r2, r14; strong mask
    shr r14, 2;
    movd m12, r14d; store to xmm for mask generation
    shl r14, 1
    and r2, 1
    movd m10, r2d; store to xmm for mask generation
    or r14, r2; final strong mask, bits 1 and 0
    jz .weakfilter

    shufps m10, m12, 0
    pcmpeqd m10, [pd_1]; strong mask

    mova m13, [pw_4]; 4 in every cell
    pand m11, m10; combine filtering mask and strong mask
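    ; strong filter as in the HEVC spec; each intermediate below is clipped to
    ; +/-2*tc around the original sample before MASKED_COPY writes it back:
    ;     p0' = (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3
    ;     p1' = (p2 + p1 + p0 + q0 + 2) >> 2
    ;     p2' = (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
    ;     q0' = (p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4) >> 3
    ;     q1' = (p0 + q0 + q1 + q2 + 2) >> 2
    ;     q2' = (2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3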
    paddw m12, m2, m3; p1 + p0
    paddw m12, m4; p1 + p0 + q0
    mova m10, m12; copy
    paddw m12, m12; 2*p1 + 2*p0 + 2*q0
    paddw m12, m1; p2 + 2*p1 + 2*p0 + 2*q0
    paddw m12, m5; p2 + 2*p1 + 2*p0 + 2*q0 + q1
    paddw m12, m13; p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4
    psraw m12, 3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3)
    psubw m12, m3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3) - p0
    pmaxsw m12, m14
    pminsw m12, m9; av_clip( , -2 * tc, 2 * tc)
    paddw m12, m3; p0'

    paddw m15, m1, m10; p2 + p1 + p0 + q0
    psrlw m13, 1; 2 in every cell
    paddw m15, m13; p2 + p1 + p0 + q0 + 2
    psraw m15, 2; (p2 + p1 + p0 + q0 + 2) >> 2
    psubw m15, m2;((p2 + p1 + p0 + q0 + 2) >> 2) - p1
    pmaxsw m15, m14
    pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)
    paddw m15, m2; p1'

    paddw m8, m1, m0; p3 + p2
    paddw m8, m8; 2*p3 + 2*p2
    paddw m8, m1; 2*p3 + 3*p2
    paddw m8, m10; 2*p3 + 3*p2 + p1 + p0 + q0
    paddw m13, m13
    paddw m8, m13; 2*p3 + 3*p2 + p1 + p0 + q0 + 4
    psraw m8, 3; (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
    psubw m8, m1; ((2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3) - p2
    pmaxsw m8, m14
    pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)
    paddw m8, m1; p2'
    MASKED_COPY m1, m8

    paddw m8, m3, m4; p0 + q0
    paddw m8, m5; p0 + q0 + q1
    paddw m8, m8; 2*p0 + 2*q0 + 2*q1
    paddw m8, m2; p1 + 2*p0 + 2*q0 + 2*q1
    paddw m8, m6; p1 + 2*p0 + 2*q0 + 2*q1 + q2
    paddw m8, m13; p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4
    psraw m8, 3; (p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4) >>3
    psubw m8, m4;
    pmaxsw m8, m14
    pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)
    paddw m8, m4; q0'
    MASKED_COPY m2, m15

    paddw m15, m3, m4; p0 + q0
    paddw m15, m5; p0 + q0 + q1
    mova m10, m15;
    paddw m15, m6; p0 + q0 + q1 + q2
    psrlw m13, 1; 2 in every cell
    paddw m15, m13; p0 + q0 + q1 + q2 + 2
    psraw m15, 2; (p0 + q0 + q1 + q2 + 2) >> 2
    psubw m15, m5; ((p0 + q0 + q1 + q2 + 2) >> 2) - q1
    pmaxsw m15, m14
    pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)
    paddw m15, m5; q1'

    paddw m13, m7; q3 + 2
    paddw m13, m6; q3 + q2 + 2
    paddw m13, m13; 2*q3 + 2*q2 + 4
    paddw m13, m6; 2*q3 + 3*q2 + 4
    paddw m13, m10; 2*q3 + 3*q2 + q1 + q0 + p0 + 4
    psraw m13, 3; (2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3
    psubw m13, m6; ((2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3) - q2
    pmaxsw m13, m14
    pminsw m13, m9; av_clip( , -2 * tc, 2 * tc)
    paddw m13, m6; q2'

    MASKED_COPY m6, m13
    MASKED_COPY m5, m15
    MASKED_COPY m4, m8
    MASKED_COPY m3, m12

.weakfilter:
    not r14; strong mask -> weak mask
    and r14, r13; final weak filtering mask, bits 0 and 1
    jz .store
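    ; weak filter as in the HEVC spec:
    ;     delta0 = (9 * (q0 - p0) - 3 * (q1 - p1) + 8) >> 4
    ; applied only where abs(delta0) < 10 * tc and then clipped to +/-tc;
    ; p1 and q1 additionally get
    ;     deltap1 = av_clip((((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1, -tc/2, tc/2)
    ;     deltaq1 = av_clip((((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1, -tc/2, tc/2)
    ; gated per side on the dp/dq sums computed earlier against (beta + (beta >> 1)) >> 3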

    ; weak filtering mask
    mov r2, r14
    shr r2, 1
    movd m12, r2d
    and r14, 1
    movd m11, r14d
    shufps m11, m12, 0
    pcmpeqd m11, [pd_1]; filtering mask

    mov r13, r11; beta0
    shr r13, 1;
    add r11, r13
    shr r11, 3; (beta0 + (beta0 >> 1)) >> 3

    mov r13, r12; beta1
    shr r13, 1;
    add r12, r13
    shr r12, 3; (beta1 + (beta1 >> 1)) >> 3

    mova m13, [pw_8]
    psubw m12, m4, m3 ; q0 - p0
    psllw m10, m12, 3; 8 * (q0 - p0)
    paddw m12, m10 ; 9 * (q0 - p0)

    psubw m10, m5, m2 ; q1 - p1
    psllw m8, m10, 1; 2 * ( q1 - p1 )
    paddw m10, m8; 3 * ( q1 - p1 )
    psubw m12, m10; 9 * (q0 - p0) - 3 * ( q1 - p1 )
    paddw m12, m13; + 8
    psraw m12, 4; >> 4 , delta0
    PABSW m13, m12; abs(delta0)

    psllw m10, m9, 2; 8 * tc
    paddw m10, m9; 10 * tc
    pcmpgtw m10, m13
    pand m11, m10

    psraw m9, 1; tc * 2 -> tc
    psraw m14, 1; -tc * 2 -> -tc

    pmaxsw m12, m14
    pminsw m12, m9; av_clip(delta0, -tc, tc)

    psraw m9, 1; tc -> tc / 2
    pmullw m14, m9, [pw_m1]; -tc / 2

    pavgw m15, m1, m3; (p2 + p0 + 1) >> 1
    psubw m15, m2; ((p2 + p0 + 1) >> 1) - p1
    paddw m15, m12; ((p2 + p0 + 1) >> 1) - p1 + delta0
    psraw m15, 1; (((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1
    pmaxsw m15, m14
    pminsw m15, m9; av_clip(deltap1, -tc/2, tc/2)
    paddw m15, m2; p1'

    ;beta calculations
    movd m10, r11d; beta0
    punpcklwd m10, m10
    movd m13, r12d; beta1
    punpcklwd m13, m13
    shufps m10, m13, 0; betax0, betax1

    movd m13, r7d; 1dp0 + 1dp3
    movd m8, r8d; 0dp0 + 0dp3
    punpcklwd m8, m8
    punpcklwd m13, m13
    shufps m13, m8, 0;
    pcmpgtw m8, m10, m13
    pand m8, m11
    ;end beta calculations
    MASKED_COPY2 m2, m15, m8; write p1'

    pavgw m8, m6, m4; (q2 + q0 + 1) >> 1
    psubw m8, m5; ((q2 + q0 + 1) >> 1) - q1
    psubw m8, m12; ((q2 + q0 + 1) >> 1) - q1 - delta0
    psraw m8, 1; (((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1
    pmaxsw m8, m14
    pminsw m8, m9; av_clip(deltaq1, -tc/2, tc/2)
    paddw m8, m5; q1'

    movd m13, r9d;
    movd m15, r10d;
    punpcklwd m15, m15
    punpcklwd m13, m13
    shufps m13, m15, 0; dq0 + dq3

    pcmpgtw m10, m13; compare to ((beta+(beta>>1))>>3)
    pand m10, m11
    MASKED_COPY2 m5, m8, m10; write q1'

    paddw m15, m3, m12 ; p0 + delta0
    MASKED_COPY m3, m15

    psubw m8, m4, m12 ; q0 - delta0
    MASKED_COPY m4, m8
%endmacro

INIT_XMM sse2
;-----------------------------------------------------------------------------
; void ff_hevc_v_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int *_tc, uint8_t *_no_p, uint8_t *_no_q)
;-----------------------------------------------------------------------------
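; Vertical edges are filtered by loading the 8 rows around the edge, transposing
; them so the edge becomes horizontal, running the shared filter body, and
; transposing back before the store.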
cglobal hevc_v_loop_filter_chroma_8, 3, 5, 7, pix, stride, tc, pix0, r3stride
    sub pixq, 2
    lea r3strideq, [3*strideq]
    mov pix0q, pixq
    add pixq, r3strideq
    TRANSPOSE4x8B_LOAD PASS8ROWS(pix0q, pixq, strideq, r3strideq)
    CHROMA_DEBLOCK_BODY 8
    TRANSPOSE8x4B_STORE PASS8ROWS(pix0q, pixq, strideq, r3strideq)
    RET

cglobal hevc_v_loop_filter_chroma_10, 3, 5, 7, pix, stride, tc, pix0, r3stride
    sub pixq, 4
    lea r3strideq, [3*strideq]
    mov pix0q, pixq
    add pixq, r3strideq
    TRANSPOSE4x8W_LOAD PASS8ROWS(pix0q, pixq, strideq, r3strideq)
    CHROMA_DEBLOCK_BODY 10
    TRANSPOSE8x4W_STORE PASS8ROWS(pix0q, pixq, strideq, r3strideq)
    RET

;-----------------------------------------------------------------------------
; void ff_hevc_h_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int *_tc, uint8_t *_no_p, uint8_t *_no_q)
;-----------------------------------------------------------------------------
cglobal hevc_h_loop_filter_chroma_8, 3, 4, 7, pix, stride, tc, pix0
    mov pix0q, pixq
    sub pix0q, strideq
    sub pix0q, strideq
    movq m0, [pix0q]; p1
    movq m1, [pix0q+strideq]; p0
    movq m2, [pixq]; q0
    movq m3, [pixq+strideq]; q1
    pxor m5, m5; zeros reg
    punpcklbw m0, m5
    punpcklbw m1, m5
    punpcklbw m2, m5
    punpcklbw m3, m5
    CHROMA_DEBLOCK_BODY 8
    packuswb m1, m1 ; p0' packed in bytes on low quadword
    packuswb m2, m2 ; q0' packed in bytes on low quadword
    movq [pix0q+strideq], m1
    movq [pixq], m2
    RET

cglobal hevc_h_loop_filter_chroma_10, 3, 4, 7, pix, stride, tc, pix0
    mov pix0q, pixq
    sub pix0q, strideq
    sub pix0q, strideq
    movu m0, [pix0q]; p1
    movu m1, [pix0q+strideq]; p0
    movu m2, [pixq]; q0
    movu m3, [pixq+strideq]; q1
    CHROMA_DEBLOCK_BODY 10
    pxor m5, m5; zeros reg
    CLIPW m1, m5, [pw_pixel_max]
    CLIPW m2, m5, [pw_pixel_max]
    movu [pix0q+strideq], m1
    movu [pixq], m2
    RET
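; The luma filter body uses m8-m15 as scratch and therefore needs all 16 XMM
; registers, so the luma entry points below are only built on x86-64.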

%if ARCH_X86_64
%macro LOOP_FILTER_LUMA 0
;-----------------------------------------------------------------------------
; void ff_hevc_v_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int *_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
;-----------------------------------------------------------------------------
cglobal hevc_v_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc
    sub r0, 4
    lea r5, [3 * r1]
    mov r6, r0
    add r0, r5
    TRANSPOSE8x8B_LOAD PASS8ROWS(r6, r0, r1, r5)
    LUMA_DEBLOCK_BODY 8, v
.store:
    TRANSPOSE8x8B_STORE PASS8ROWS(r6, r0, r1, r5)
.bypassluma:
    RET

cglobal hevc_v_loop_filter_luma_10, 4, 15, 16, pix, stride, beta, tc
    sub pixq, 8
    lea r5, [3 * strideq]
    mov r6, pixq
    add pixq, r5
    TRANSPOSE8x8W_LOAD PASS8ROWS(r6, pixq, strideq, r5)
    LUMA_DEBLOCK_BODY 10, v
.store:
    TRANSPOSE8x8W_STORE PASS8ROWS(r6, r0, r1, r5)
.bypassluma:
    RET

;-----------------------------------------------------------------------------
; void ff_hevc_h_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int *_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
;-----------------------------------------------------------------------------
cglobal hevc_h_loop_filter_luma_8, 4, 15, 16, pix, stride, beta, tc, count, pix0, src3stride
    lea src3strideq, [3 * strideq]
    mov pix0q, pixq
    sub pix0q, src3strideq
    sub pix0q, strideq
    movdqu m0, [pix0q]; p3
    movdqu m1, [pix0q + strideq]; p2
    movdqu m2, [pix0q + 2 * strideq]; p1
    movdqu m3, [pix0q + src3strideq]; p0
    movdqu m4, [pixq]; q0
    movdqu m5, [pixq + strideq]; q1
    movdqu m6, [pixq + 2 * strideq]; q2
    movdqu m7, [pixq + src3strideq]; q3
    pxor m8, m8
    punpcklbw m0, m8
    punpcklbw m1, m8
    punpcklbw m2, m8
    punpcklbw m3, m8
    punpcklbw m4, m8
    punpcklbw m5, m8
    punpcklbw m6, m8
    punpcklbw m7, m8
    LUMA_DEBLOCK_BODY 8, h
.store:
    packuswb m1, m1; p2
    packuswb m2, m2; p1
    packuswb m3, m3; p0
    packuswb m4, m4; q0
    packuswb m5, m5; q1
    packuswb m6, m6; q2
    movq [r5+r1], m1; p2
    movq [r5+2*r1], m2; p1
    movq [r5+r6], m3; p0
    movq [r0], m4; q0
    movq [r0+r1], m5; q1
    movq [r0+2*r1], m6; q2
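    ; p3 and q3 are never modified (even the strong filter stops at p2/q2), so
    ; only six of the eight rows are written back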
.bypassluma:
    RET

cglobal hevc_h_loop_filter_luma_10, 4, 15, 16, pix, stride, beta, tc, count, pix0, src3stride
    lea src3strideq, [3 * strideq]
    mov pix0q, pixq
    sub pix0q, src3strideq
    sub pix0q, strideq
    movdqu m0, [pix0q]; p3
    movdqu m1, [pix0q + strideq]; p2
    movdqu m2, [pix0q + 2 * strideq]; p1
    movdqu m3, [pix0q + src3strideq]; p0
    movdqu m4, [pixq]; q0
    movdqu m5, [pixq + strideq]; q1
    movdqu m6, [pixq + 2 * strideq]; q2
    movdqu m7, [pixq + src3strideq]; q3
    LUMA_DEBLOCK_BODY 10, h
.store:
    pxor m8, m8; zeros reg
    CLIPW m1, m8, [pw_pixel_max]
    CLIPW m2, m8, [pw_pixel_max]
    CLIPW m3, m8, [pw_pixel_max]
    CLIPW m4, m8, [pw_pixel_max]
    CLIPW m5, m8, [pw_pixel_max]
    CLIPW m6, m8, [pw_pixel_max]
    movdqu [pix0q + strideq], m1; p2
    movdqu [pix0q + 2 * strideq], m2; p1
    movdqu [pix0q + src3strideq], m3; p0
    movdqu [pixq], m4; q0
    movdqu [pixq + strideq], m5; q1
    movdqu [pixq + 2 * strideq], m6; q2
.bypassluma:
    RET
%endmacro

INIT_XMM sse2
LOOP_FILTER_LUMA
INIT_XMM ssse3
LOOP_FILTER_LUMA
%endif