x86: xvid_idct: port MMX iDCT to yasm

Also reduce table duplication with the SSE2 code and remove duplicated
macro parameters.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Christophe Gisquet 2015-03-10 23:11:52 +00:00 committed by Michael Niedermayer
parent 7b05b5093e
commit c3bf52713a
5 changed files with 484 additions and 564 deletions
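
The port keeps the C-visible entry points: callers still see ff_xvid_idct_mmx() and ff_xvid_idct_mmxext(), only now emitted by x86inc's cglobal under INIT_MMX mmx / INIT_MMX mmxext instead of inline asm. A minimal caller sketch; idct_block and have_mmxext are illustrative names, not part of the commit:

/* the yasm functions transform a 64-coefficient block in place */
void ff_xvid_idct_mmx(short *block);    /* from INIT_MMX mmx    */
void ff_xvid_idct_mmxext(short *block); /* from INIT_MMX mmxext */

static void idct_block(short block[64], int have_mmxext)
{
    if (have_mmxext)
        ff_xvid_idct_mmxext(block);
    else
        ff_xvid_idct_mmx(block);
}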

View File

@@ -73,7 +73,6 @@ MMX-OBJS-$(CONFIG_FDCTDSP) += x86/fdct.o
 MMX-OBJS-$(CONFIG_IDCTDSP) += x86/simple_idct.o
 # decoders/encoders
-MMX-OBJS-$(CONFIG_MPEG4_DECODER) += x86/xvididct_mmx.o
 MMX-OBJS-$(CONFIG_SNOW_DECODER) += x86/snowdsp.o
 MMX-OBJS-$(CONFIG_SNOW_ENCODER) += x86/snowdsp.o
 MMX-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_mmx.o

View File

@@ -60,11 +60,9 @@ static const struct algo idct_tab_arch[] = {
 #if HAVE_MMX_INLINE
     { "SIMPLE-MMX", ff_simple_idct_mmx, FF_IDCT_PERM_SIMPLE, AV_CPU_FLAG_MMX },
 #endif
-#if CONFIG_MPEG4_DECODER
-#if HAVE_MMX_INLINE
+#if CONFIG_MPEG4_DECODER && HAVE_YASM
+#if ARCH_X86_32
     { "XVID-MMX", ff_xvid_idct_mmx, FF_IDCT_PERM_NONE, AV_CPU_FLAG_MMX, 1 },
-#endif
-#if HAVE_MMXEXT_INLINE
     { "XVID-MMXEXT", ff_xvid_idct_mmxext, FF_IDCT_PERM_NONE, AV_CPU_FLAG_MMXEXT, 1 },
 #endif
 #if HAVE_SSE2_EXTERNAL
@@ -73,7 +71,7 @@ static const struct algo idct_tab_arch[] = {
     { "PR-SSE2", ff_prores_idct_put_10_sse2_wrap, FF_IDCT_PERM_TRANSPOSE, AV_CPU_FLAG_SSE2, 1 },
 #endif
 #endif
-#endif /* CONFIG_MPEG4_DECODER */
+#endif /* CONFIG_MPEG4_DECODER && HAVE_YASM */
     { 0 }
 };

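For context, the idct_tab_arch[] entries above follow the test's struct algo layout. A hedged reconstruction; the field names are assumptions inferred from the entries, and the trailing 1 on the XVID rows is assumed to mark transforms that are not bit-exact against the reference:

struct algo {
    const char *name;            /* e.g. "XVID-MMX" */
    void (*func)(int16_t *block);
    int perm_type;               /* FF_IDCT_PERM_* */
    int cpu_flag;                /* AV_CPU_FLAG_* gate for the test */
    int nonspec;                 /* assumed: relaxes bit-exact comparison */
};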
View File

@@ -1,5 +1,9 @@
 ; XVID MPEG-4 VIDEO CODEC
-; - SSE2 inverse discrete cosine transform -
+;
+; Conversion from gcc syntax to x264asm syntax with modifications
+; by Christophe Gisquet <christophe.gisquet@gmail.com>
+;
+; =========== SSE2 inverse discrete cosine transform ===========
 ;
 ; Copyright(C) 2003 Pascal Massimino <skal@planet-d.net>
 ;
@@ -8,8 +12,6 @@
 ;
 ; Originally from dct/x86_asm/fdct_sse2_skal.asm in Xvid.
 ;
-; This file is part of FFmpeg.
-;
 ; Vertical pass is an implementation of the scheme:
 ; Loeffler C., Ligtenberg A., and Moschytz C.S.:
 ; Practical Fast 1D DCT Algorithm with Eleven Multiplications,
@@ -22,6 +24,32 @@
 ;
 ; More details at http://skal.planet-d.net/coding/dct.html
 ;
+; ======= MMX and XMM forward discrete cosine transform =======
+;
+; Copyright(C) 2001 Peter Ross <pross@xvid.org>
+;
+; Originally provided by Intel at AP-922
+; http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
+; (See more app notes at http://developer.intel.com/vtune/cbts/strmsimd/appnotes.htm)
+; but in a limited edition.
+; New macro implements a column part for precise iDCT
+; The routine precision now satisfies IEEE standard 1180-1990.
+;
+; Copyright(C) 2000-2001 Peter Gubanov <peter@elecard.net.ru>
+; Rounding trick Copyright(C) 2000 Michel Lespinasse <walken@zoy.org>
+;
+; http://www.elecard.com/peter/idct.html
+; http://www.linuxvideo.org/mpeg2dec/
+;
+; These examples contain code fragments for first stage iDCT 8x8
+; (for rows) and first stage DCT 8x8 (for columns)
+;
+; conversion to gcc syntax by Michael Niedermayer
+;
+; ======================================================================
+;
+; This file is part of FFmpeg.
+;
 ; FFmpeg is free software; you can redistribute it and/or
 ; modify it under the terms of the GNU Lesser General Public
 ; License as published by the Free Software Foundation; either
@@ -39,11 +67,13 @@
 %include "libavutil/x86/x86util.asm"
 SECTION_RODATA
+; Similar to tg_1_16 in MMX code
 tan1: times 8 dw 13036
 tan2: times 8 dw 27146
 tan3: times 8 dw 43790
 sqrt2: times 8 dw 23170
+; SSE2 tables
 iTab1: dw 0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d
 dw 0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61
 dw 0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7
@ -61,12 +91,154 @@ iTab4: dw 0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746
dw 0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df
dw 0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
%if ARCH_X86_32
; -----------------------------------------------------------------------------
;
; The first stage iDCT 8x8 - inverse DCTs of rows
;
; -----------------------------------------------------------------------------
; The 8-point inverse DCT direct algorithm
; -----------------------------------------------------------------------------
;
; static const short w[32] = {
; FIX(cos_4_16), FIX(cos_2_16), FIX(cos_4_16), FIX(cos_6_16),
; FIX(cos_4_16), FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16),
; FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16), FIX(cos_2_16),
; FIX(cos_4_16), -FIX(cos_2_16), FIX(cos_4_16), -FIX(cos_6_16),
; FIX(cos_1_16), FIX(cos_3_16), FIX(cos_5_16), FIX(cos_7_16),
; FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16),
; FIX(cos_5_16), -FIX(cos_1_16), FIX(cos_7_16), FIX(cos_3_16),
; FIX(cos_7_16), -FIX(cos_5_16), FIX(cos_3_16), -FIX(cos_1_16) };
;
; #define DCT_8_INV_ROW(x, y)
; {
; int a0, a1, a2, a3, b0, b1, b2, b3;
;
; a0 = x[0] * w[0] + x[2] * w[1] + x[4] * w[2] + x[6] * w[3];
; a1 = x[0] * w[4] + x[2] * w[5] + x[4] * w[6] + x[6] * w[7];
; a2 = x[0] * w[8] + x[2] * w[9] + x[4] * w[10] + x[6] * w[11];
; a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15];
; b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19];
; b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23];
; b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27];
; b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31];
;
; y[0] = SHIFT_ROUND(a0 + b0);
; y[1] = SHIFT_ROUND(a1 + b1);
; y[2] = SHIFT_ROUND(a2 + b2);
; y[3] = SHIFT_ROUND(a3 + b3);
; y[4] = SHIFT_ROUND(a3 - b3);
; y[5] = SHIFT_ROUND(a2 - b2);
; y[6] = SHIFT_ROUND(a1 - b1);
; y[7] = SHIFT_ROUND(a0 - b0);
; }
;
; -----------------------------------------------------------------------------
;
; In this implementation the outputs of the iDCT-1D are multiplied
; for rows 0,4 - by cos_4_16,
; for rows 1,7 - by cos_1_16,
; for rows 2,6 - by cos_2_16,
; for rows 3,5 - by cos_3_16
; and are shifted to the left for better accuracy.
;
; For the constants used,
; FIX(float_const) = (short) (float_const * (1 << 15) + 0.5)
;
; -----------------------------------------------------------------------------
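; For reference, the pseudocode above transcribes almost mechanically to C.
; A minimal sketch, assuming SHIFT_ROUND(x) rounds to nearest before dropping
; 11 fraction bits; the assembly instead folds a per-row rounder into a0..a3
; and uses per-row scaled tables:

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define FIX(c)         ((short) ((c) * (1 << 15) + 0.5))
#define SHIFT_ROUND(x) (((x) + (1 << 10)) >> 11) /* assumption, see above */

static void dct_8_inv_row(const short *x, short *y)
{
    /* signed cosine indices encoding the sign pattern of w[32] above */
    static const int ke[16] = { 4,  2,  4,  6,   4,  6, -4, -2,
                                4, -6, -4,  2,   4, -2,  4, -6 };
    static const int ko[16] = { 1,  3,  5,  7,   3, -7, -1, -5,
                                5, -1,  7,  3,   7, -5,  3, -1 };
    short w[32];
    int a[4], b[4], i, j;

    for (i = 0; i < 16; i++) {
        double ce = cos(abs(ke[i]) * M_PI / 16.0);
        double co = cos(abs(ko[i]) * M_PI / 16.0);
        w[i]      = ke[i] < 0 ? -FIX(ce) : FIX(ce);
        w[16 + i] = ko[i] < 0 ? -FIX(co) : FIX(co);
    }
    for (i = 0; i < 4; i++) {
        a[i] = b[i] = 0;
        for (j = 0; j < 4; j++) {
            a[i] += x[2 * j]     * w[4 * i + j];      /* even inputs */
            b[i] += x[2 * j + 1] * w[16 + 4 * i + j]; /* odd inputs  */
        }
    }
    for (i = 0; i < 4; i++) {
        y[i]     = SHIFT_ROUND(a[i] + b[i]);
        y[7 - i] = SHIFT_ROUND(a[i] - b[i]);
    }
}

int main(void)
{
    short x[8] = { 64, 0, 0, 0, 0, 0, 0, 0 }, y[8];
    dct_8_inv_row(x, y);
    for (int i = 0; i < 8; i++)
        printf("%d ", y[i]); /* a DC-only row yields eight equal outputs */
    printf("\n");
    return 0;
}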
; -----------------------------------------------------------------------------
; Tables for mmx processors
; -----------------------------------------------------------------------------
; Table for rows 0,4 - constants are multiplied by cos_4_16
tab_i_04_mmx: dw 16384, 16384, 16384, -16384
dw 21407, 8867, 8867, -21407 ; w07 w05 w03 w01
dw 16384, -16384, 16384, 16384 ; w14 w12 w10 w08
dw -8867, 21407, -21407, -8867 ; w15 w13 w11 w09
dw 22725, 12873, 19266, -22725 ; w22 w20 w18 w16
dw 19266, 4520, -4520, -12873 ; w23 w21 w19 w17
dw 12873, 4520, 4520, 19266 ; w30 w28 w26 w24
dw -22725, 19266, -12873, -22725 ; w31 w29 w27 w25
; Table for rows 1,7 - constants are multiplied by cos_1_16
dw 22725, 22725, 22725, -22725 ; movq-> w06 w04 w02 w00
dw 29692, 12299, 12299, -29692 ; w07 w05 w03 w01
dw 22725, -22725, 22725, 22725 ; w14 w12 w10 w08
dw -12299, 29692, -29692, -12299 ; w15 w13 w11 w09
dw 31521, 17855, 26722, -31521 ; w22 w20 w18 w16
dw 26722, 6270, -6270, -17855 ; w23 w21 w19 w17
dw 17855, 6270, 6270, 26722 ; w30 w28 w26 w24
dw -31521, 26722, -17855, -31521 ; w31 w29 w27 w25
; Table for rows 2,6 - constants are multiplied by cos_2_16
dw 21407, 21407, 21407, -21407 ; movq-> w06 w04 w02 w00
dw 27969, 11585, 11585, -27969 ; w07 w05 w03 w01
dw 21407, -21407, 21407, 21407 ; w14 w12 w10 w08
dw -11585, 27969, -27969, -11585 ; w15 w13 w11 w09
dw 29692, 16819, 25172, -29692 ; w22 w20 w18 w16
dw 25172, 5906, -5906, -16819 ; w23 w21 w19 w17
dw 16819, 5906, 5906, 25172 ; w30 w28 w26 w24
dw -29692, 25172, -16819, -29692 ; w31 w29 w27 w25
; Table for rows 3,5 - constants are multiplied by cos_3_16
dw 19266, 19266, 19266, -19266 ; movq-> w06 w04 w02 w00
dw 25172, 10426, 10426, -25172 ; w07 w05 w03 w01
dw 19266, -19266, 19266, 19266 ; w14 w12 w10 w08
dw -10426, 25172, -25172, -10426 ; w15 w13 w11 w09
dw 26722, 15137, 22654, -26722 ; w22 w20 w18 w16
dw 22654, 5315, -5315, -15137 ; w23 w21 w19 w17
dw 15137, 5315, 5315, 22654 ; w30 w28 w26 w24
dw -26722, 22654, -15137, -26722 ; w31 w29 w27 w25
; -----------------------------------------------------------------------------
; Tables for xmm processors
; -----------------------------------------------------------------------------
; Table for rows 0,4 - constants are multiplied by cos_4_16
tab_i_04_xmm: dw 16384, 21407, 16384, 8867 ; movq-> w05 w04 w01 w00
dw 16384, 8867, -16384, -21407 ; w07 w06 w03 w02
dw 16384, -8867, 16384, -21407 ; w13 w12 w09 w08
dw -16384, 21407, 16384, -8867 ; w15 w14 w11 w10
dw 22725, 19266, 19266, -4520 ; w21 w20 w17 w16
dw 12873, 4520, -22725, -12873 ; w23 w22 w19 w18
dw 12873, -22725, 4520, -12873 ; w29 w28 w25 w24
dw 4520, 19266, 19266, -22725 ; w31 w30 w27 w26
; Table for rows 1,7 - constants are multiplied by cos_1_16
dw 22725, 29692, 22725, 12299 ; movq-> w05 w04 w01 w00
dw 22725, 12299, -22725, -29692 ; w07 w06 w03 w02
dw 22725, -12299, 22725, -29692 ; w13 w12 w09 w08
dw -22725, 29692, 22725, -12299 ; w15 w14 w11 w10
dw 31521, 26722, 26722, -6270 ; w21 w20 w17 w16
dw 17855, 6270, -31521, -17855 ; w23 w22 w19 w18
dw 17855, -31521, 6270, -17855 ; w29 w28 w25 w24
dw 6270, 26722, 26722, -31521 ; w31 w30 w27 w26
; Table for rows 2,6 - constants are multiplied by cos_2_16
dw 21407, 27969, 21407, 11585 ; movq-> w05 w04 w01 w00
dw 21407, 11585, -21407, -27969 ; w07 w06 w03 w02
dw 21407, -11585, 21407, -27969 ; w13 w12 w09 w08
dw -21407, 27969, 21407, -11585 ; w15 w14 w11 w10
dw 29692, 25172, 25172, -5906 ; w21 w20 w17 w16
dw 16819, 5906, -29692, -16819 ; w23 w22 w19 w18
dw 16819, -29692, 5906, -16819 ; w29 w28 w25 w24
dw 5906, 25172, 25172, -29692 ; w31 w30 w27 w26
; Table for rows 3,5 - constants are multiplied by cos_3_16
dw 19266, 25172, 19266, 10426 ; movq-> w05 w04 w01 w00
dw 19266, 10426, -19266, -25172 ; w07 w06 w03 w02
dw 19266, -10426, 19266, -25172 ; w13 w12 w09 w08
dw -19266, 25172, 19266, -10426 ; w15 w14 w11 w10
dw 26722, 22654, 22654, -5315 ; w21 w20 w17 w16
dw 15137, 5315, -26722, -15137 ; w23 w22 w19 w18
dw 15137, -26722, 5315, -15137 ; w29 w28 w25 w24
dw 5315, 22654, 22654, -26722 ; w31 w30 w27 w26
%endif ; ~ARCH_X86_32
; Similar to rounder_0 in the MMX code; the first 4 entries match, then the
; rounder_0 byte offsets map as: 4*8 -> 6*16, 5*8 -> 4*16, 6*8/7*8 -> 5*16
walkenIdctRounders: times 4 dd 65536
times 4 dd 3597
times 4 dd 2260
times 4 dd 1203
times 4 dd 120
times 4 dd 512
times 2 dd 0
pb_127: times 8 db 127
@@ -377,3 +549,275 @@ cglobal xvid_idct, 1, 5, 8+7*ARCH_X86_64, block
iLLM_PASS r0
.6:
RET
%if ARCH_X86_32
; %1 = row offset, %2 = table offset,
; %3 = rounder offset into walkenIdctRounders (see the mapping above)
%macro DCT_8_INV_ROW 3
movq mm0, [r0+16*%1+0] ; 0 ; x3 x2 x1 x0
movq mm1, [r0+16*%1+8] ; 1 ; x7 x6 x5 x4
movq mm2, mm0 ; 2 ; x3 x2 x1 x0
movq mm3, [%2+ 0] ; 3 ; w06 w04 w02 w00
%if cpuflag(mmxext)
pshufw mm0, mm0, 0x88 ; x2 x0 x2 x0
movq mm4, [%2+ 8] ; 4 ; w07 w06 w03 w02
movq mm5, mm1 ; 5 ; x7 x6 x5 x4
pmaddwd mm3, mm0 ; x2*w05+x0*w04 x2*w01+x0*w00
movq mm6, [%2+32] ; 6 ; w21 w20 w17 w16
pshufw mm1, mm1, 0x88 ; x6 x4 x6 x4
pmaddwd mm4, mm1 ; x6*w07+x4*w06 x6*w03+x4*w02
movq mm7, [%2+40] ; 7; w23 w22 w19 w18
pshufw mm2, mm2, 0xdd ; x3 x1 x3 x1
pmaddwd mm6, mm2 ; x3*w21+x1*w20 x3*w17+x1*w16
pshufw mm5, mm5, 0xdd ; x7 x5 x7 x5
pmaddwd mm7, mm5 ; x7*w23+x5*w22 x7*w19+x5*w18
paddd mm3, [walkenIdctRounders + %3] ; +%3
pmaddwd mm0, [%2+16] ; x2*w13+x0*w12 x2*w09+x0*w08
paddd mm3, mm4 ; 4 ; a1=sum(even1) a0=sum(even0)
pmaddwd mm1, [%2+24] ; x6*w15+x4*w14 x6*w11+x4*w10
movq mm4, mm3 ; 4 ; a1 a0
pmaddwd mm2, [%2+48] ; x3*w29+x1*w28 x3*w25+x1*w24
paddd mm6, mm7 ; 7 ; b1=sum(odd1) b0=sum(odd0)
pmaddwd mm5, [%2+56] ; x7*w31+x5*w30 x7*w27+x5*w26
paddd mm3, mm6 ; a1+b1 a0+b0
paddd mm0, [walkenIdctRounders + %3] ; +%3
psrad mm3, 11 ; y1=a1+b1 y0=a0+b0
paddd mm0, mm1 ; 1 ; a3=sum(even3) a2=sum(even2)
psubd mm4, mm6 ; 6 ; a1-b1 a0-b0
movq mm7, mm0 ; 7 ; a3 a2
paddd mm2, mm5 ; 5 ; b3=sum(odd3) b2=sum(odd2)
paddd mm0, mm2 ; a3+b3 a2+b2
psrad mm4, 11 ; y6=a1-b1 y7=a0-b0
psubd mm7, mm2 ; 2 ; a3-b3 a2-b2
psrad mm0, 11 ; y3=a3+b3 y2=a2+b2
psrad mm7, 11 ; y4=a3-b3 y5=a2-b2
packssdw mm3, mm0 ; 0 ; y3 y2 y1 y0
packssdw mm7, mm4 ; 4 ; y6 y7 y4 y5
movq [r0+16*%1+0], mm3 ; 3 ; save y3 y2 y1 y0
pshufw mm7, mm7, 0xb1 ; y7 y6 y5 y4
%else
punpcklwd mm0, mm1 ; x5 x1 x4 x0
movq mm5, mm0 ; 5 ; x5 x1 x4 x0
punpckldq mm0, mm0 ; x4 x0 x4 x0
movq mm4, [%2+ 8] ; 4 ; w07 w05 w03 w01
punpckhwd mm2, mm1 ; 1 ; x7 x3 x6 x2
pmaddwd mm3, mm0 ; x4*w06+x0*w04 x4*w02+x0*w00
movq mm6, mm2 ; 6 ; x7 x3 x6 x2
movq mm1, [%2+32] ; 1 ; w22 w20 w18 w16
punpckldq mm2, mm2 ; x6 x2 x6 x2
pmaddwd mm4, mm2 ; x6*w07+x2*w05 x6*w03+x2*w01
punpckhdq mm5, mm5 ; x5 x1 x5 x1
pmaddwd mm0, [%2+16] ; x4*w14+x0*w12 x4*w10+x0*w08
punpckhdq mm6, mm6 ; x7 x3 x7 x3
movq mm7, [%2+40] ; 7 ; w23 w21 w19 w17
pmaddwd mm1, mm5 ; x5*w22+x1*w20 x5*w18+x1*w16
paddd mm3, [walkenIdctRounders + %3] ; +%3
pmaddwd mm7, mm6 ; x7*w23+x3*w21 x7*w19+x3*w17
pmaddwd mm2, [%2+24] ; x6*w15+x2*w13 x6*w11+x2*w09
paddd mm3, mm4 ; 4 ; a1=sum(even1) a0=sum(even0)
pmaddwd mm5, [%2+48] ; x5*w30+x1*w28 x5*w26+x1*w24
movq mm4, mm3 ; 4 ; a1 a0
pmaddwd mm6, [%2+56] ; x7*w31+x3*w29 x7*w27+x3*w25
paddd mm1, mm7 ; 7 ; b1=sum(odd1) b0=sum(odd0)
paddd mm0, [walkenIdctRounders + %3] ; +%3
psubd mm3, mm1 ; a1-b1 a0-b0
psrad mm3, 11 ; y6=a1-b1 y7=a0-b0
paddd mm1, mm4 ; 4 ; a1+b1 a0+b0
paddd mm0, mm2 ; 2 ; a3=sum(even3) a2=sum(even2)
psrad mm1, 11 ; y1=a1+b1 y0=a0+b0
paddd mm5, mm6 ; 6 ; b3=sum(odd3) b2=sum(odd2)
movq mm4, mm0 ; 4 ; a3 a2
paddd mm0, mm5 ; a3+b3 a2+b2
psubd mm4, mm5 ; 5 ; a3-b3 a2-b2
psrad mm0, 11 ; y3=a3+b3 y2=a2+b2
psrad mm4, 11 ; y4=a3-b3 y5=a2-b2
packssdw mm1, mm0 ; 0 ; y3 y2 y1 y0
packssdw mm4, mm3 ; 3 ; y6 y7 y4 y5
movq mm7, mm4 ; 7 ; y6 y7 y4 y5
psrld mm4, 16 ; 0 y6 0 y4
pslld mm7, 16 ; y7 0 y5 0
movq [r0+16*%1+0], mm1 ; 1 ; save y3 y2 y1 y0
por mm7, mm4 ; 4 ; y7 y6 y5 y4
%endif
movq [r0+16*%1+8], mm7 ; 7 ; save y7 y6 y5 y4
%endmacro
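; The only difference between the two branches of DCT_8_INV_ROW is how the
; even/odd input words are gathered: the mmxext path uses pshufw with
; immediates 0x88 and 0xdd, while the plain MMX path builds the same layouts
; with punpckl/hwd and punpckl/hdq. A scalar C model of pshufw, matching the
; selector semantics used above:

#include <stdint.h>
#include <stdio.h>

/* destination word i = source word ((imm >> 2*i) & 3) */
static uint64_t pshufw(uint64_t src, unsigned imm)
{
    uint64_t r = 0;
    for (int i = 0; i < 4; i++) {
        unsigned sel = (imm >> (2 * i)) & 3;
        r |= ((src >> (16 * sel)) & 0xffff) << (16 * i);
    }
    return r;
}

int main(void)
{
    uint64_t x = 0x0003000200010000; /* words x3 x2 x1 x0 */
    printf("%016llx\n", (unsigned long long) pshufw(x, 0x88)); /* x2 x0 x2 x0 */
    printf("%016llx\n", (unsigned long long) pshufw(x, 0xdd)); /* x3 x1 x3 x1 */
    return 0;
}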
; -----------------------------------------------------------------------------
;
; The first stage DCT 8x8 - forward DCTs of columns
;
; The inputs are multiplied
; for rows 0,4 - by cos_4_16,
; for rows 1,7 - by cos_1_16,
; for rows 2,6 - by cos_2_16,
; for rows 3,5 - by cos_3_16
; and are shifted to the left for better accuracy.
;
; -----------------------------------------------------------------------------
;
; The 8-point scaled forward DCT algorithm (26a8m)
;
; -----------------------------------------------------------------------------
;
;#define DCT_8_FRW_COL(x, y)
; {
; short t0, t1, t2, t3, t4, t5, t6, t7;
; short tp03, tm03, tp12, tm12, tp65, tm65;
; short tp465, tm465, tp765, tm765;
;
; t0 = LEFT_SHIFT(x[0] + x[7]);
; t1 = LEFT_SHIFT(x[1] + x[6]);
; t2 = LEFT_SHIFT(x[2] + x[5]);
; t3 = LEFT_SHIFT(x[3] + x[4]);
; t4 = LEFT_SHIFT(x[3] - x[4]);
; t5 = LEFT_SHIFT(x[2] - x[5]);
; t6 = LEFT_SHIFT(x[1] - x[6]);
; t7 = LEFT_SHIFT(x[0] - x[7]);
;
; tp03 = t0 + t3;
; tm03 = t0 - t3;
; tp12 = t1 + t2;
; tm12 = t1 - t2;
;
; y[0] = tp03 + tp12;
; y[4] = tp03 - tp12;
;
; y[2] = tm03 + tm12 * tg_2_16;
; y[6] = tm03 * tg_2_16 - tm12;
;
; tp65 = (t6 + t5) * cos_4_16;
; tm65 = (t6 - t5) * cos_4_16;
;
; tp765 = t7 + tp65;
; tm765 = t7 - tp65;
; tp465 = t4 + tm65;
; tm465 = t4 - tm65;
;
; y[1] = tp765 + tp465 * tg_1_16;
; y[7] = tp765 * tg_1_16 - tp465;
; y[5] = tm765 * tg_3_16 + tm465;
; y[3] = tm765 - tm465 * tg_3_16;
; }
;
; -----------------------------------------------------------------------------
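; A floating-point transcription of the column scheme quoted above, with
; tg_k_16 = tan(k*pi/16) and cos_4_16 = cos(4*pi/16); LEFT_SHIFT is treated
; as a no-op here since it only raises fixed-point precision:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static void dct_8_frw_col(const double *x, double *y)
{
    const double tg_1_16 = tan(1 * M_PI / 16), tg_2_16 = tan(2 * M_PI / 16);
    const double tg_3_16 = tan(3 * M_PI / 16), cos_4_16 = cos(4 * M_PI / 16);
    double t0 = x[0] + x[7], t1 = x[1] + x[6], t2 = x[2] + x[5], t3 = x[3] + x[4];
    double t4 = x[3] - x[4], t5 = x[2] - x[5], t6 = x[1] - x[6], t7 = x[0] - x[7];
    double tp03 = t0 + t3, tm03 = t0 - t3, tp12 = t1 + t2, tm12 = t1 - t2;
    double tp65 = (t6 + t5) * cos_4_16, tm65 = (t6 - t5) * cos_4_16;
    double tp765 = t7 + tp65, tm765 = t7 - tp65;
    double tp465 = t4 + tm65, tm465 = t4 - tm65;

    y[0] = tp03 + tp12;
    y[4] = tp03 - tp12;
    y[2] = tm03 + tm12 * tg_2_16;
    y[6] = tm03 * tg_2_16 - tm12;
    y[1] = tp765 + tp465 * tg_1_16;
    y[7] = tp765 * tg_1_16 - tp465;
    y[5] = tm765 * tg_3_16 + tm465;
    y[3] = tm765 - tm465 * tg_3_16;
}

int main(void)
{
    double x[8] = { 1, 2, 3, 4, 4, 3, 2, 1 }, y[8];
    dct_8_frw_col(x, y);
    for (int i = 0; i < 8; i++)
        printf("%.2f ", y[i]);
    printf("\n");
    return 0;
}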
; -----------------------------------------------------------------------------
; DCT_8_INV_COL_4 INP,OUT
; -----------------------------------------------------------------------------
%macro DCT_8_INV_COL 1
movq mm0, [tan3]
movq mm3, [%1+16*3]
movq mm1, mm0 ; tg_3_16
movq mm5, [%1+16*5]
pmulhw mm0, mm3 ; x3*(tg_3_16-1)
movq mm4, [tan1]
pmulhw mm1, mm5 ; x5*(tg_3_16-1)
movq mm7, [%1+16*7]
movq mm2, mm4 ; tg_1_16
movq mm6, [%1+16*1]
pmulhw mm4, mm7 ; x7*tg_1_16
paddsw mm0, mm3 ; x3*tg_3_16
pmulhw mm2, mm6 ; x1*tg_1_16
paddsw mm1, mm3 ; x3+x5*(tg_3_16-1)
psubsw mm0, mm5 ; x3*tg_3_16-x5 = tm35
movq mm3, [sqrt2]
paddsw mm1, mm5 ; x3+x5*tg_3_16 = tp35
paddsw mm4, mm6 ; x1+tg_1_16*x7 = tp17
psubsw mm2, mm7 ; x1*tg_1_16-x7 = tm17
movq mm5, mm4 ; tp17
movq mm6, mm2 ; tm17
paddsw mm5, mm1 ; tp17+tp35 = b0
psubsw mm6, mm0 ; tm17-tm35 = b3
psubsw mm4, mm1 ; tp17-tp35 = t1
paddsw mm2, mm0 ; tm17+tm35 = t2
movq mm7, [tan2]
movq mm1, mm4 ; t1
movq [%1+3*16], mm5 ; save b0
paddsw mm1, mm2 ; t1+t2
movq [%1+5*16], mm6 ; save b3
psubsw mm4, mm2 ; t1-t2
movq mm5, [%1+2*16]
movq mm0, mm7 ; tg_2_16
movq mm6, [%1+6*16]
pmulhw mm0, mm5 ; x2*tg_2_16
pmulhw mm7, mm6 ; x6*tg_2_16
pmulhw mm1, mm3 ; ocos_4_16*(t1+t2) = b1/2
movq mm2, [%1+0*16]
pmulhw mm4, mm3 ; ocos_4_16*(t1-t2) = b2/2
psubsw mm0, mm6 ; t2*tg_2_16-x6 = tm26
movq mm3, mm2 ; x0
movq mm6, [%1+4*16]
paddsw mm7, mm5 ; x2+x6*tg_2_16 = tp26
paddsw mm2, mm6 ; x0+x4 = tp04
psubsw mm3, mm6 ; x0-x4 = tm04
movq mm5, mm2 ; tp04
movq mm6, mm3 ; tm04
psubsw mm2, mm7 ; tp04-tp26 = a3
paddsw mm3, mm0 ; tm04+tm26 = a1
paddsw mm1, mm1 ; b1
paddsw mm4, mm4 ; b2
paddsw mm5, mm7 ; tp04+tp26 = a0
psubsw mm6, mm0 ; tm04-tm26 = a2
movq mm7, mm3 ; a1
movq mm0, mm6 ; a2
paddsw mm3, mm1 ; a1+b1
paddsw mm6, mm4 ; a2+b2
psraw mm3, 6 ; dst1
psubsw mm7, mm1 ; a1-b1
psraw mm6, 6 ; dst2
psubsw mm0, mm4 ; a2-b2
movq mm1, [%1+3*16] ; load b0
psraw mm7, 6 ; dst6
movq mm4, mm5 ; a0
psraw mm0, 6 ; dst5
movq [%1+1*16], mm3
paddsw mm5, mm1 ; a0+b0
movq [%1+2*16], mm6
psubsw mm4, mm1 ; a0-b0
movq mm3, [%1+5*16] ; load b3
psraw mm5, 6 ; dst0
movq mm6, mm2 ; a3
psraw mm4, 6 ; dst7
movq [%1+5*16], mm0
paddsw mm2, mm3 ; a3+b3
movq [%1+6*16], mm7
psubsw mm6, mm3 ; a3-b3
movq [%1+0*16], mm5
psraw mm2, 6 ; dst3
movq [%1+7*16], mm4
psraw mm6, 6 ; dst4
movq [%1+3*16], mm2
movq [%1+4*16], mm6
%endmacro
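; DCT_8_INV_COL relies on a pmulhw idiom worth spelling out: tan3
; (43790 = tan(3*pi/16) * 65536) does not fit a signed word, so pmulhw
; effectively multiplies by tg_3_16 - 1, and the macro adds the input back
; with paddsw. A small standalone demonstration, values from the tables above:

#include <stdint.h>
#include <stdio.h>

/* pmulhw: high 16 bits of the signed 16x16 product */
static int16_t pmulhw(int16_t a, int16_t b)
{
    return (int16_t) (((int32_t) a * b) >> 16);
}

int main(void)
{
    int16_t x    = 1000;
    int16_t tan3 = -21746; /* == (int16_t) 43790, i.e. tg_3_16 - 1 in Q16 */
    printf("%d\n", pmulhw(x, tan3) + x); /* ~ 1000 * tan(3*pi/16) = 668 */
    return 0;
}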
%macro XVID_IDCT_MMX 0
cglobal xvid_idct, 1, 1, 0, block
%if cpuflag(mmxext)
%define TAB tab_i_04_xmm
%else
%define TAB tab_i_04_mmx
%endif
; Process each row - beware of rounder offset
DCT_8_INV_ROW 0, TAB + 64 * 0, 0*16
DCT_8_INV_ROW 1, TAB + 64 * 1, 1*16
DCT_8_INV_ROW 2, TAB + 64 * 2, 2*16
DCT_8_INV_ROW 3, TAB + 64 * 3, 3*16
DCT_8_INV_ROW 4, TAB + 64 * 0, 6*16
DCT_8_INV_ROW 5, TAB + 64 * 3, 4*16
DCT_8_INV_ROW 6, TAB + 64 * 2, 5*16
DCT_8_INV_ROW 7, TAB + 64 * 1, 5*16
; Process the columns (4 at a time)
DCT_8_INV_COL r0+0
DCT_8_INV_COL r0+8
RET
%endmacro
INIT_MMX mmx
XVID_IDCT_MMX
INIT_MMX mmxext
XVID_IDCT_MMX
%endif ; ~ARCH_X86_32

View File

@@ -38,6 +38,32 @@ static void xvid_idct_sse2_add(uint8_t *dest, int line_size, short *block)
    ff_add_pixels_clamped(block, dest, line_size);
}
#if ARCH_X86_32
static void xvid_idct_mmx_put(uint8_t *dest, int line_size, short *block)
{
ff_xvid_idct_mmx(block);
ff_put_pixels_clamped(block, dest, line_size);
}
static void xvid_idct_mmx_add(uint8_t *dest, int line_size, short *block)
{
ff_xvid_idct_mmx(block);
ff_add_pixels_clamped(block, dest, line_size);
}
static void xvid_idct_mmxext_put(uint8_t *dest, int line_size, short *block)
{
ff_xvid_idct_mmxext(block);
ff_put_pixels_clamped(block, dest, line_size);
}
static void xvid_idct_mmxext_add(uint8_t *dest, int line_size, short *block)
{
ff_xvid_idct_mmxext(block);
ff_add_pixels_clamped(block, dest, line_size);
}
#endif
av_cold void ff_xvid_idct_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
                                   unsigned high_bit_depth)
{
@@ -48,19 +74,21 @@ av_cold void ff_xvid_idct_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
         avctx->idct_algo == FF_IDCT_XVID))
         return;

-    if (INLINE_MMX(cpu_flags)) {
-        c->idct_put = ff_xvid_idct_mmx_put;
-        c->idct_add = ff_xvid_idct_mmx_add;
+#if ARCH_X86_32
+    if (EXTERNAL_MMX(cpu_flags)) {
+        c->idct_put = xvid_idct_mmx_put;
+        c->idct_add = xvid_idct_mmx_add;
         c->idct = ff_xvid_idct_mmx;
         c->perm_type = FF_IDCT_PERM_NONE;
     }

-    if (INLINE_MMXEXT(cpu_flags)) {
-        c->idct_put = ff_xvid_idct_mmxext_put;
-        c->idct_add = ff_xvid_idct_mmxext_add;
+    if (EXTERNAL_MMXEXT(cpu_flags)) {
+        c->idct_put = xvid_idct_mmxext_put;
+        c->idct_add = xvid_idct_mmxext_add;
         c->idct = ff_xvid_idct_mmxext;
         c->perm_type = FF_IDCT_PERM_NONE;
     }
+#endif

     if (EXTERNAL_SSE2(cpu_flags)) {
         c->idct_put = xvid_idct_sse2_put;

View File

@@ -1,549 +0,0 @@
/*
* XVID MPEG-4 VIDEO CODEC
* - MMX and XMM forward discrete cosine transform -
*
* Copyright(C) 2001 Peter Ross <pross@xvid.org>
*
* Originally provided by Intel at AP-922
* http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
* (See more app notes at http://developer.intel.com/vtune/cbts/strmsimd/appnotes.htm)
* but in a limited edition.
* New macro implements a column part for precise iDCT
* The routine precision now satisfies IEEE standard 1180-1990.
*
* Copyright(C) 2000-2001 Peter Gubanov <peter@elecard.net.ru>
* Rounding trick Copyright(C) 2000 Michel Lespinasse <walken@zoy.org>
*
* http://www.elecard.com/peter/idct.html
* http://www.linuxvideo.org/mpeg2dec/
*
* These examples contain code fragments for first stage iDCT 8x8
* (for rows) and first stage DCT 8x8 (for columns)
*
* conversion to gcc syntax by Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <inttypes.h>
#include "config.h"
#include "libavutil/mem.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/idctdsp.h"
#include "idctdsp.h"
#include "xvididct.h"
#if HAVE_MMX_INLINE
// -----------------------------------------------------------------------------
// Various memory constants (trigonometric values or rounding values)
// -----------------------------------------------------------------------------
DECLARE_ALIGNED(8, static const int16_t, tg_1_16)[4 * 4] = {
13036, 13036, 13036, 13036, // tg * (2 << 16) + 0.5
27146, 27146, 27146, 27146, // tg * (2 << 16) + 0.5
-21746, -21746, -21746, -21746, // tg * (2 << 16) + 0.5
23170, 23170, 23170, 23170
}; // cos * (2 << 15) + 0.5
DECLARE_ALIGNED(8, static const int32_t, rounder_0)[2 * 8] = {
65536, 65536,
3597, 3597,
2260, 2260,
1203, 1203,
0, 0,
120, 120,
512, 512,
512, 512
};
// -----------------------------------------------------------------------------
//
// The first stage iDCT 8x8 - inverse DCTs of rows
//
// -----------------------------------------------------------------------------
// The 8-point inverse DCT direct algorithm
// -----------------------------------------------------------------------------
//
// static const short w[32] = {
// FIX(cos_4_16), FIX(cos_2_16), FIX(cos_4_16), FIX(cos_6_16),
// FIX(cos_4_16), FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16),
// FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16), FIX(cos_2_16),
// FIX(cos_4_16), -FIX(cos_2_16), FIX(cos_4_16), -FIX(cos_6_16),
// FIX(cos_1_16), FIX(cos_3_16), FIX(cos_5_16), FIX(cos_7_16),
// FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16),
// FIX(cos_5_16), -FIX(cos_1_16), FIX(cos_7_16), FIX(cos_3_16),
// FIX(cos_7_16), -FIX(cos_5_16), FIX(cos_3_16), -FIX(cos_1_16) };
//
// #define DCT_8_INV_ROW(x, y)
// {
// int a0, a1, a2, a3, b0, b1, b2, b3;
//
// a0 = x[0] * w[0] + x[2] * w[1] + x[4] * w[2] + x[6] * w[3];
// a1 = x[0] * w[4] + x[2] * w[5] + x[4] * w[6] + x[6] * w[7];
// a2 = x[0] * w[8] + x[2] * w[9] + x[4] * w[10] + x[6] * w[11];
// a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15];
// b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19];
// b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23];
// b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27];
// b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31];
//
// y[0] = SHIFT_ROUND(a0 + b0);
// y[1] = SHIFT_ROUND(a1 + b1);
// y[2] = SHIFT_ROUND(a2 + b2);
// y[3] = SHIFT_ROUND(a3 + b3);
// y[4] = SHIFT_ROUND(a3 - b3);
// y[5] = SHIFT_ROUND(a2 - b2);
// y[6] = SHIFT_ROUND(a1 - b1);
// y[7] = SHIFT_ROUND(a0 - b0);
// }
//
// -----------------------------------------------------------------------------
//
// In this implementation the outputs of the iDCT-1D are multiplied
// for rows 0,4 - by cos_4_16,
// for rows 1,7 - by cos_1_16,
// for rows 2,6 - by cos_2_16,
// for rows 3,5 - by cos_3_16
// and are shifted to the left for better accuracy.
//
// For the constants used,
// FIX(float_const) = (short) (float_const * (1 << 15) + 0.5)
//
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Tables for mmx processors
// -----------------------------------------------------------------------------
// Table for rows 0,4 - constants are multiplied by cos_4_16
DECLARE_ALIGNED(8, static const int16_t, tab_i_04_mmx)[32 * 4] = {
16384, 16384, 16384, -16384, // movq-> w06 w04 w02 w00
21407, 8867, 8867, -21407, // w07 w05 w03 w01
16384, -16384, 16384, 16384, // w14 w12 w10 w08
-8867, 21407, -21407, -8867, // w15 w13 w11 w09
22725, 12873, 19266, -22725, // w22 w20 w18 w16
19266, 4520, -4520, -12873, // w23 w21 w19 w17
12873, 4520, 4520, 19266, // w30 w28 w26 w24
-22725, 19266, -12873, -22725, // w31 w29 w27 w25
// Table for rows 1,7 - constants are multiplied by cos_1_16
22725, 22725, 22725, -22725, // movq-> w06 w04 w02 w00
29692, 12299, 12299, -29692, // w07 w05 w03 w01
22725, -22725, 22725, 22725, // w14 w12 w10 w08
-12299, 29692, -29692, -12299, // w15 w13 w11 w09
31521, 17855, 26722, -31521, // w22 w20 w18 w16
26722, 6270, -6270, -17855, // w23 w21 w19 w17
17855, 6270, 6270, 26722, // w30 w28 w26 w24
-31521, 26722, -17855, -31521, // w31 w29 w27 w25
// Table for rows 2,6 - constants are multiplied by cos_2_16
21407, 21407, 21407, -21407, // movq-> w06 w04 w02 w00
27969, 11585, 11585, -27969, // w07 w05 w03 w01
21407, -21407, 21407, 21407, // w14 w12 w10 w08
-11585, 27969, -27969, -11585, // w15 w13 w11 w09
29692, 16819, 25172, -29692, // w22 w20 w18 w16
25172, 5906, -5906, -16819, // w23 w21 w19 w17
16819, 5906, 5906, 25172, // w30 w28 w26 w24
-29692, 25172, -16819, -29692, // w31 w29 w27 w25
// Table for rows 3,5 - constants are multiplied by cos_3_16
19266, 19266, 19266, -19266, // movq-> w06 w04 w02 w00
25172, 10426, 10426, -25172, // w07 w05 w03 w01
19266, -19266, 19266, 19266, // w14 w12 w10 w08
-10426, 25172, -25172, -10426, // w15 w13 w11 w09
26722, 15137, 22654, -26722, // w22 w20 w18 w16
22654, 5315, -5315, -15137, // w23 w21 w19 w17
15137, 5315, 5315, 22654, // w30 w28 w26 w24
-26722, 22654, -15137, -26722, // w31 w29 w27 w25
};
// -----------------------------------------------------------------------------
// Tables for xmm processors
// -----------------------------------------------------------------------------
// %3 for rows 0,4 - constants are multiplied by cos_4_16
DECLARE_ALIGNED(8, static const int16_t, tab_i_04_xmm)[32 * 4] = {
16384, 21407, 16384, 8867, // movq-> w05 w04 w01 w00
16384, 8867, -16384, -21407, // w07 w06 w03 w02
16384, -8867, 16384, -21407, // w13 w12 w09 w08
-16384, 21407, 16384, -8867, // w15 w14 w11 w10
22725, 19266, 19266, -4520, // w21 w20 w17 w16
12873, 4520, -22725, -12873, // w23 w22 w19 w18
12873, -22725, 4520, -12873, // w29 w28 w25 w24
4520, 19266, 19266, -22725, // w31 w30 w27 w26
// %3 for rows 1,7 - constants are multiplied by cos_1_16
22725, 29692, 22725, 12299, // movq-> w05 w04 w01 w00
22725, 12299, -22725, -29692, // w07 w06 w03 w02
22725, -12299, 22725, -29692, // w13 w12 w09 w08
-22725, 29692, 22725, -12299, // w15 w14 w11 w10
31521, 26722, 26722, -6270, // w21 w20 w17 w16
17855, 6270, -31521, -17855, // w23 w22 w19 w18
17855, -31521, 6270, -17855, // w29 w28 w25 w24
6270, 26722, 26722, -31521, // w31 w30 w27 w26
// %3 for rows 2,6 - constants are multiplied by cos_2_16
21407, 27969, 21407, 11585, // movq-> w05 w04 w01 w00
21407, 11585, -21407, -27969, // w07 w06 w03 w02
21407, -11585, 21407, -27969, // w13 w12 w09 w08
-21407, 27969, 21407, -11585, // w15 w14 w11 w10
29692, 25172, 25172, -5906, // w21 w20 w17 w16
16819, 5906, -29692, -16819, // w23 w22 w19 w18
16819, -29692, 5906, -16819, // w29 w28 w25 w24
5906, 25172, 25172, -29692, // w31 w30 w27 w26
// %3 for rows 3,5 - constants are multiplied by cos_3_16
19266, 25172, 19266, 10426, // movq-> w05 w04 w01 w00
19266, 10426, -19266, -25172, // w07 w06 w03 w02
19266, -10426, 19266, -25172, // w13 w12 w09 w08
-19266, 25172, 19266, -10426, // w15 w14 w11 w10
26722, 22654, 22654, -5315, // w21 w20 w17 w16
15137, 5315, -26722, -15137, // w23 w22 w19 w18
15137, -26722, 5315, -15137, // w29 w28 w25 w24
5315, 22654, 22654, -26722, // w31 w30 w27 w26
};
// =============================================================================
// Helper macros for the code
// =============================================================================
// -----------------------------------------------------------------------------
// DCT_8_INV_ROW_MMX( INP, OUT, TABLE, ROUNDER
// -----------------------------------------------------------------------------
#define DCT_8_INV_ROW_MMX(A1, A2, A3, A4) \
"movq "#A1", %%mm0 \n\t" /* 0 ; x3 x2 x1 x0 */ \
"movq 8+"#A1", %%mm1 \n\t" /* 1 ; x7 x6 x5 x4 */ \
"movq %%mm0, %%mm2 \n\t" /* 2 ; x3 x2 x1 x0 */ \
"movq "#A3", %%mm3 \n\t" /* 3 ; w06 w04 w02 w00 */ \
"punpcklwd %%mm1, %%mm0 \n\t" /* x5 x1 x4 x0 */ \
"movq %%mm0, %%mm5 \n\t" /* 5 ; x5 x1 x4 x0 */ \
"punpckldq %%mm0, %%mm0 \n\t" /* x4 x0 x4 x0 */ \
"movq 8+"#A3", %%mm4 \n\t" /* 4 ; w07 w05 w03 w01 */ \
"punpckhwd %%mm1, %%mm2 \n\t" /* 1 ; x7 x3 x6 x2 */ \
"pmaddwd %%mm0, %%mm3 \n\t" /* x4*w06+x0*w04 x4*w02+x0*w00 */ \
"movq %%mm2, %%mm6 \n\t" /* 6 ; x7 x3 x6 x2 */ \
"movq 32+"#A3", %%mm1 \n\t" /* 1 ; w22 w20 w18 w16 */ \
"punpckldq %%mm2, %%mm2 \n\t" /* x6 x2 x6 x2 */ \
"pmaddwd %%mm2, %%mm4 \n\t" /* x6*w07+x2*w05 x6*w03+x2*w01 */ \
"punpckhdq %%mm5, %%mm5 \n\t" /* x5 x1 x5 x1 */ \
"pmaddwd 16+"#A3", %%mm0 \n\t" /* x4*w14+x0*w12 x4*w10+x0*w08 */ \
"punpckhdq %%mm6, %%mm6 \n\t" /* x7 x3 x7 x3 */ \
"movq 40+ "#A3", %%mm7 \n\t" /* 7 ; w23 w21 w19 w17 */ \
"pmaddwd %%mm5, %%mm1 \n\t" /* x5*w22+x1*w20 x5*w18+x1*w16 */ \
"paddd "#A4", %%mm3 \n\t" /* +%4 */ \
"pmaddwd %%mm6, %%mm7 \n\t" /* x7*w23+x3*w21 x7*w19+x3*w17 */ \
"pmaddwd 24+"#A3", %%mm2 \n\t" /* x6*w15+x2*w13 x6*w11+x2*w09 */ \
"paddd %%mm4, %%mm3 \n\t" /* 4 ; a1=sum(even1) a0=sum(even0) */ \
"pmaddwd 48+"#A3", %%mm5 \n\t" /* x5*w30+x1*w28 x5*w26+x1*w24 */ \
"movq %%mm3, %%mm4 \n\t" /* 4 ; a1 a0 */ \
"pmaddwd 56+"#A3", %%mm6 \n\t" /* x7*w31+x3*w29 x7*w27+x3*w25 */ \
"paddd %%mm7, %%mm1 \n\t" /* 7 ; b1=sum(odd1) b0=sum(odd0) */ \
"paddd "#A4", %%mm0 \n\t" /* +%4 */ \
"psubd %%mm1, %%mm3 \n\t" /* a1-b1 a0-b0 */ \
"psrad $11, %%mm3 \n\t" /* y6=a1-b1 y7=a0-b0 */ \
"paddd %%mm4, %%mm1 \n\t" /* 4 ; a1+b1 a0+b0 */ \
"paddd %%mm2, %%mm0 \n\t" /* 2 ; a3=sum(even3) a2=sum(even2) */ \
"psrad $11, %%mm1 \n\t" /* y1=a1+b1 y0=a0+b0 */ \
"paddd %%mm6, %%mm5 \n\t" /* 6 ; b3=sum(odd3) b2=sum(odd2) */ \
"movq %%mm0, %%mm4 \n\t" /* 4 ; a3 a2 */ \
"paddd %%mm5, %%mm0 \n\t" /* a3+b3 a2+b2 */ \
"psubd %%mm5, %%mm4 \n\t" /* 5 ; a3-b3 a2-b2 */ \
"psrad $11, %%mm0 \n\t" /* y3=a3+b3 y2=a2+b2 */ \
"psrad $11, %%mm4 \n\t" /* y4=a3-b3 y5=a2-b2 */ \
"packssdw %%mm0, %%mm1 \n\t" /* 0 ; y3 y2 y1 y0 */ \
"packssdw %%mm3, %%mm4 \n\t" /* 3 ; y6 y7 y4 y5 */ \
"movq %%mm4, %%mm7 \n\t" /* 7 ; y6 y7 y4 y5 */ \
"psrld $16, %%mm4 \n\t" /* 0 y6 0 y4 */ \
"pslld $16, %%mm7 \n\t" /* y7 0 y5 0 */ \
"movq %%mm1, "#A2" \n\t" /* 1 ; save y3 y2 y1 y0 */ \
"por %%mm4, %%mm7 \n\t" /* 4 ; y7 y6 y5 y4 */ \
"movq %%mm7, 8+"#A2" \n\t" /* 7 ; save y7 y6 y5 y4 */ \
// -----------------------------------------------------------------------------
// DCT_8_INV_ROW_XMM( INP, OUT, TABLE, ROUNDER
// -----------------------------------------------------------------------------
#define DCT_8_INV_ROW_XMM(A1, A2, A3, A4) \
"movq "#A1", %%mm0 \n\t" /* 0 ; x3 x2 x1 x0 */ \
"movq 8+"#A1", %%mm1 \n\t" /* 1 ; x7 x6 x5 x4 */ \
"movq %%mm0, %%mm2 \n\t" /* 2 ; x3 x2 x1 x0 */ \
"movq "#A3", %%mm3 \n\t" /* 3 ; w05 w04 w01 w00 */ \
"pshufw $0x88, %%mm0, %%mm0 \n\t" /* x2 x0 x2 x0 */ \
"movq 8+"#A3", %%mm4 \n\t" /* 4 ; w07 w06 w03 w02 */ \
"movq %%mm1, %%mm5 \n\t" /* 5 ; x7 x6 x5 x4 */ \
"pmaddwd %%mm0, %%mm3 \n\t" /* x2*w05+x0*w04 x2*w01+x0*w00 */ \
"movq 32+"#A3", %%mm6 \n\t" /* 6 ; w21 w20 w17 w16 */ \
"pshufw $0x88, %%mm1, %%mm1 \n\t" /* x6 x4 x6 x4 */ \
"pmaddwd %%mm1, %%mm4 \n\t" /* x6*w07+x4*w06 x6*w03+x4*w02 */ \
"movq 40+"#A3", %%mm7 \n\t" /* 7; w23 w22 w19 w18 */ \
"pshufw $0xdd, %%mm2, %%mm2 \n\t" /* x3 x1 x3 x1 */ \
"pmaddwd %%mm2, %%mm6 \n\t" /* x3*w21+x1*w20 x3*w17+x1*w16 */ \
"pshufw $0xdd, %%mm5, %%mm5 \n\t" /* x7 x5 x7 x5 */ \
"pmaddwd %%mm5, %%mm7 \n\t" /* x7*w23+x5*w22 x7*w19+x5*w18 */ \
"paddd "#A4", %%mm3 \n\t" /* +%4 */ \
"pmaddwd 16+"#A3", %%mm0 \n\t" /* x2*w13+x0*w12 x2*w09+x0*w08 */ \
"paddd %%mm4, %%mm3 \n\t" /* 4 ; a1=sum(even1) a0=sum(even0) */ \
"pmaddwd 24+"#A3", %%mm1 \n\t" /* x6*w15+x4*w14 x6*w11+x4*w10 */ \
"movq %%mm3, %%mm4 \n\t" /* 4 ; a1 a0 */ \
"pmaddwd 48+"#A3", %%mm2 \n\t" /* x3*w29+x1*w28 x3*w25+x1*w24 */ \
"paddd %%mm7, %%mm6 \n\t" /* 7 ; b1=sum(odd1) b0=sum(odd0) */ \
"pmaddwd 56+"#A3", %%mm5 \n\t" /* x7*w31+x5*w30 x7*w27+x5*w26 */ \
"paddd %%mm6, %%mm3 \n\t" /* a1+b1 a0+b0 */ \
"paddd "#A4", %%mm0 \n\t" /* +%4 */ \
"psrad $11, %%mm3 \n\t" /* y1=a1+b1 y0=a0+b0 */ \
"paddd %%mm1, %%mm0 \n\t" /* 1 ; a3=sum(even3) a2=sum(even2) */ \
"psubd %%mm6, %%mm4 \n\t" /* 6 ; a1-b1 a0-b0 */ \
"movq %%mm0, %%mm7 \n\t" /* 7 ; a3 a2 */ \
"paddd %%mm5, %%mm2 \n\t" /* 5 ; b3=sum(odd3) b2=sum(odd2) */ \
"paddd %%mm2, %%mm0 \n\t" /* a3+b3 a2+b2 */ \
"psrad $11, %%mm4 \n\t" /* y6=a1-b1 y7=a0-b0 */ \
"psubd %%mm2, %%mm7 \n\t" /* 2 ; a3-b3 a2-b2 */ \
"psrad $11, %%mm0 \n\t" /* y3=a3+b3 y2=a2+b2 */ \
"psrad $11, %%mm7 \n\t" /* y4=a3-b3 y5=a2-b2 */ \
"packssdw %%mm0, %%mm3 \n\t" /* 0 ; y3 y2 y1 y0 */ \
"packssdw %%mm4, %%mm7 \n\t" /* 4 ; y6 y7 y4 y5 */ \
"movq %%mm3, "#A2" \n\t" /* 3 ; save y3 y2 y1 y0 */ \
"pshufw $0xb1, %%mm7, %%mm7 \n\t" /* y7 y6 y5 y4 */ \
"movq %%mm7, 8+"#A2" \n\t" /* 7 ; save y7 y6 y5 y4 */ \
// -----------------------------------------------------------------------------
//
// The first stage DCT 8x8 - forward DCTs of columns
//
// The %2puts are multiplied
// for rows 0,4 - on cos_4_16,
// for rows 1,7 - on cos_1_16,
// for rows 2,6 - on cos_2_16,
// for rows 3,5 - on cos_3_16
// and are shifted to the left for rise of accuracy
//
// -----------------------------------------------------------------------------
//
// The 8-point scaled forward DCT algorithm (26a8m)
//
// -----------------------------------------------------------------------------
//
//#define DCT_8_FRW_COL(x, y)
// {
// short t0, t1, t2, t3, t4, t5, t6, t7;
// short tp03, tm03, tp12, tm12, tp65, tm65;
// short tp465, tm465, tp765, tm765;
//
// t0 = LEFT_SHIFT(x[0] + x[7]);
// t1 = LEFT_SHIFT(x[1] + x[6]);
// t2 = LEFT_SHIFT(x[2] + x[5]);
// t3 = LEFT_SHIFT(x[3] + x[4]);
// t4 = LEFT_SHIFT(x[3] - x[4]);
// t5 = LEFT_SHIFT(x[2] - x[5]);
// t6 = LEFT_SHIFT(x[1] - x[6]);
// t7 = LEFT_SHIFT(x[0] - x[7]);
//
// tp03 = t0 + t3;
// tm03 = t0 - t3;
// tp12 = t1 + t2;
// tm12 = t1 - t2;
//
// y[0] = tp03 + tp12;
// y[4] = tp03 - tp12;
//
// y[2] = tm03 + tm12 * tg_2_16;
// y[6] = tm03 * tg_2_16 - tm12;
//
// tp65 = (t6 + t5) * cos_4_16;
// tm65 = (t6 - t5) * cos_4_16;
//
// tp765 = t7 + tp65;
// tm765 = t7 - tp65;
// tp465 = t4 + tm65;
// tm465 = t4 - tm65;
//
// y[1] = tp765 + tp465 * tg_1_16;
// y[7] = tp765 * tg_1_16 - tp465;
// y[5] = tm765 * tg_3_16 + tm465;
// y[3] = tm765 - tm465 * tg_3_16;
// }
//
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// DCT_8_INV_COL_4 INP,OUT
// -----------------------------------------------------------------------------
#define DCT_8_INV_COL(A1, A2) \
"movq 2*8(%3), %%mm0 \n\t" \
"movq 16*3+"#A1", %%mm3 \n\t" \
"movq %%mm0, %%mm1 \n\t" /* tg_3_16 */ \
"movq 16*5+"#A1", %%mm5 \n\t" \
"pmulhw %%mm3, %%mm0 \n\t" /* x3*(tg_3_16-1) */ \
"movq (%3), %%mm4 \n\t" \
"pmulhw %%mm5, %%mm1 \n\t" /* x5*(tg_3_16-1) */ \
"movq 16*7+"#A1", %%mm7 \n\t" \
"movq %%mm4, %%mm2 \n\t" /* tg_1_16 */ \
"movq 16*1+"#A1", %%mm6 \n\t" \
"pmulhw %%mm7, %%mm4 \n\t" /* x7*tg_1_16 */ \
"paddsw %%mm3, %%mm0 \n\t" /* x3*tg_3_16 */ \
"pmulhw %%mm6, %%mm2 \n\t" /* x1*tg_1_16 */ \
"paddsw %%mm3, %%mm1 \n\t" /* x3+x5*(tg_3_16-1) */ \
"psubsw %%mm5, %%mm0 \n\t" /* x3*tg_3_16-x5 = tm35 */ \
"movq 3*8(%3), %%mm3 \n\t" \
"paddsw %%mm5, %%mm1 \n\t" /* x3+x5*tg_3_16 = tp35 */ \
"paddsw %%mm6, %%mm4 \n\t" /* x1+tg_1_16*x7 = tp17 */ \
"psubsw %%mm7, %%mm2 \n\t" /* x1*tg_1_16-x7 = tm17 */ \
"movq %%mm4, %%mm5 \n\t" /* tp17 */ \
"movq %%mm2, %%mm6 \n\t" /* tm17 */ \
"paddsw %%mm1, %%mm5 \n\t" /* tp17+tp35 = b0 */ \
"psubsw %%mm0, %%mm6 \n\t" /* tm17-tm35 = b3 */ \
"psubsw %%mm1, %%mm4 \n\t" /* tp17-tp35 = t1 */ \
"paddsw %%mm0, %%mm2 \n\t" /* tm17+tm35 = t2 */ \
"movq 1*8(%3), %%mm7 \n\t" \
"movq %%mm4, %%mm1 \n\t" /* t1 */ \
"movq %%mm5, 3*16+"#A2" \n\t" /* save b0 */ \
"paddsw %%mm2, %%mm1 \n\t" /* t1+t2 */ \
"movq %%mm6, 5*16+"#A2" \n\t" /* save b3 */ \
"psubsw %%mm2, %%mm4 \n\t" /* t1-t2 */ \
"movq 2*16+"#A1", %%mm5 \n\t" \
"movq %%mm7, %%mm0 \n\t" /* tg_2_16 */ \
"movq 6*16+"#A1", %%mm6 \n\t" \
"pmulhw %%mm5, %%mm0 \n\t" /* x2*tg_2_16 */ \
"pmulhw %%mm6, %%mm7 \n\t" /* x6*tg_2_16 */ \
"pmulhw %%mm3, %%mm1 \n\t" /* ocos_4_16*(t1+t2) = b1/2 */ \
"movq 0*16+"#A1", %%mm2 \n\t" \
"pmulhw %%mm3, %%mm4 \n\t" /* ocos_4_16*(t1-t2) = b2/2 */ \
"psubsw %%mm6, %%mm0 \n\t" /* t2*tg_2_16-x6 = tm26 */ \
"movq %%mm2, %%mm3 \n\t" /* x0 */ \
"movq 4*16+"#A1", %%mm6 \n\t" \
"paddsw %%mm5, %%mm7 \n\t" /* x2+x6*tg_2_16 = tp26 */ \
"paddsw %%mm6, %%mm2 \n\t" /* x0+x4 = tp04 */ \
"psubsw %%mm6, %%mm3 \n\t" /* x0-x4 = tm04 */ \
"movq %%mm2, %%mm5 \n\t" /* tp04 */ \
"movq %%mm3, %%mm6 \n\t" /* tm04 */ \
"psubsw %%mm7, %%mm2 \n\t" /* tp04-tp26 = a3 */ \
"paddsw %%mm0, %%mm3 \n\t" /* tm04+tm26 = a1 */ \
"paddsw %%mm1, %%mm1 \n\t" /* b1 */ \
"paddsw %%mm4, %%mm4 \n\t" /* b2 */ \
"paddsw %%mm7, %%mm5 \n\t" /* tp04+tp26 = a0 */ \
"psubsw %%mm0, %%mm6 \n\t" /* tm04-tm26 = a2 */ \
"movq %%mm3, %%mm7 \n\t" /* a1 */ \
"movq %%mm6, %%mm0 \n\t" /* a2 */ \
"paddsw %%mm1, %%mm3 \n\t" /* a1+b1 */ \
"paddsw %%mm4, %%mm6 \n\t" /* a2+b2 */ \
"psraw $6, %%mm3 \n\t" /* dst1 */ \
"psubsw %%mm1, %%mm7 \n\t" /* a1-b1 */ \
"psraw $6, %%mm6 \n\t" /* dst2 */ \
"psubsw %%mm4, %%mm0 \n\t" /* a2-b2 */ \
"movq 3*16+"#A2", %%mm1 \n\t" /* load b0 */ \
"psraw $6, %%mm7 \n\t" /* dst6 */ \
"movq %%mm5, %%mm4 \n\t" /* a0 */ \
"psraw $6, %%mm0 \n\t" /* dst5 */ \
"movq %%mm3, 1*16+"#A2" \n\t" \
"paddsw %%mm1, %%mm5 \n\t" /* a0+b0 */ \
"movq %%mm6, 2*16+"#A2" \n\t" \
"psubsw %%mm1, %%mm4 \n\t" /* a0-b0 */ \
"movq 5*16+"#A2", %%mm3 \n\t" /* load b3 */ \
"psraw $6, %%mm5 \n\t" /* dst0 */ \
"movq %%mm2, %%mm6 \n\t" /* a3 */ \
"psraw $6, %%mm4 \n\t" /* dst7 */ \
"movq %%mm0, 5*16+"#A2" \n\t" \
"paddsw %%mm3, %%mm2 \n\t" /* a3+b3 */ \
"movq %%mm7, 6*16+"#A2" \n\t" \
"psubsw %%mm3, %%mm6 \n\t" /* a3-b3 */ \
"movq %%mm5, 0*16+"#A2" \n\t" \
"psraw $6, %%mm2 \n\t" /* dst3 */ \
"movq %%mm4, 7*16+"#A2" \n\t" \
"psraw $6, %%mm6 \n\t" /* dst4 */ \
"movq %%mm2, 3*16+"#A2" \n\t" \
"movq %%mm6, 4*16+"#A2" \n\t" \
// =============================================================================
// Code
// =============================================================================
// -----------------------------------------------------------------------------
// void idct_mmx(uint16_t block[64]);
// -----------------------------------------------------------------------------
void ff_xvid_idct_mmx(short *block)
{
__asm__ volatile (
// # Process each row
DCT_8_INV_ROW_MMX(0 * 16(%0), 0 * 16(%0), 64 * 0(%2), 8 * 0(%1))
DCT_8_INV_ROW_MMX(1 * 16(%0), 1 * 16(%0), 64 * 1(%2), 8 * 1(%1))
DCT_8_INV_ROW_MMX(2 * 16(%0), 2 * 16(%0), 64 * 2(%2), 8 * 2(%1))
DCT_8_INV_ROW_MMX(3 * 16(%0), 3 * 16(%0), 64 * 3(%2), 8 * 3(%1))
DCT_8_INV_ROW_MMX(4 * 16(%0), 4 * 16(%0), 64 * 0(%2), 8 * 4(%1))
DCT_8_INV_ROW_MMX(5 * 16(%0), 5 * 16(%0), 64 * 3(%2), 8 * 5(%1))
DCT_8_INV_ROW_MMX(6 * 16(%0), 6 * 16(%0), 64 * 2(%2), 8 * 6(%1))
DCT_8_INV_ROW_MMX(7 * 16(%0), 7 * 16(%0), 64 * 1(%2), 8 * 7(%1))
// # Process the columns (4 at a time)
DCT_8_INV_COL(0(%0), 0(%0))
DCT_8_INV_COL(8(%0), 8(%0))
:: "r" (block), "r" (rounder_0), "r" (tab_i_04_mmx), "r" (tg_1_16));
}
void ff_xvid_idct_mmx_put(uint8_t *dest, int line_size, int16_t *block)
{
ff_xvid_idct_mmx(block);
ff_put_pixels_clamped(block, dest, line_size);
}
void ff_xvid_idct_mmx_add(uint8_t *dest, int line_size, int16_t *block)
{
ff_xvid_idct_mmx(block);
ff_add_pixels_clamped(block, dest, line_size);
}
#endif /* HAVE_MMX_INLINE */
#if HAVE_MMXEXT_INLINE
// -----------------------------------------------------------------------------
// void idct_xmm(uint16_t block[64]);
// -----------------------------------------------------------------------------
void ff_xvid_idct_mmxext(short *block)
{
__asm__ volatile (
// # Process each row
DCT_8_INV_ROW_XMM(0 * 16(%0), 0 * 16(%0), 64 * 0(%2), 8 * 0(%1))
DCT_8_INV_ROW_XMM(1 * 16(%0), 1 * 16(%0), 64 * 1(%2), 8 * 1(%1))
DCT_8_INV_ROW_XMM(2 * 16(%0), 2 * 16(%0), 64 * 2(%2), 8 * 2(%1))
DCT_8_INV_ROW_XMM(3 * 16(%0), 3 * 16(%0), 64 * 3(%2), 8 * 3(%1))
DCT_8_INV_ROW_XMM(4 * 16(%0), 4 * 16(%0), 64 * 0(%2), 8 * 4(%1))
DCT_8_INV_ROW_XMM(5 * 16(%0), 5 * 16(%0), 64 * 3(%2), 8 * 5(%1))
DCT_8_INV_ROW_XMM(6 * 16(%0), 6 * 16(%0), 64 * 2(%2), 8 * 6(%1))
DCT_8_INV_ROW_XMM(7 * 16(%0), 7 * 16(%0), 64 * 1(%2), 8 * 7(%1))
// # Process the columns (4 at a time)
DCT_8_INV_COL(0(%0), 0(%0))
DCT_8_INV_COL(8(%0), 8(%0))
:: "r" (block), "r" (rounder_0), "r" (tab_i_04_xmm), "r" (tg_1_16));
}
void ff_xvid_idct_mmxext_put(uint8_t *dest, int line_size, int16_t *block)
{
ff_xvid_idct_mmxext(block);
ff_put_pixels_clamped(block, dest, line_size);
}
void ff_xvid_idct_mmxext_add(uint8_t *dest, int line_size, int16_t *block)
{
ff_xvid_idct_mmxext(block);
ff_add_pixels_clamped(block, dest, line_size);
}
#endif /* HAVE_MMXEXT_INLINE */