lavr: x86: optimized 2-channel s16 to s16p conversion

Justin Ruggles 2012-05-02 17:53:15 -04:00
parent b66e20d2aa
commit 8eeffa8ada
3 changed files with 71 additions and 0 deletions

libavresample/x86/audio_convert.asm

@ -33,6 +33,7 @@ pf_s16_scale: times 4 dd 0x47000000
pb_shuf_unpack_even:   db -1, -1,  0,  1, -1, -1,  2,  3, -1, -1,  8,  9, -1, -1, 10, 11
pb_shuf_unpack_odd:    db -1, -1,  4,  5, -1, -1,  6,  7, -1, -1, 12, 13, -1, -1, 14, 15
pb_interleave_words:   SHUFFLE_MASK_W  0,  4,  1,  5,  2,  6,  3,  7
pb_deinterleave_words: SHUFFLE_MASK_W  0,  2,  4,  6,  1,  3,  5,  7

SECTION_TEXT
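Note: pb_deinterleave_words reuses the SHUFFLE_MASK_W macro from libavutil/x86/x86util.asm, which expands each word index into the two byte indices a pshufb mask needs. Expanded by hand (a reader's note, not part of the commit), the word order 0, 2, 4, 6, 1, 3, 5, 7 should yield the byte mask:

db 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15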
@ -812,3 +813,52 @@ CONV_FLTP_TO_FLT_6CH
INIT_XMM avx
CONV_FLTP_TO_FLT_6CH
%endif

;------------------------------------------------------------------------------
; void ff_conv_s16_to_s16p_2ch(int16_t *const *dst, int16_t *src, int len,
;                              int channels);
;------------------------------------------------------------------------------

%macro CONV_S16_TO_S16P_2CH 0
cglobal conv_s16_to_s16p_2ch, 3,4,4, dst0, src, len, dst1
    lea       lenq, [2*lend]
    mov      dst1q, [dst0q+gprsize]
    mov      dst0q, [dst0q        ]
    lea       srcq, [srcq+2*lenq]
    add      dst0q, lenq
    add      dst1q, lenq
    neg       lenq
%if cpuflag(ssse3)
    mova        m3, [pb_deinterleave_words]
%endif
.loop:
    mova        m0, [srcq+2*lenq       ]  ; m0 =  0,  1,  2,  3,  4,  5,  6,  7
    mova        m1, [srcq+2*lenq+mmsize]  ; m1 =  8,  9, 10, 11, 12, 13, 14, 15
%if cpuflag(ssse3)
    pshufb      m0, m3                    ; m0 =  0,  2,  4,  6,  1,  3,  5,  7
    pshufb      m1, m3                    ; m1 =  8, 10, 12, 14,  9, 11, 13, 15
    SBUTTERFLY2 qdq, 0, 1, 2              ; m0 =  0,  2,  4,  6,  8, 10, 12, 14
                                          ; m1 =  1,  3,  5,  7,  9, 11, 13, 15
%else ; sse2
    pshuflw     m0, m0, q3120             ; m0 =  0,  2,  1,  3,  4,  5,  6,  7
    pshufhw     m0, m0, q3120             ; m0 =  0,  2,  1,  3,  4,  6,  5,  7
    pshuflw     m1, m1, q3120             ; m1 =  8, 10,  9, 11, 12, 13, 14, 15
    pshufhw     m1, m1, q3120             ; m1 =  8, 10,  9, 11, 12, 14, 13, 15
    DEINT2_PS    0, 1, 2                  ; m0 =  0,  2,  4,  6,  8, 10, 12, 14
                                          ; m1 =  1,  3,  5,  7,  9, 11, 13, 15
%endif
    mova  [dst0q+lenq], m0
    mova  [dst1q+lenq], m1
    add       lenq, mmsize
    jl .loop
    REP_RET
%endmacro

INIT_XMM sse2
CONV_S16_TO_S16P_2CH
INIT_XMM ssse3
CONV_S16_TO_S16P_2CH
%if HAVE_AVX
INIT_XMM avx
CONV_S16_TO_S16P_2CH
%endif
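The macro deinterleaves packed 2-channel s16 into two planes, counting lenq up from -2*len bytes to 0 so the loop counter doubles as the store offset. As a reference for the semantics (a scalar sketch, not part of the commit; it assumes len is the per-channel sample count, matching the asm's lenq = 2*len byte scaling, and that channels is always 2 here):

#include <stdint.h>

/* Reference deinterleave: packed L R L R ... -> two planar buffers.
 * The SIMD versions require 16-byte-aligned pointers and len a
 * multiple of 8 samples; this scalar version has no such limits. */
static void conv_s16_to_s16p_2ch_ref(int16_t *const *dst, int16_t *src,
                                     int len, int channels)
{
    int16_t *dst0 = dst[0]; /* channel 0 plane */
    int16_t *dst1 = dst[1]; /* channel 1 plane */
    int i;
    (void)channels; /* fixed 2-channel layout */
    for (i = 0; i < len; i++) {
        dst0[i] = src[2 * i    ]; /* even words -> channel 0 */
        dst1[i] = src[2 * i + 1]; /* odd  words -> channel 1 */
    }
}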

libavresample/x86/audio_convert_init.c

@ -90,6 +90,15 @@ extern void ff_conv_fltp_to_flt_6ch_sse4(float *dst, float *const *src, int len,
extern void ff_conv_fltp_to_flt_6ch_avx (float *dst, float *const *src, int len,
                                         int channels);

/* deinterleave conversions */

extern void ff_conv_s16_to_s16p_2ch_sse2(int16_t *const *dst, int16_t *src,
                                         int len, int channels);
extern void ff_conv_s16_to_s16p_2ch_ssse3(int16_t *const *dst, int16_t *src,
                                          int len, int channels);
extern void ff_conv_s16_to_s16p_2ch_avx (int16_t *const *dst, int16_t *src,
                                         int len, int channels);

av_cold void ff_audio_convert_init_x86(AudioConvert *ac)
{
#if HAVE_YASM
@ -137,12 +146,16 @@ av_cold void ff_audio_convert_init_x86(AudioConvert *ac)
                                      6, 16, 4, "SSE2", ff_conv_s16p_to_flt_6ch_sse2);
        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLTP,
                                  2, 16, 4, "SSE2", ff_conv_fltp_to_s16_2ch_sse2);
        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S16,
                                  2, 16, 8, "SSE2", ff_conv_s16_to_s16p_2ch_sse2);
    }
    if (mm_flags & AV_CPU_FLAG_SSSE3 && HAVE_SSE) {
        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16P,
                                  6, 16, 4, "SSSE3", ff_conv_s16p_to_flt_6ch_ssse3);
        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLTP,
                                  2, 16, 4, "SSSE3", ff_conv_fltp_to_s16_2ch_ssse3);
        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S16,
                                  2, 16, 8, "SSSE3", ff_conv_s16_to_s16p_2ch_ssse3);
    }
    if (mm_flags & AV_CPU_FLAG_SSE4 && HAVE_SSE) {
        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16,
@ -167,6 +180,8 @@ av_cold void ff_audio_convert_init_x86(AudioConvert *ac)
                                      6, 16, 4, "AVX", ff_conv_fltp_to_s16_6ch_avx);
        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
                                  6, 16, 4, "AVX", ff_conv_fltp_to_flt_6ch_avx);
        ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S16,
                                  2, 16, 8, "AVX", ff_conv_s16_to_s16p_2ch_avx);
    }
#endif
}
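In these registrations, the three integers after the sample formats are the channel count, the required pointer alignment, and the required length alignment; 2, 16, 8 therefore limits the new versions to 2-channel buffers with 16-byte-aligned planes and a length that is a multiple of 8 samples, which matches the 8 output samples each aligned xmm store writes per plane per iteration. For context, ff_audio_convert_set_func is declared in lavr's internal audio_convert.h; to the best of my reading its signature is roughly the following (an assumption quoted from memory, not part of this commit):

void ff_audio_convert_set_func(AudioConvert *ac, enum AVSampleFormat out_fmt,
                               enum AVSampleFormat in_fmt, int channels,
                               int ptr_align, int samples_align,
                               const char *descr, void *conv);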

libavresample/x86/util.asm

@ -33,3 +33,9 @@
    psrad    m%1, 16
%endif
%endmacro

%macro DEINT2_PS 3 ; src0/even dst, src1/odd dst, temp
    shufps   m%3, m%1, m%2, q3131
    shufps   m%1, m%2, q2020
    SWAP %2, %3
%endmacro
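DEINT2_PS separates the even and odd 32-bit lanes of two registers; in the conversion above, each 32-bit lane is an already-paired pair of s16 words produced by the pshuflw/pshufhw step. A minimal SSE-intrinsics sketch of the same operation (hypothetical helper name, not from the tree; q3131 and q2020 correspond to _MM_SHUFFLE(3, 1, 3, 1) and _MM_SHUFFLE(2, 0, 2, 0)):

#include <xmmintrin.h> /* SSE: __m128, _mm_shuffle_ps, _MM_SHUFFLE */

/* After the call, *v0 holds the even 32-bit lanes of the original
 * (v0, v1) pair and *v1 holds the odd lanes, mirroring the macro's
 * shufps q3131 / q2020 pair followed by SWAP. */
static void deint2_ps(__m128 *v0, __m128 *v1)
{
    __m128 odd  = _mm_shuffle_ps(*v0, *v1, _MM_SHUFFLE(3, 1, 3, 1));
    __m128 even = _mm_shuffle_ps(*v0, *v1, _MM_SHUFFLE(2, 0, 2, 0));
    *v0 = even;
    *v1 = odd;
}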