From c4148a6668b768a6256db3ff554aae2bab6f30b1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20B=C5=93sch?=
Date: Fri, 28 Mar 2014 22:33:51 +0100
Subject: [PATCH] x86/vp9mc: add vp9 namespace.

---
 libavcodec/x86/vp9dsp_init.c | 40 ++++++++++++++++++--------------------
 libavcodec/x86/vp9mc.asm     | 14 +++++++-------
 2 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 011e13d9da..3fd274d17f 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -30,9 +30,9 @@
 #if HAVE_YASM
 
 #define fpel_func(avg, sz, opt) \
-void ff_##avg##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
-                          const uint8_t *src, ptrdiff_t src_stride, \
-                          int h, int mx, int my)
+void ff_vp9_##avg##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+                              const uint8_t *src, ptrdiff_t src_stride, \
+                              int h, int mx, int my)
 fpel_func(put, 4, mmx);
 fpel_func(put, 8, mmx);
 fpel_func(put, 16, sse);
@@ -46,9 +46,9 @@ fpel_func(avg, 64, sse2);
 #undef fpel_func
 
 #define mc_func(avg, sz, dir, opt) \
-void ff_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
-                                             const uint8_t *src, ptrdiff_t src_stride, \
-                                             int h, const int8_t (*filter)[16])
+void ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+                                                 const uint8_t *src, ptrdiff_t src_stride, \
+                                                 int h, const int8_t (*filter)[16])
 #define mc_funcs(sz) \
 mc_func(put, sz, h, ssse3); \
 mc_func(avg, sz, h, ssse3); \
@@ -66,14 +66,14 @@ mc_funcs(16);
 
 #define mc_rep_func(avg, sz, hsz, dir, opt) \
 static av_always_inline void \
-ff_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
-                                        const uint8_t *src, ptrdiff_t src_stride, \
-                                        int h, const int8_t (*filter)[16]) \
+ff_vp9_##avg##_8tap_1d_##dir##_##sz##_##opt(uint8_t *dst, ptrdiff_t dst_stride, \
+                                            const uint8_t *src, ptrdiff_t src_stride, \
+                                            int h, const int8_t (*filter)[16]) \
 { \
-    ff_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst, dst_stride, src, \
-                                             src_stride, h, filter); \
-    ff_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst + hsz, dst_stride, src + hsz, \
-                                             src_stride, h, filter); \
+    ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst, dst_stride, src, \
+                                                 src_stride, h, filter); \
+    ff_vp9_##avg##_8tap_1d_##dir##_##hsz##_##opt(dst + hsz, dst_stride, src + hsz, \
+                                                 src_stride, h, filter); \
 }
 
 #define mc_rep_funcs(sz, hsz) \
@@ -99,10 +99,10 @@ static void op##_8tap_##fname##_##sz##hv_ssse3(uint8_t *dst, ptrdiff_t dst_strid
                                                int h, int mx, int my) \
 { \
     LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); \
-    ff_put_8tap_1d_h_##sz##_ssse3(temp, 64, src - 3 * src_stride, src_stride, \
-                                  h + 7, ff_filters_ssse3[f][mx - 1]); \
-    ff_##op##_8tap_1d_v_##sz##_ssse3(dst, dst_stride, temp + 3 * 64, 64, \
-                                     h, ff_filters_ssse3[f][my - 1]); \
+    ff_vp9_put_8tap_1d_h_##sz##_ssse3(temp, 64, src - 3 * src_stride, src_stride, \
+                                      h + 7, ff_filters_ssse3[f][mx - 1]); \
+    ff_vp9_##op##_8tap_1d_v_##sz##_ssse3(dst, dst_stride, temp + 3 * 64, 64, \
+                                         h, ff_filters_ssse3[f][my - 1]); \
 }
 
 #define filters_8tap_2d_fn(op, sz) \
@@ -129,8 +129,8 @@ static void op##_8tap_##fname##_##sz##dir##_ssse3(uint8_t *dst, ptrdiff_t dst_st
                                                   const uint8_t *src, ptrdiff_t src_stride, \
                                                   int h, int mx, int my) \
 { \
-    ff_##op##_8tap_1d_##dir##_##sz##_ssse3(dst, dst_stride, src, src_stride, \
-                                           h, ff_filters_ssse3[f][dvar - 1]); \
+    ff_vp9_##op##_8tap_1d_##dir##_##sz##_ssse3(dst, dst_stride, src, src_stride, \
+                                               h, ff_filters_ssse3[f][dvar - 1]); \
 }
 
 #define filters_8tap_1d_fn(op, sz, dir, dvar) \
@@ -256,7 +256,7 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
     dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = \
     dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = \
     dsp->mc[idx1][FILTER_8TAP_SHARP  ][idx2][0][0] = \
-    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_##type##sz##_##opt
+    dsp->mc[idx1][FILTER_BILINEAR    ][idx2][0][0] = ff_vp9_##type##sz##_##opt
 
 #define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type, opt) \
     dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = type##_8tap_smooth_##sz##dir##_##opt; \
diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm
index 154ab2b4f9..7c2a38c8b3 100644
--- a/libavcodec/x86/vp9mc.asm
+++ b/libavcodec/x86/vp9mc.asm
@@ -87,7 +87,7 @@ SECTION .text
 
 %macro filter_h_fn 1
 %assign %%px mmsize/2
-cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
+cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, dstride, src, sstride, h, filtery
     mova        m6, [pw_256]
     mova        m7, [filteryq+ 0]
 %if ARCH_X86_64 && mmsize > 8
@@ -148,7 +148,7 @@ filter_h_fn avg
 %if ARCH_X86_64
 %macro filter_hx2_fn 1
 %assign %%px mmsize
-cglobal %1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, dstride, src, sstride, h, filtery
+cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, dstride, src, sstride, h, filtery
     mova       m13, [pw_256]
     mova        m8, [filteryq+ 0]
     mova        m9, [filteryq+16]
@@ -204,9 +204,9 @@ filter_hx2_fn avg
 %macro filter_v_fn 1
 %assign %%px mmsize/2
 %if ARCH_X86_64
-cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 11, dst, dstride, src, sstride, h, filtery, src4, sstride3
 %else
-cglobal %1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, dstride, src, sstride, filtery, src4, sstride3
     mov   filteryq, r5mp
 %define hd r4mp
 %endif
@@ -278,7 +278,7 @@ filter_v_fn avg
 
 %macro filter_vx2_fn 1
 %assign %%px mmsize
-cglobal %1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, dstride, src, sstride, h, filtery, src4, sstride3
+cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, dstride, src, sstride, h, filtery, src4, sstride3
     mova       m13, [pw_256]
     lea  sstride3q, [sstrideq*3]
     lea      src4q, [srcq+sstrideq]
@@ -348,11 +348,11 @@ filter_vx2_fn avg
 %endif
 
 %if %2 <= 16
-cglobal %1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
+cglobal vp9_%1%2, 5, 7, 4, dst, dstride, src, sstride, h, dstride3, sstride3
     lea  sstride3q, [sstrideq*3]
     lea  dstride3q, [dstrideq*3]
 %else
-cglobal %1%2, 5, 5, 4, dst, dstride, src, sstride, h
+cglobal vp9_%1%2, 5, 5, 4, dst, dstride, src, sstride, h
 %endif
 .loop:
     %%srcfn     m0, [srcq]