PPC: Altivec split-radix FFT

1.8x faster than altivec radix-2 on a G4
8% faster vorbis decoding

Patch (mostly) by Loren Merritt

Originally committed as revision 23956 to svn://svn.ffmpeg.org/ffmpeg/trunk
Måns Rullgård committed on 2010-07-01 23:21:39 +00:00
parent 135448fa1a
commit bf7ba15372
4 changed files with 398 additions and 100 deletions

libavcodec/ppc/Makefile

@@ -13,10 +13,14 @@ ALTIVEC-OBJS-$(CONFIG_MP3FLOAT_DECODER) += ppc/mpegaudiodec_altivec.o
 ALTIVEC-OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += ppc/mpegaudiodec_altivec.o
 ALTIVEC-OBJS-$(CONFIG_MP3ADUFLOAT_DECODER) += ppc/mpegaudiodec_altivec.o
 
+FFT-OBJS-$(HAVE_GNU_AS)                += ppc/fft_altivec_s.o
+ALTIVEC-OBJS-$(CONFIG_FFT)             += ppc/fft_altivec.o \
+                                          $(FFT-OBJS-yes)
+
 OBJS-$(HAVE_ALTIVEC)                   += ppc/check_altivec.o \
                                           ppc/dsputil_altivec.o \
                                           ppc/fdct_altivec.o \
-                                          ppc/fft_altivec.o \
                                           ppc/float_altivec.o \
                                           ppc/gmc_altivec.o \
                                           ppc/idct_altivec.o \

libavcodec/ppc/fft_altivec.c

@@ -1,8 +1,7 @@
 /*
  * FFT/IFFT transforms
  * AltiVec-enabled
- * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
- * Based on code Copyright (c) 2002 Fabrice Bellard
+ * Copyright (c) 2009 Loren Merritt
  *
  * This file is part of FFmpeg.
  *
@@ -22,7 +21,8 @@
  */
 #include "libavcodec/fft.h"
 #include "util_altivec.h"
-#include "dsputil_altivec.h"
+#include "types_altivec.h"
+#include "regs.h"
 
 /**
  * Do a complex FFT with the parameters defined in ff_fft_init(). The
@@ -31,107 +31,65 @@
  * AltiVec-enabled
  * This code assumes that the 'z' pointer is 16 bytes-aligned
  * It also assumes all FFTComplex are 8 bytes-aligned pair of float
- * The code is exactly the same as the SSE version, except
- * that successive MUL + ADD/SUB have been merged into
- * fused multiply-add ('vec_madd' in altivec)
  */
+
+// Pointers to functions. Not using function pointer syntax, because
+// that involves an extra level of indirection on some PPC ABIs.
+extern void *ff_fft_dispatch_altivec[2][15];
+
+// Convert from simd order to C order.
+static void swizzle(vec_f *z, int n)
+{
+    int i;
+    n >>= 1;
+    for (i = 0; i < n; i += 2) {
+        vec_f re = z[i];
+        vec_f im = z[i+1];
+        z[i]   = vec_mergeh(re, im);
+        z[i+1] = vec_mergel(re, im);
+    }
+}
+
 static void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
 {
-    register const vector float vczero = (const vector float)vec_splat_u32(0.);
-    int ln = s->nbits;
-    int j, np, np2;
-    int nblocks, nloops;
-    register FFTComplex *p, *q;
-    FFTComplex *cptr, *cptr1;
-    int k;
-    np = 1 << ln;
-    {
-        vector float *r, a, b, a1, c1, c2;
-        r = (vector float *)&z[0];
-        c1 = vcii(p,p,n,n);
-        if (s->inverse) {
-            c2 = vcii(p,p,n,p);
-        } else {
-            c2 = vcii(p,p,p,n);
-        }
-        j = (np >> 2);
-        do {
-            a = vec_ld(0, r);
-            a1 = vec_ld(sizeof(vector float), r);
-            b = vec_perm(a,a,vcprmle(1,0,3,2));
-            a = vec_madd(a,c1,b);
-            /* do the pass 0 butterfly */
-            b = vec_perm(a1,a1,vcprmle(1,0,3,2));
-            b = vec_madd(a1,c1,b);
-            /* do the pass 0 butterfly */
-            /* multiply third by -i */
-            b = vec_perm(b,b,vcprmle(2,3,1,0));
-            /* do the pass 1 butterfly */
-            vec_st(vec_madd(b,c2,a), 0, r);
-            vec_st(vec_nmsub(b,c2,a), sizeof(vector float), r);
-            r += 2;
-        } while (--j != 0);
-    }
-    /* pass 2 .. ln-1 */
-    nblocks = np >> 3;
-    nloops  = 1 << 2;
-    np2     = np >> 1;
-    cptr1 = s->exptab1;
-    do {
-        p = z;
-        q = z + nloops;
-        j = nblocks;
-        do {
-            cptr = cptr1;
-            k = nloops >> 1;
-            do {
-                vector float a,b,c,t1;
-                a = vec_ld(0, (float*)p);
-                b = vec_ld(0, (float*)q);
-                /* complex mul */
-                c = vec_ld(0, (float*)cptr);
-                /* cre*re cim*re */
-                t1 = vec_madd(c, vec_perm(b,b,vcprmle(2,2,0,0)),vczero);
-                c = vec_ld(sizeof(vector float), (float*)cptr);
-                /* -cim*im cre*im */
-                b = vec_madd(c, vec_perm(b,b,vcprmle(3,3,1,1)),t1);
-                /* butterfly */
-                vec_st(vec_add(a,b), 0, (float*)p);
-                vec_st(vec_sub(a,b), 0, (float*)q);
-                p += 2;
-                q += 2;
-                cptr += 4;
-            } while (--k);
-            p += nloops;
-            q += nloops;
-        } while (--j);
-        cptr1  += nloops * 2;
-        nblocks = nblocks >> 1;
-        nloops  = nloops << 1;
-    } while (nblocks != 0);
+    register vec_f  v14 __asm__("v14") = {0,0,0,0};
+    register vec_f  v15 __asm__("v15") = *(const vec_f*)ff_cos_16;
+    register vec_f  v16 __asm__("v16") = {0, 0.38268343, M_SQRT1_2, 0.92387953};
+    register vec_f  v17 __asm__("v17") = {-M_SQRT1_2, M_SQRT1_2, M_SQRT1_2,-M_SQRT1_2};
+    register vec_f  v18 __asm__("v18") = { M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2};
+    register vec_u8 v19 __asm__("v19") = vcprm(s0,3,2,1);
+    register vec_u8 v20 __asm__("v20") = vcprm(0,1,s2,s1);
+    register vec_u8 v21 __asm__("v21") = vcprm(2,3,s0,s3);
+    register vec_u8 v22 __asm__("v22") = vcprm(2,s3,3,s2);
+    register vec_u8 v23 __asm__("v23") = vcprm(0,1,s0,s1);
+    register vec_u8 v24 __asm__("v24") = vcprm(2,3,s2,s3);
+    register vec_u8 v25 __asm__("v25") = vcprm(2,3,0,1);
+    register vec_u8 v26 __asm__("v26") = vcprm(1,2,s3,s0);
+    register vec_u8 v27 __asm__("v27") = vcprm(0,3,s2,s1);
+    register vec_u8 v28 __asm__("v28") = vcprm(0,2,s1,s3);
+    register vec_u8 v29 __asm__("v29") = vcprm(1,3,s0,s2);
+    register FFTSample *const*cos_tabs __asm__("r12") = ff_cos_tabs;
+    register FFTComplex *zarg __asm__("r3") = z;
+
+    __asm__(
+        "mtctr %0               \n"   // CTR = fft entry point for this size
+        "li    "r(9)", 16       \n"   // r9 = 16, the fixed vector stride the asm expects
+        "subi  "r(1)","r(1)",%1 \n"   // reserve the LR save slots used by the asm recursion
+        "bctrl                  \n"
+        "addi  "r(1)","r(1)",%1 \n"
+        ::"r"(ff_fft_dispatch_altivec[1][s->nbits-2]), "i"(12*sizeof(void*)),
+          "r"(zarg), "r"(cos_tabs),
+          "v"(v14),"v"(v15),"v"(v16),"v"(v17),"v"(v18),"v"(v19),"v"(v20),"v"(v21),
+          "v"(v22),"v"(v23),"v"(v24),"v"(v25),"v"(v26),"v"(v27),"v"(v28),"v"(v29)
+        : "lr","ctr","r0","r4","r5","r6","r7","r8","r9","r10","r11",
+          "v0","v1","v2","v3","v4","v5","v6","v7","v8","v9","v10","v11","v12","v13"
+    );
+
+    if (s->nbits <= 4)
+        swizzle((vec_f*)z, 1<<s->nbits);
 }
 
 av_cold void ff_fft_init_altivec(FFTContext *s)
 {
-    s->fft_calc = ff_fft_calc_altivec;
-    s->split_radix = 0;
+    if (HAVE_GNU_AS)
+        s->fft_calc = ff_fft_calc_altivec;
+    s->split_radix = 1;
 }

libavcodec/ppc/fft_altivec_s.S (new file)

@@ -0,0 +1,335 @@
/*
* FFT transform with Altivec optimizations
* Copyright (c) 2009 Loren Merritt
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* These functions are not individually interchangeable with the C versions.
* While C takes arrays of FFTComplex, Altivec leaves intermediate results
* in blocks as convenient to the vector size.
* i.e. {4x real, 4x imaginary, 4x real, ...}
*
* I ignore standard calling convention.
* Instead, the following registers are treated as global constants:
* v14: zero
* v15..v18: cosines
* v19..v29: permutations
* r9: 16
* r12: ff_cos_tabs
* and the rest are free for local use.
*/
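
/*
 * Illustration (a sketch, not part of the file): one 8-float block in the
 * SIMD order above holds four real parts followed by four imaginary parts.
 * The C-side swizzle() in fft_altivec.c converts such a block back to C
 * order for the sizes that never run an interleaving pass; a scalar
 * equivalent would be:
 *
 *     // blk = {r0,r1,r2,r3, i0,i1,i2,i3}  ->  out = {r0,i0,r1,i1, ...}
 *     static void swizzle_scalar(const float blk[8], float out[8])
 *     {
 *         for (int i = 0; i < 4; i++) {
 *             out[2*i]     = blk[i];      // first vector: real parts
 *             out[2*i + 1] = blk[i + 4];  // second vector: imaginary parts
 *         }
 *     }
 */
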
#include "config.h"
#include "asm.S"
.text
/* Apple gas doesn't support this shorthand */
.macro mtctr rx
mtspr 9, \rx
.endm
.macro addi2 ra, imm // add 32-bit immediate
.if \imm & 0xffff
addi \ra, \ra, \imm@l
.endif
.if (\imm+0x8000)>>16
addis \ra, \ra, \imm@ha
.endif
.endm
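
// Worked example (added for illustration): addi sign-extends its 16-bit
// immediate, so when bit 15 of \imm is set the low half acts as a negative
// number and the high half must be rounded up; @ha and the
// (\imm+0x8000)>>16 guard both account for that.  In C:
//
//     #include <assert.h>
//     #include <stdint.h>
//     static void split_ha_l(int32_t imm, int32_t *ha, int32_t *lo)
//     {
//         *lo = (int16_t)(imm & 0xffff);                  // @l, sign-extended
//         *ha = (int32_t)(((int64_t)imm + 0x8000) >> 16); // @ha, compensated
//         assert((((int64_t)*ha << 16) + *lo) == imm);    // addis+addi == imm
//     }
//
// e.g. imm = 0x12349000 gives lo = -0x7000, ha = 0x1235,
// and 0x12350000 - 0x7000 == 0x12349000.
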
#if ARCH_PPC64
#define PTR .quad
.macro LOAD_PTR ra, rbase, offset
ld \ra,(\offset)*8(\rbase)
.endm
.macro STORE_PTR ra, rbase, offset
std \ra,(\offset)*8(\rbase)
.endm
#else
#define PTR .int
.macro LOAD_PTR ra, rbase, offset
lwz \ra,(\offset)*4(\rbase)
.endm
.macro STORE_PTR ra, rbase, offset
stw \ra,(\offset)*4(\rbase)
.endm
#endif
.macro FFT4 a0, a1, a2, a3 // in:0-1 out:2-3
vperm \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
vperm \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
vaddfp \a0,\a2,\a3 // {t1,t2,t6,t5}
vsubfp \a1,\a2,\a3 // {t3,t4,t8,t7}
vmrghw \a2,\a0,\a1 // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
vperm \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
vaddfp \a0,\a2,\a3 // {r0,r1,i0,i1}
vsubfp \a1,\a2,\a3 // {r2,r3,i2,i3}
vperm \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
vperm \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
.endm
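
// Scalar reference for one FFT4 (an assumed sketch, not FFmpeg API): a
// forward 4-point DFT whose input is already revtab-permuted, so
// z = {x0, x2, x1, x3}; output is in natural order.  Temporaries below do
// not correspond to the t1..t8 names in the macro comments.
//
//     #include <complex.h>
//     static void fft4_ref(float complex z[4])
//     {
//         float complex t1 = z[0] + z[1], t2 = z[0] - z[1]; // x0 +/- x2
//         float complex t3 = z[2] + z[3], t4 = z[2] - z[3]; // x1 +/- x3
//         z[0] = t1 + t3;      // X0 = x0 + x1 + x2 + x3
//         z[1] = t2 - I * t4;  // X1 = (x0 - x2) - i*(x1 - x3)
//         z[2] = t1 - t3;      // X2 = x0 - x1 + x2 - x3
//         z[3] = t2 + I * t4;  // X3 = (x0 - x2) + i*(x1 - x3)
//     }
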
.macro FFT4x2 a0, a1, b0, b1, a2, a3, b2, b3
vperm \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
vperm \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
vperm \b2,\b0,\b1,v20
vperm \b3,\b0,\b1,v21
vaddfp \a0,\a2,\a3 // {t1,t2,t6,t5}
vsubfp \a1,\a2,\a3 // {t3,t4,t8,t7}
vaddfp \b0,\b2,\b3
vsubfp \b1,\b2,\b3
vmrghw \a2,\a0,\a1 // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
vperm \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
vmrghw \b2,\b0,\b1
vperm \b3,\b0,\b1,v22
vaddfp \a0,\a2,\a3 // {r0,r1,i0,i1}
vsubfp \a1,\a2,\a3 // {r2,r3,i2,i3}
vaddfp \b0,\b2,\b3
vsubfp \b1,\b2,\b3
vperm \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
vperm \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
vperm \b2,\b0,\b1,v23
vperm \b3,\b0,\b1,v24
.endm
.macro FFT8 a0, a1, b0, b1, a2, a3, b2, b3, b4 // in,out:a0-b1
vmrghw \b2,\b0,\b1 // vcprm(0,s0,1,s1) // {r4,r6,i4,i6}
vmrglw \b3,\b0,\b1 // vcprm(2,s2,3,s3) // {r5,r7,i5,i7}
vperm \a2,\a0,\a1,v20 // FFT4 ...
vperm \a3,\a0,\a1,v21
vaddfp \b0,\b2,\b3 // {t1,t3,t2,t4}
vsubfp \b1,\b2,\b3 // {r5,r7,i5,i7}
vperm \b4,\b1,\b1,v25 // vcprm(2,3,0,1) // {i5,i7,r5,r7}
vaddfp \a0,\a2,\a3
vsubfp \a1,\a2,\a3
vmaddfp \b1,\b1,v17,v14 // * {-1,1,1,-1}/sqrt(2)
vmaddfp \b1,\b4,v18,\b1 // * { 1,1,1,1 }/sqrt(2) // {t8,ta,t7,t9}
vmrghw \a2,\a0,\a1
vperm \a3,\a0,\a1,v22
vperm \b2,\b0,\b1,v26 // vcprm(1,2,s3,s0) // {t3,t2,t9,t8}
vperm \b3,\b0,\b1,v27 // vcprm(0,3,s2,s1) // {t1,t4,t7,ta}
vaddfp \a0,\a2,\a3
vsubfp \a1,\a2,\a3
vaddfp \b0,\b2,\b3 // {t1,t2,t9,ta}
vsubfp \b1,\b2,\b3 // {t6,t5,tc,tb}
vperm \a2,\a0,\a1,v23
vperm \a3,\a0,\a1,v24
vperm \b2,\b0,\b1,v28 // vcprm(0,2,s1,s3) // {t1,t9,t5,tb}
vperm \b3,\b0,\b1,v29 // vcprm(1,3,s0,s2) // {t2,ta,t6,tc}
vsubfp \b0,\a2,\b2 // {r4,r5,r6,r7}
vsubfp \b1,\a3,\b3 // {i4,i5,i6,i7}
vaddfp \a0,\a2,\b2 // {r0,r1,r2,r3}
vaddfp \a1,\a3,\b3 // {i0,i1,i2,i3}
.endm
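
// Where the sqrt(1/2) constants in v17/v18 come from, shown in textbook
// radix-2 form (an assumed illustration; the macro's actual dataflow is the
// split-radix one traced in its comments): merging two 4-point DFTs into an
// 8-point DFT multiplies the odd branch by w8^k = e^(-i*pi*k/4), and
// w8^1 = (1-i)/sqrt(2), w8^3 = -(1+i)/sqrt(2).
//
//     #include <complex.h>
//     #include <math.h>
//     // E = 4-point DFT of even samples, O = 4-point DFT of odd samples
//     static void fft8_merge_ref(const float complex E[4],
//                                const float complex O[4],
//                                float complex X[8])
//     {
//         for (int k = 0; k < 4; k++) {
//             float complex t = cexpf(-I * (float)M_PI * k / 4) * O[k];
//             X[k]     = E[k] + t;
//             X[k + 4] = E[k] - t;
//         }
//     }
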
.macro BF d0,d1,s0,s1
vsubfp \d1,\s0,\s1
vaddfp \d0,\s0,\s1
.endm
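// i.e. (d0, d1) = (s0 + s1, s0 - s1); the difference is written first so
// that d0 may alias either source, which several call sites rely on.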
fft4_altivec:
lvx v0, 0,r3
lvx v1,r9,r3
FFT4 v0,v1,v2,v3
stvx v2, 0,r3
stvx v3,r9,r3
blr
fft8_altivec:
addi r4,r3,32
lvx v0, 0,r3
lvx v1,r9,r3
lvx v2, 0,r4
lvx v3,r9,r4
FFT8 v0,v1,v2,v3,v4,v5,v6,v7,v8
stvx v0, 0,r3
stvx v1,r9,r3
stvx v2, 0,r4
stvx v3,r9,r4
blr
fft16_altivec:
addi r5,r3,64
addi r6,r3,96
addi r4,r3,32
lvx v0, 0,r5
lvx v1,r9,r5
lvx v2, 0,r6
lvx v3,r9,r6
FFT4x2 v0,v1,v2,v3,v4,v5,v6,v7
lvx v0, 0,r3
lvx v1,r9,r3
lvx v2, 0,r4
lvx v3,r9,r4
FFT8 v0,v1,v2,v3,v8,v9,v10,v11,v12
vmaddfp v8,v4,v15,v14 // r2*wre
vmaddfp v9,v5,v15,v14 // i2*wre
vmaddfp v10,v6,v15,v14 // r3*wre
vmaddfp v11,v7,v15,v14 // i3*wre
vmaddfp v8,v5,v16,v8 // i2*wim
vnmsubfp v9,v4,v16,v9 // r2*wim
vnmsubfp v10,v7,v16,v10 // i3*wim
vmaddfp v11,v6,v16,v11 // r3*wim
BF v10,v12,v10,v8
BF v11,v13,v9,v11
BF v0,v4,v0,v10
BF v3,v7,v3,v12
stvx v0, 0,r3
stvx v4, 0,r5
stvx v3,r9,r4
stvx v7,r9,r6
BF v1,v5,v1,v11
BF v2,v6,v2,v13
stvx v1,r9,r3
stvx v5,r9,r5
stvx v2, 0,r4
stvx v6, 0,r6
blr
// void pass(float *z, float *wre, int n)
.macro PASS interleave, suffix
fft_pass\suffix\()_altivec:
mtctr r5
slwi r0,r5,4
slwi r7,r5,6 // o2
slwi r5,r5,5 // o1
add r10,r5,r7 // o3
add r0,r4,r0 // wim
addi r6,r5,16 // o1+16
addi r8,r7,16 // o2+16
addi r11,r10,16 // o3+16
1:
lvx v8, 0,r4 // wre
lvx v10, 0,r0 // wim
sub r0,r0,r9
lvx v9, 0,r0
vperm v9,v9,v10,v19 // vcprm(s0,3,2,1) => wim[0 .. -3]
lvx v4,r3,r7 // r2 = z[o2]
lvx v5,r3,r8 // i2 = z[o2+16]
lvx v6,r3,r10 // r3 = z[o3]
lvx v7,r3,r11 // i3 = z[o3+16]
vmaddfp v10,v4,v8,v14 // r2*wre
vmaddfp v11,v5,v8,v14 // i2*wre
vmaddfp v12,v6,v8,v14 // r3*wre
vmaddfp v13,v7,v8,v14 // i3*wre
lvx v0, 0,r3 // r0 = z[0]
lvx v3,r3,r6 // i1 = z[o1+16]
vmaddfp v10,v5,v9,v10 // i2*wim
vnmsubfp v11,v4,v9,v11 // r2*wim
vnmsubfp v12,v7,v9,v12 // i3*wim
vmaddfp v13,v6,v9,v13 // r3*wim
lvx v1,r3,r9 // i0 = z[16]
lvx v2,r3,r5 // r1 = z[o1]
BF v12,v8,v12,v10
BF v13,v9,v11,v13
BF v0,v4,v0,v12
BF v3,v7,v3,v8
.if !\interleave
stvx v0, 0,r3
stvx v4,r3,r7
stvx v3,r3,r6
stvx v7,r3,r11
.endif
BF v1,v5,v1,v13
BF v2,v6,v2,v9
.if !\interleave
stvx v1,r3,r9
stvx v2,r3,r5
stvx v5,r3,r8
stvx v6,r3,r10
.else
vmrghw v8,v0,v1
vmrglw v9,v0,v1
stvx v8, 0,r3
stvx v9,r3,r9
vmrghw v8,v2,v3
vmrglw v9,v2,v3
stvx v8,r3,r5
stvx v9,r3,r6
vmrghw v8,v4,v5
vmrglw v9,v4,v5
stvx v8,r3,r7
stvx v9,r3,r8
vmrghw v8,v6,v7
vmrglw v9,v6,v7
stvx v8,r3,r10
stvx v9,r3,r11
.endif
addi r3,r3,32
addi r4,r4,16
bdnz 1b
sub r3,r3,r5
blr
.endm
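
// What one pass accomplishes, in textbook split-radix form (an assumed
// scalar sketch; the asm works on {4x re, 4x im} blocks, multiplies z[o2]
// by conj(w) and z[o3] by w, i.e. the conjugate-pair variant, and reads wim
// backwards through v19, but the structure is the same).  On entry,
// z[0..N/2) holds a finished N/2-point FFT and z[N/2..3N/4), z[3N/4..N)
// hold two finished N/4-point FFTs:
//
//     #include <complex.h>
//     #include <math.h>
//     static void combine_ref(float complex *z, int N)
//     {
//         for (int k = 0; k < N/4; k++) {
//             float complex w1 = cexpf(-2*I*(float)M_PI*k/N);
//             float complex w3 = cexpf(-2*I*(float)M_PI*3*k/N);
//             float complex a  = w1*z[N/2 + k] + w3*z[3*N/4 + k];
//             float complex b  = w1*z[N/2 + k] - w3*z[3*N/4 + k];
//             float complex u0 = z[k], u1 = z[k + N/4];
//             z[k]         = u0 + a;
//             z[k + N/2]   = u0 - a;
//             z[k + N/4]   = u1 - I*b;
//             z[k + 3*N/4] = u1 + I*b;
//         }
//     }
//
// The _interleave variant additionally merges the re/im blocks back into
// {re,im} pairs (the vmrghw/vmrglw in the .else branch) while storing the
// results of the final pass.
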
.macro DECL_FFT suffix, bits, n, n2, n4
fft\n\suffix\()_altivec:
mflr r0
STORE_PTR r0,r1,\bits-5
bl fft\n2\()_altivec
addi2 r3,\n*4
bl fft\n4\()_altivec
addi2 r3,\n*2
bl fft\n4\()_altivec
addi2 r3,\n*-6
LOAD_PTR r0,r1,\bits-5
LOAD_PTR r4,r12,\bits
mtlr r0
li r5,\n/16
b fft_pass\suffix\()_altivec
.endm
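
// Hypothetical C shape of one expansion, here fft32 (names are illustrative
// only; the asm adds byte offsets to r3, so \n*4 bytes equals n/2
// FFTComplex elements):
//
//     static void fft32_shape(FFTComplex *z)
//     {
//         fft16_shape(z);         // half-size FFT on z[0..15]
//         fft8_shape(z + 16);     // quarter-size FFT on z[16..23]
//         fft8_shape(z + 24);     // quarter-size FFT on z[24..31]
//         fft_pass_shape(z, ff_cos_tabs[5], 32/16); // merge, 2 vector loops
//     }
//
// Because \bits is unique at each recursion depth, the saved LR lives in a
// fixed stack slot (\bits-5) rather than a stack frame; the 12 slots for
// bits 5..16 are exactly the "i"(12*sizeof(void*)) that ff_fft_calc_altivec
// subtracts from r1 before bctrl.
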
.macro DECL_FFTS interleave, suffix
.text
PASS \interleave, \suffix
DECL_FFT \suffix, 5, 32, 16, 8
DECL_FFT \suffix, 6, 64, 32, 16
DECL_FFT \suffix, 7, 128, 64, 32
DECL_FFT \suffix, 8, 256, 128, 64
DECL_FFT \suffix, 9, 512, 256, 128
DECL_FFT \suffix,10, 1024, 512, 256
DECL_FFT \suffix,11, 2048, 1024, 512
DECL_FFT \suffix,12, 4096, 2048, 1024
DECL_FFT \suffix,13, 8192, 4096, 2048
DECL_FFT \suffix,14,16384, 8192, 4096
DECL_FFT \suffix,15,32768,16384, 8192
DECL_FFT \suffix,16,65536,32768,16384
.rodata
.global EXTERN_ASM\()ff_fft_dispatch\suffix\()_altivec
EXTERN_ASM\()ff_fft_dispatch\suffix\()_altivec:
PTR fft4_altivec
PTR fft8_altivec
PTR fft16_altivec
PTR fft32\suffix\()_altivec
PTR fft64\suffix\()_altivec
PTR fft128\suffix\()_altivec
PTR fft256\suffix\()_altivec
PTR fft512\suffix\()_altivec
PTR fft1024\suffix\()_altivec
PTR fft2048\suffix\()_altivec
PTR fft4096\suffix\()_altivec
PTR fft8192\suffix\()_altivec
PTR fft16384\suffix\()_altivec
PTR fft32768\suffix\()_altivec
PTR fft65536\suffix\()_altivec
.endm
DECL_FFTS 0
DECL_FFTS 1, _interleave

libavcodec/ppc/types_altivec.h

@@ -30,6 +30,7 @@
 #define vec_s16 vector signed short
 #define vec_u32 vector unsigned int
 #define vec_s32 vector signed int
+#define vec_f vector float
 
 /***********************************************************************
  * Null vector