mirror of https://git.ffmpeg.org/ffmpeg.git
altivec patches by Romain Dolbeau
Originally committed as revision 1423 to svn://svn.ffmpeg.org/ffmpeg/trunk
This commit is contained in:
parent e366e6795d
commit 4013fcf4af
@@ -26,15 +26,16 @@
 int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 {
-    int s, i;
-    vector unsigned char *tv, zero;
+    int i;
+    int s __attribute__((aligned(16)));
+    const vector unsigned char zero = (const vector unsigned char)(0);
+    vector unsigned char *tv;
     vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
     vector unsigned int sad;
     vector signed int sumdiffs;

     s = 0;
-    zero = vec_splat_u8(0);
-    sad = vec_splat_u32(0);
+    sad = (vector unsigned int)(0);
     for(i=0;i<16;i++) {
         /*
            Read unaligned pixels into our vectors. The vectors are as follows:
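A note on the declaration pattern introduced above (and repeated in the hunks below): the scalar result becomes `int s __attribute__((aligned(16)))` and the zero vector becomes a `const` initialized at its declaration. Below is a minimal sketch of the accumulator-to-scalar idiom these declarations serve, assuming GCC-style AltiVec intrinsics from <altivec.h>; the helper name is hypothetical and not part of the patch.

#include <altivec.h>

/* Sketch only (not from the patch): reduce an AltiVec accumulator to a
   scalar the way the functions in this file do.  vec_sums folds the four
   32-bit partial sums into element 3, vec_splat copies that element to all
   lanes, and vec_ste stores one element to memory; keeping 's' 16-byte
   aligned keeps the element/address mapping predictable for vec_ste. */
static int extract_sum(vector unsigned int acc)
{
    int s __attribute__((aligned(16)));
    vector signed int v;

    v = vec_sums((vector signed int) acc, vec_splat_s32(0));
    v = vec_splat(v, 3);
    vec_ste(v, 0, &s);
    return s;
}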
@@ -72,16 +73,17 @@ int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 {
-    int s, i;
-    vector unsigned char *tv, zero;
+    int i;
+    int s __attribute__((aligned(16)));
+    const vector unsigned char zero = (const vector unsigned char)(0);
+    vector unsigned char *tv;
     vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
     vector unsigned int sad;
     vector signed int sumdiffs;
     uint8_t *pix3 = pix2 + line_size;

     s = 0;
-    zero = vec_splat_u8(0);
-    sad = vec_splat_u32(0);
+    sad = (vector unsigned int)(0);

     /*
        Due to the fact that pix3 = pix2 + line_size, the pix3 of one
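The comment truncated above is about row reuse: because `pix3 = pix2 + line_size`, the row loaded as `pix3` in one iteration is exactly the `pix2` row of the next, so only one new row needs to be loaded per line. A scalar sketch of that reuse, for illustration only (the helper name is hypothetical, not from the patch):

#include <stdint.h>
#include <stdlib.h>

/* Scalar illustration: SAD against the vertical half-pel average, loading
   each source row once because row 'b' of one iteration becomes row 'a'
   of the next (pix3 == pix2 + line_size). */
static int sad16_y2_ref(const uint8_t *pix1, const uint8_t *pix2, int line_size)
{
    int x, y, s = 0;
    const uint8_t *a = pix2;              /* current row  */
    const uint8_t *b = pix2 + line_size;  /* row below it */

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++)
            s += abs(pix1[x] - ((a[x] + b[x] + 1) >> 1));
        pix1 += line_size;
        a = b;               /* reuse: this iteration's pix3 is the next pix2 */
        b += line_size;
    }
    return s;
}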
@@ -131,20 +133,21 @@ int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 {
-    int s, i;
+    int i;
+    int s __attribute__((aligned(16)));
     uint8_t *pix3 = pix2 + line_size;
-    vector unsigned char *tv, avgv, t5, zero;
+    const vector unsigned char zero = (const vector unsigned char)(0);
+    const vector unsigned short two = (const vector unsigned short)(2);
+    vector unsigned char *tv, avgv, t5;
     vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
     vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
     vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
-    vector unsigned short avghv, avglv, two;
+    vector unsigned short avghv, avglv;
     vector unsigned short t1, t2, t3, t4;
     vector unsigned int sad;
     vector signed int sumdiffs;

-    zero = vec_splat_u8(0);
-    two = vec_splat_u16(2);
-    sad = vec_splat_u32(0);
+    sad = (vector unsigned int)(0);

     s = 0;

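For context, a scalar sketch of what the xy2 variant computes, and why the `two` constant above exists (it carries the +2 rounding term). This is an illustration only, not code from the patch:

#include <stdint.h>
#include <stdlib.h>

/* Scalar illustration: SAD against the 2x2 half-pel average of pix2; the
   '+ 2' is the rounding term supplied by the 'two' vector constant. */
static int sad16_xy2_ref(const uint8_t *pix1, const uint8_t *pix2, int line_size)
{
    int x, y, s = 0;
    const uint8_t *pix3 = pix2 + line_size;

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            int avg = (pix2[x] + pix2[x + 1] + pix3[x] + pix3[x + 1] + 2) >> 2;
            s += abs(pix1[x] - avg);
        }
        pix1 += line_size;
        pix2 += line_size;
        pix3 += line_size;
    }
    return s;
}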
@@ -231,13 +234,14 @@ int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 {
-    int i, s;
+    int i;
+    int s __attribute__((aligned(16)));
+    const vector unsigned int zero = (const vector unsigned int)(0);
     vector unsigned char perm1, perm2, *pix1v, *pix2v;
     vector unsigned char t1, t2, t3,t4, t5;
-    vector unsigned int sad, zero;
+    vector unsigned int sad;
     vector signed int sumdiffs;

-    zero = (vector unsigned int) (0);
     sad = (vector unsigned int) (0);

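`pix_abs16x16_altivec` and most functions in this file read from possibly unaligned pointers with the `vec_lvsl` / `vec_perm` pair. A minimal, self-contained sketch of that idiom follows, assuming GCC-style intrinsics; the helper is hypothetical:

#include <altivec.h>
#include <stdint.h>

/* Sketch only (not from the patch): load 16 bytes starting at an arbitrary
   address.  vec_lvsl builds the shuffle that realigns the two aligned
   quadwords straddling 'p'; vec_perm then extracts the 16 bytes that start
   at p.  Like the original code, this may read up to 15 bytes past the end
   of the block. */
static vector unsigned char load_unaligned(uint8_t *p)
{
    vector unsigned char perm = vec_lvsl(0, p);
    vector unsigned char *v   = (vector unsigned char *) p;

    return vec_perm(v[0], v[1], perm);
}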
@@ -272,14 +276,15 @@ int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 {
-    int i, s;
+    int i;
+    int s __attribute__((aligned(16)));
+    const vector unsigned int zero = (const vector unsigned int)(0);
     vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
     vector unsigned char t1, t2, t3,t4, t5;
-    vector unsigned int sad, zero;
+    vector unsigned int sad;
     vector signed int sumdiffs;

-    zero = (vector unsigned int) (0);
-    sad = (vector unsigned int) (0);
+    sad = (vector unsigned int)(0);
     permclear = (vector unsigned char) (255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);

     for(i=0;i<8;i++) {
@@ -315,14 +320,15 @@ int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 int pix_norm1_altivec(uint8_t *pix, int line_size)
 {
-    int s, i;
-    vector unsigned char *tv, zero;
+    int i;
+    int s __attribute__((aligned(16)));
+    const vector unsigned int zero = (const vector unsigned int)(0);
+    vector unsigned char *tv;
     vector unsigned char pixv;
     vector unsigned int sv;
     vector signed int sum;

-    zero = vec_splat_u8(0);
-    sv = vec_splat_u32(0);
+    sv = (vector unsigned int)(0);

     s = 0;
     for (i = 0; i < 16; i++) {
@@ -343,17 +349,122 @@ int pix_norm1_altivec(uint8_t *pix, int line_size)
     return s;
 }
+
+/**
+ * Sum of Squared Errors for a 8x8 block.
+ * AltiVec-enhanced.
+ * It's the pix_abs8x8_altivec code above w/ squaring added.
+ */
+int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
+{
+    int i;
+    int s __attribute__((aligned(16)));
+    const vector unsigned int zero = (const vector unsigned int)(0);
+    vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
+    vector unsigned char t1, t2, t3,t4, t5;
+    vector unsigned int sum;
+    vector signed int sumsqr;
+
+    sum = (vector unsigned int)(0);
+    permclear = (vector unsigned char)(0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);
+
+    for(i=0;i<8;i++) {
+        /* Read potentially unaligned pixels into t1 and t2
+           Since we're reading 16 pixels, and actually only want 8,
+           mask out the last 8 pixels. The 0s don't change the sum. */
+        perm1 = vec_lvsl(0, pix1);
+        pix1v = (vector unsigned char *) pix1;
+        perm2 = vec_lvsl(0, pix2);
+        pix2v = (vector unsigned char *) pix2;
+        t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
+        t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
+
+        /*
+          Since we want to use unsigned chars, we can take advantage
+          of the fact that abs(a-b)^2 = (a-b)^2.
+        */
+
+        /* Calculate abs differences vector */
+        t3 = vec_max(t1, t2);
+        t4 = vec_min(t1, t2);
+        t5 = vec_sub(t3, t4);
+
+        /* Square the values and add them to our sum */
+        sum = vec_msum(t5, t5, sum);
+
+        pix1 += line_size;
+        pix2 += line_size;
+    }
+
+    /* Sum up the four partial sums, and put the result into s */
+    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
+    sumsqr = vec_splat(sumsqr, 3);
+    vec_ste(sumsqr, 0, &s);
+
+    return s;
+}
+
+/**
+ * Sum of Squared Errors for a 16x16 block.
+ * AltiVec-enhanced.
+ * It's the pix_abs16x16_altivec code above w/ squaring added.
+ */
+int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
+{
+    int i;
+    int s __attribute__((aligned(16)));
+    const vector unsigned int zero = (const vector unsigned int)(0);
+    vector unsigned char perm1, perm2, *pix1v, *pix2v;
+    vector unsigned char t1, t2, t3,t4, t5;
+    vector unsigned int sum;
+    vector signed int sumsqr;
+
+    sum = (vector unsigned int)(0);
+
+    for(i=0;i<16;i++) {
+        /* Read potentially unaligned pixels into t1 and t2 */
+        perm1 = vec_lvsl(0, pix1);
+        pix1v = (vector unsigned char *) pix1;
+        perm2 = vec_lvsl(0, pix2);
+        pix2v = (vector unsigned char *) pix2;
+        t1 = vec_perm(pix1v[0], pix1v[1], perm1);
+        t2 = vec_perm(pix2v[0], pix2v[1], perm2);
+
+        /*
+          Since we want to use unsigned chars, we can take advantage
+          of the fact that abs(a-b)^2 = (a-b)^2.
+        */
+
+        /* Calculate abs differences vector */
+        t3 = vec_max(t1, t2);
+        t4 = vec_min(t1, t2);
+        t5 = vec_sub(t3, t4);
+
+        /* Square the values and add them to our sum */
+        sum = vec_msum(t5, t5, sum);
+
+        pix1 += line_size;
+        pix2 += line_size;
+    }
+
+    /* Sum up the four partial sums, and put the result into s */
+    sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
+    sumsqr = vec_splat(sumsqr, 3);
+    vec_ste(sumsqr, 0, &s);
+
+    return s;
+}

 int pix_sum_altivec(UINT8 * pix, int line_size)
 {

+    const vector unsigned int zero = (const vector unsigned int)(0);
     vector unsigned char perm, *pixv;
     vector unsigned char t1;
-    vector unsigned int sad, zero;
+    vector unsigned int sad;
     vector signed int sumdiffs;

-    int s, i;
-
-    zero = (vector unsigned int) (0);
+    int i;
+    int s __attribute__((aligned(16)));

     sad = (vector unsigned int) (0);

     for (i = 0; i < 16; i++) {
@@ -380,7 +491,7 @@ void get_pixels_altivec(DCTELEM *restrict block, const UINT8 *pixels, int line_s
 {
     int i;
     vector unsigned char perm, bytes, *pixv;
-    vector unsigned char zero = (vector unsigned char) (0);
+    const vector unsigned char zero = (const vector unsigned char) (0);
     vector signed short shorts;

     for(i=0;i<8;i++)
@@ -407,7 +518,7 @@ void diff_pixels_altivec(DCTELEM *restrict block, const UINT8 *s1,
 {
     int i;
     vector unsigned char perm, bytes, *pixv;
-    vector unsigned char zero = (vector unsigned char) (0);
+    const vector unsigned char zero = (const vector unsigned char) (0);
     vector signed short shorts1, shorts2;

     for(i=0;i<4;i++)
@@ -23,8 +23,37 @@ extern int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
 extern int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
 extern int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
 extern int pix_norm1_altivec(uint8_t *pix, int line_size);
+extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
+extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
 extern int pix_sum_altivec(UINT8 * pix, int line_size);
 extern void diff_pixels_altivec(DCTELEM* block, const UINT8* s1, const UINT8* s2, int stride);
 extern void get_pixels_altivec(DCTELEM* block, const UINT8 * pixels, int line_size);

 extern int has_altivec(void);
+
+
+
+// used to build registers permutation vectors (vcprm)
+// the 's' are for words in the _s_econd vector
+#define WORD_0 0x00,0x01,0x02,0x03
+#define WORD_1 0x04,0x05,0x06,0x07
+#define WORD_2 0x08,0x09,0x0a,0x0b
+#define WORD_3 0x0c,0x0d,0x0e,0x0f
+#define WORD_s0 0x10,0x11,0x12,0x13
+#define WORD_s1 0x14,0x15,0x16,0x17
+#define WORD_s2 0x18,0x19,0x1a,0x1b
+#define WORD_s3 0x1c,0x1d,0x1e,0x1f
+
+#define vcprm(a,b,c,d) (const vector unsigned char)(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
+
+// vcprmle is used to keep the same index as in the SSE version.
+// it's the same as vcprm, with the index inversed
+// ('le' is Little Endian)
+#define vcprmle(a,b,c,d) vcprm(d,c,b,a)
+
+// used to build inverse/identity vectors (vcii)
+// n is _n_egative, p is _p_ositive
+#define FLOAT_n -1.
+#define FLOAT_p 1.
+
+#define vcii(a,b,c,d) (const vector float)(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
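The macros moved into this header build permutation selectors and sign constants. A hedged usage sketch follows; the helper below is hypothetical and relies on the same old-style AltiVec vector-literal syntax the macros themselves assume (it will not compile in the default GCC literal mode):

#include "dsputil_altivec.h"   /* brings in vcprm() / vcii() as added above */

/* Illustration only: vcprm picks 32-bit words from two source vectors
   (plain digits = first vector, s-digits = second); vcii builds a +/-1.0f
   sign mask.  A typical combination with vec_perm and vec_madd: */
static vector float vcprm_vcii_example(vector float a, vector float b, vector float c)
{
    const vector unsigned char sel  = vcprm(0, 1, s0, s1); /* a0 a1 b0 b1 */
    const vector float         sign = vcii(p, p, n, n);    /* +1 +1 -1 -1 */

    return vec_madd(vec_perm(a, b, sel), sign, c);
}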
@@ -42,9 +42,12 @@ void dsputil_init_ppc(DSPContext* c, unsigned mask)
         c->pix_abs16x16 = pix_abs16x16_altivec;
         c->pix_abs8x8 = pix_abs8x8_altivec;
         c->pix_norm1 = pix_norm1_altivec;
+        c->sse[1]= sse8_altivec;
+        c->sse[0]= sse16_altivec;
+        c->pix_sum = pix_sum_altivec;
         c->diff_pixels = diff_pixels_altivec;
         c->get_pixels = get_pixels_altivec;

     } else
 #endif
     {
@@ -1,7 +1,7 @@
 /*
  * FFT/IFFT transforms
  * AltiVec-enabled
- * Copyright (c) 2002 Romain Dolbeau <romain@dolbeau.org>
+ * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
  * Based on code Copyright (c) 2002 Fabrice Bellard.
  *
  * This library is free software; you can redistribute it and/or
@@ -22,31 +22,6 @@
 #include "dsputil_altivec.h"

-// used to build registers permutation vectors (vcprm)
-// the 's' are for words in the _s_econd vector
-#define WORD_0 0x00,0x01,0x02,0x03
-#define WORD_1 0x04,0x05,0x06,0x07
-#define WORD_2 0x08,0x09,0x0a,0x0b
-#define WORD_3 0x0c,0x0d,0x0e,0x0f
-#define WORD_s0 0x10,0x11,0x12,0x13
-#define WORD_s1 0x14,0x15,0x16,0x17
-#define WORD_s2 0x18,0x19,0x1a,0x1b
-#define WORD_s3 0x1c,0x1d,0x1e,0x1f
-
-#define vcprm(a,b,c,d) (const vector unsigned char)(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
-
-// vcprmle is used to keep the same index as in the SSE version.
-// it's the same as vcprm, with the index inversed
-// ('le' is Little Endian)
-#define vcprmle(a,b,c,d) vcprm(d,c,b,a)
-
-// used to build inverse/identity vectors (vcii)
-// n is _n_egative, p is _p_ositive
-#define FLOAT_n -1.
-#define FLOAT_p 1.
-
-#define vcii(a,b,c,d) (const vector float)(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
-
 /**
  * Do a complex FFT with the parameters defined in fft_init(). The
  * input data must be permuted before with s->revtab table. No
@@ -55,16 +30,8 @@
  * This code assumes that the 'z' pointer is 16 bytes-aligned
  * It also assumes all FFTComplex are 8 bytes-aligned pair of float
  * The code is exactly the same as the SSE version, except
- * that successive MUL + ADD/SUB have been fusionned into
+ * that successive MUL + ADD/SUB have been merged into
  * fused multiply-add ('vec_madd' in altivec)
- *
- * To test this code you can use fft-test in libavcodec ; use
- * the following line in libavcodec to compile (MacOS X):
- * #####
- * gcc -I. -Ippc -no-cpp-precomp -pipe -O3 -fomit-frame-pointer -mdynamic-no-pic -Wall
- * -faltivec -DARCH_POWERPC -DHAVE_ALTIVEC -DCONFIG_DARWIN fft-test.c fft.c
- * ppc/fft_altivec.c ppc/dsputil_altivec.c mdct.c -DHAVE_LRINTF -o fft-test
- * #####
  */
 void fft_calc_altivec(FFTContext *s, FFTComplex *z)
 {
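The comment above refers to folding separate multiply and add/subtract steps into AltiVec's fused operations. A minimal illustration, assuming GCC-style intrinsics from <altivec.h>; this is not code from the patch:

#include <altivec.h>

/* Illustration only: where scalar or SSE code would issue a multiply
   followed by an add or a subtract, AltiVec can fuse both into one
   instruction. */
static vector float fused_ops(vector float x, vector float y, vector float z)
{
    vector float a = vec_madd(x, y, z);   /* x*y + z, fused            */
    vector float b = vec_nmsub(x, y, z);  /* z - x*y, also fused       */

    return vec_add(a, b);                 /* combined just to return both */
}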
@@ -100,7 +100,7 @@ int dct_quantize_altivec(MpegEncContext* s,
     int lastNonZero;
     vector float row0, row1, row2, row3, row4, row5, row6, row7;
     vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
-    const vector float zero = {FOUR_INSTANCES(0.0f)};
+    const vector float zero = (vector float)(FOUR_INSTANCES(0.0f));

     // Load the data into the row/alt vectors
     {
@@ -144,18 +144,18 @@ int dct_quantize_altivec(MpegEncContext* s,
     // in the vector local variables, as floats, which we'll use during the
     // quantize step...
     {
-        const vector float vec_0_298631336 = {FOUR_INSTANCES(0.298631336f)};
-        const vector float vec_0_390180644 = {FOUR_INSTANCES(-0.390180644f)};
-        const vector float vec_0_541196100 = {FOUR_INSTANCES(0.541196100f)};
-        const vector float vec_0_765366865 = {FOUR_INSTANCES(0.765366865f)};
-        const vector float vec_0_899976223 = {FOUR_INSTANCES(-0.899976223f)};
-        const vector float vec_1_175875602 = {FOUR_INSTANCES(1.175875602f)};
-        const vector float vec_1_501321110 = {FOUR_INSTANCES(1.501321110f)};
-        const vector float vec_1_847759065 = {FOUR_INSTANCES(-1.847759065f)};
-        const vector float vec_1_961570560 = {FOUR_INSTANCES(-1.961570560f)};
-        const vector float vec_2_053119869 = {FOUR_INSTANCES(2.053119869f)};
-        const vector float vec_2_562915447 = {FOUR_INSTANCES(-2.562915447f)};
-        const vector float vec_3_072711026 = {FOUR_INSTANCES(3.072711026f)};
+        const vector float vec_0_298631336 = (vector float)(FOUR_INSTANCES(0.298631336f));
+        const vector float vec_0_390180644 = (vector float)(FOUR_INSTANCES(-0.390180644f));
+        const vector float vec_0_541196100 = (vector float)(FOUR_INSTANCES(0.541196100f));
+        const vector float vec_0_765366865 = (vector float)(FOUR_INSTANCES(0.765366865f));
+        const vector float vec_0_899976223 = (vector float)(FOUR_INSTANCES(-0.899976223f));
+        const vector float vec_1_175875602 = (vector float)(FOUR_INSTANCES(1.175875602f));
+        const vector float vec_1_501321110 = (vector float)(FOUR_INSTANCES(1.501321110f));
+        const vector float vec_1_847759065 = (vector float)(FOUR_INSTANCES(-1.847759065f));
+        const vector float vec_1_961570560 = (vector float)(FOUR_INSTANCES(-1.961570560f));
+        const vector float vec_2_053119869 = (vector float)(FOUR_INSTANCES(2.053119869f));
+        const vector float vec_2_562915447 = (vector float)(FOUR_INSTANCES(-2.562915447f));
+        const vector float vec_3_072711026 = (vector float)(FOUR_INSTANCES(3.072711026f));


         int whichPass, whichHalf;
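These last hunks switch from C-style brace initializers to the parenthesised AltiVec vector-literal form. A hedged illustration of the two syntaxes; FOUR_OF below is a stand-in for the file's FOUR_INSTANCES macro, which is assumed to expand x to "x, x, x, x", and which form compiles depends on the compiler's AltiVec literal mode:

#include <altivec.h>

/* Stand-in for FOUR_INSTANCES (assumed definition, for illustration only). */
#define FOUR_OF(x) x, x, x, x

/* GCC's default AltiVec literal syntax: brace initializer. */
static const vector float half_braces = { FOUR_OF(0.5f) };

/* Old Apple/Motorola literal syntax, available with old-style literal
   support (e.g. Apple GCC's -faltivec); this is the form the patch
   standardizes on:
   static const vector float half_parens = (vector float)(FOUR_OF(0.5f));  */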