/*
 * audio resampling
 * Copyright (c) 2004-2012 Michael Niedermayer <michaelni@gmx.at>
 * bessel function: Copyright (c) 2006 Xiaogang Zhang
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio resampling
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/avassert.h"
#include "resample.h"
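
/* Evaluates coeff[0] + coeff[1]*x + ... + coeff[size-1]*x^(size-1) by
 * Horner's rule; e.g. for coeff = {1, 2, 3} this computes 1 + x*(2 + 3*x). */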
static inline double eval_poly(const double *coeff, int size, double x) {
    double sum = coeff[size-1];
    int i;
    for (i = size-2; i >= 0; --i) {
        sum *= x;
        sum += coeff[i];
    }
    return sum;
}

/**
 * 0th order modified bessel function of the first kind.
 * Algorithm taken from the Boost project, source:
 * https://searchcode.com/codesearch/view/14918379/
 * Use, modification and distribution are subject to the
 * Boost Software License, Version 1.0 (see notice below).
 * Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:

The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
 */
static double bessel(double x) {
    // Modified Bessel function of the first kind of order zero
    // minimax rational approximations on intervals, see
    // Blair and Edwards, Chalk River Report AECL-4928, 1974
    static const double p1[] = {
        -2.2335582639474375249e+15,
        -5.5050369673018427753e+14,
        -3.2940087627407749166e+13,
        -8.4925101247114157499e+11,
        -1.1912746104985237192e+10,
        -1.0313066708737980747e+08,
        -5.9545626019847898221e+05,
        -2.4125195876041896775e+03,
        -7.0935347449210549190e+00,
        -1.5453977791786851041e-02,
        -2.5172644670688975051e-05,
        -3.0517226450451067446e-08,
        -2.6843448573468483278e-11,
        -1.5982226675653184646e-14,
        -5.2487866627945699800e-18,
    };
    static const double q1[] = {
        -2.2335582639474375245e+15,
         7.8858692566751002988e+12,
        -1.2207067397808979846e+10,
         1.0377081058062166144e+07,
        -4.8527560179962773045e+03,
         1.0,
    };
    static const double p2[] = {
        -2.2210262233306573296e-04,
         1.3067392038106924055e-02,
        -4.4700805721174453923e-01,
         5.5674518371240761397e+00,
        -2.3517945679239481621e+01,
         3.1611322818701131207e+01,
        -9.6090021968656180000e+00,
    };
    static const double q2[] = {
        -5.5194330231005480228e-04,
         3.2547697594819615062e-02,
        -1.1151759188741312645e+00,
         1.3982595353892851542e+01,
        -6.0228002066743340583e+01,
         8.5539563258012929600e+01,
        -3.1446690275135491500e+01,
         1.0,
    };
    double y, r, factor;
    if (x == 0)
        return 1.0;
    x = fabs(x);
    if (x <= 15) {
        y = x * x;
        return eval_poly(p1, FF_ARRAY_ELEMS(p1), y) / eval_poly(q1, FF_ARRAY_ELEMS(q1), y);
    }
    else {
        y = 1 / x - 1.0 / 15;
        r = eval_poly(p2, FF_ARRAY_ELEMS(p2), y) / eval_poly(q2, FF_ARRAY_ELEMS(q2), y);
        factor = exp(x) / sqrt(x);
        return factor * r;
    }
}
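
/* Spot checks (reference digits truncated from arbitrary-precision results,
 * e.g. Wolfram Alpha "bessel i0(6.0)"; these arguments occur for the default
 * kaiser_beta = 9):
 *   bessel(6.0) = 67.234406976477975326...
 *   bessel(8.0) = 427.564115721804785177...
 * The rational approximation above matches these to ~1e-16 relative error,
 * whereas the former Taylor-Maclaurin expansion lost accuracy at large x. */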

/**
 * builds a polyphase filterbank.
 * @param factor resampling factor
 * @param scale wanted sum of coefficients for each filter
 * @param filter_type one of SWR_FILTER_TYPE_CUBIC, SWR_FILTER_TYPE_BLACKMAN_NUTTALL or SWR_FILTER_TYPE_KAISER
 * @param kaiser_beta kaiser window beta
 * @return 0 on success, negative on error
 */
static int build_filter(ResampleContext *c, void *filter, double factor, int tap_count, int alloc, int phase_count, int scale,
                        int filter_type, double kaiser_beta){
    int ph, i;
    double x, y, w, t, s;
    double *tab = av_malloc_array(tap_count+1, sizeof(*tab));
    double *sin_lut = av_malloc_array(phase_count / 2 + 1, sizeof(*sin_lut));
    const int center= (tap_count-1)/2;

    if (!tab || !sin_lut)
        goto fail;

    /* if upsampling, only need to interpolate, no filter */
    if (factor > 1.0)
        factor = 1.0;
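
    /* only the first half of the phases is computed below; the second half
     * is mirrored from it, which requires phase_count to be even (or a
     * trivial 1) */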
    av_assert0(phase_count == 1 || phase_count % 2 == 0);

    if (factor == 1.0) {
        for (ph = 0; ph <= phase_count / 2; ph++)
            sin_lut[ph] = sin(M_PI * ph / phase_count);
    }
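
    /* when factor == 1, |sin(x)| is the same for every tap of a given phase,
     * so a single precomputed value per phase replaces a sin() call per tap */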
    for(ph = 0; ph <= phase_count / 2; ph++) {
        double norm = 0;
        s = sin_lut[ph];
        for(i=0;i<=tap_count;i++) {
            x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor;
            if (x == 0) y = 1.0;
            else if (factor == 1.0)
                y = s / x;
            else
                y = sin(x) / x;
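            /* y is the ideal lowpass prototype sin(x)/x (sinc) at tap i of
             * phase ph; the window selected below tapers it */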
            switch(filter_type){
            case SWR_FILTER_TYPE_CUBIC:{
                const float d= -0.5; //first order derivative = -0.5
                x = fabs(((double)(i - center) - (double)ph / phase_count) * factor);
                if(x<1.0) y= 1 - 3*x*x + 2*x*x*x + d*(            -x*x + x*x*x);
                else      y=                      d*(-4 + 8*x - 5*x*x + x*x*x);
                break;}
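            /* Blackman-Nuttall window in centered form
             * a0 + a1*cos(w) + a2*cos(2w) + a3*cos(3w): with t = -cos(w), the
             * Chebyshev identities cos(2w) = 2*t*t - 1 and
             * cos(3w) = -(4*t*t*t - 3*t) yield all terms from one cos() call */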
            case SWR_FILTER_TYPE_BLACKMAN_NUTTALL:
                w  = 2.0*x / (factor*tap_count);
                t  = -cos(w);
                y *= 0.3635819 - 0.4891775 * t + 0.1365995 * (2*t*t-1) - 0.0106411 * (4*t*t*t - 3*t);
                break;
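            /* Kaiser window I0(beta * sqrt(1 - w*w)), with w in [-1, 1] over
             * the taps; the usual 1/I0(beta) factor is constant per filter
             * and cancels in the division by norm below */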
            case SWR_FILTER_TYPE_KAISER:
                w = 2.0*x / (factor*tap_count*M_PI);
                y *= bessel(kaiser_beta*sqrt(FFMAX(1-w*w, 0)));
                break;
            default:
                av_assert0(0);
            }

            tab[i] = y;
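            /* when factor == 1, x advances by exactly M_PI per tap, so sin(x)
             * only alternates sign; flipping s avoids recomputing it (any
             * constant sign offset cancels in the division by norm) */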
            s = -s;
            if (i < tap_count)
                norm += y;
        }

        /* normalize so that a constant (DC) input keeps the same level */
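        /* each branch also fills phase (phase_count - ph) by mirroring the
         * taps of phase ph, exploiting the even symmetry of the filters so
         * that only half of the phases is computed from scratch */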
        switch(c->format){
        case AV_SAMPLE_FMT_S16P:
            for(i=0;i<tap_count;i++)
                ((int16_t*)filter)[ph * alloc + i] = av_clip_int16(lrintf(tab[i] * scale / norm));
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((int16_t*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((int16_t*)filter)[ph * alloc + i];
            }
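            /* for odd tap counts the mirrored phase uses taps 1..tap_count
             * rather than 0..tap_count-1, so its normalization sum swaps
             * tab[0] for tab[tap_count] */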
            else {
                for (i = 1; i <= tap_count; i++)
                    ((int16_t*)filter)[(phase_count-ph) * alloc + tap_count-i] =
                        av_clip_int16(lrintf(tab[i] * scale / (norm - tab[0] + tab[tap_count])));
            }
            break;
        case AV_SAMPLE_FMT_S32P:
            for(i=0;i<tap_count;i++)
                ((int32_t*)filter)[ph * alloc + i] = av_clipl_int32(llrint(tab[i] * scale / norm));
            if (tap_count % 2 == 0 || tap_count == 1) {
                for (i = 0; i < tap_count; i++)
                    ((int32_t*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((int32_t*)filter)[ph * alloc + i];
            }
            else {
                for (i = 1; i <= tap_count; i++)
                    ((int32_t*)filter)[(phase_count-ph) * alloc + tap_count-i] =
                        av_clipl_int32(llrint(tab[i] * scale / (norm - tab[0] + tab[tap_count])));
            }
            break;
|
2012-04-28 09:19:22 +00:00
|
|
|
case AV_SAMPLE_FMT_FLTP:
|
2012-04-10 11:18:49 +00:00
|
|
|
for(i=0;i<tap_count;i++)
|
2012-06-19 01:06:40 +00:00
|
|
|
((float*)filter)[ph * alloc + i] = tab[i] * scale / norm;
|
2016-05-16 03:10:21 +00:00
|
|
|
if (tap_count % 2 == 0 || tap_count == 1) {
|
swresample/resample: speed up build_filter by 50%
2015-11-04 01:08:12 +00:00
|
|
|
for (i = 0; i < tap_count; i++)
|
|
|
|
((float*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((float*)filter)[ph * alloc + i];
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
for (i = 1; i <= tap_count; i++)
|
|
|
|
((float*)filter)[(phase_count-ph) * alloc + tap_count-i] = tab[i] * scale / (norm - tab[0] + tab[tap_count]);
|
|
|
|
}
|
2012-04-10 11:18:49 +00:00
|
|
|
break;
|
2012-04-28 09:19:22 +00:00
|
|
|
case AV_SAMPLE_FMT_DBLP:
|
2012-04-10 11:38:10 +00:00
|
|
|
for(i=0;i<tap_count;i++)
|
2012-06-19 01:06:40 +00:00
|
|
|
((double*)filter)[ph * alloc + i] = tab[i] * scale / norm;
|
2016-05-16 03:10:21 +00:00
|
|
|
if (tap_count % 2 == 0 || tap_count == 1) {
|
swresample/resample: speed up build_filter by 50%
2015-11-04 01:08:12 +00:00
|
|
|
for (i = 0; i < tap_count; i++)
|
|
|
|
((double*)filter)[(phase_count-ph) * alloc + tap_count-1-i] = ((double*)filter)[ph * alloc + i];
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
for (i = 1; i <= tap_count; i++)
|
|
|
|
((double*)filter)[(phase_count-ph) * alloc + tap_count-i] = tab[i] * scale / (norm - tab[0] + tab[tap_count]);
|
|
|
|
}
|
2012-04-10 11:38:10 +00:00
|
|
|
break;
|
2011-09-19 04:13:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#if 0
|
|
|
|
{
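/* Disabled self-test: sweeps cosines of increasing frequency through
 * phase 0 of the bank and logs min/max output power (ff) and input/output
 * correlation (sf), as a crude view of the filter's frequency response. */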
|
|
|
|
#define LEN 1024
|
|
|
|
int j,k;
|
|
|
|
double sine[LEN + tap_count];
|
|
|
|
double filtered[LEN];
|
|
|
|
double maxff=-2, minff=2, maxsf=-2, minsf=2;
|
|
|
|
for(i=0; i<LEN; i++){
|
|
|
|
double ss=0, sf=0, ff=0;
|
|
|
|
for(j=0; j<LEN+tap_count; j++)
|
|
|
|
sine[j]= cos(i*j*M_PI/LEN);
|
|
|
|
for(j=0; j<LEN; j++){
|
|
|
|
double sum=0;
|
|
|
|
ph=0;
|
|
|
|
for(k=0; k<tap_count; k++)
|
|
|
|
sum += filter[ph * tap_count + k] * sine[k+j];
|
|
|
|
filtered[j]= sum / (1<<FILTER_SHIFT);
|
|
|
|
ss+= sine[j + center] * sine[j + center];
|
|
|
|
ff+= filtered[j] * filtered[j];
|
|
|
|
sf+= sine[j + center] * filtered[j];
|
|
|
|
}
|
|
|
|
ss= sqrt(2*ss/LEN);
|
|
|
|
ff= sqrt(2*ff/LEN);
|
|
|
|
sf= 2*sf/LEN;
|
|
|
|
maxff= FFMAX(maxff, ff);
|
|
|
|
minff= FFMIN(minff, ff);
|
|
|
|
maxsf= FFMAX(maxsf, sf);
|
|
|
|
minsf= FFMIN(minsf, sf);
|
|
|
|
if(i%11==0){
|
|
|
|
av_log(NULL, AV_LOG_ERROR, "i:%4d ss:%f ff:%13.6e-%13.6e sf:%13.6e-%13.6e\n", i, ss, maxff, minff, maxsf, minsf);
|
|
|
|
minff=minsf= 2;
|
|
|
|
maxff=maxsf= -2;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
swresample/resample: speed up upsampling by precomputing sines
When upsampling, factor is set to 1 and sines need to be evaluated only
once for each phase, so the complexity should not depend on the number
of filter taps. This does the desired precomputation, yielding
significant speedups. Hard guarantees on the gain are not possible, but
the gains themselves are obvious and are illustrated below.
Sample benchmark (x86-64, Haswell, GNU/Linux):
test: fate-swr-resample-dblp-2626-44100
old:
29161085 decicycles in build_filter (loop 1000), 256 runs, 0 skips
28821467 decicycles in build_filter (loop 1000), 512 runs, 0 skips
28668201 decicycles in build_filter (loop 1000), 1000 runs, 24 skips
new:
14351936 decicycles in build_filter (loop 1000), 256 runs, 0 skips
14306652 decicycles in build_filter (loop 1000), 512 runs, 0 skips
14299923 decicycles in build_filter (loop 1000), 1000 runs, 24 skips
Note that this does not statically allocate the sine lookup table. That
could be done for the default 1024 phases, yielding a 512*8 = 4kB array,
which should be small enough and should yield a further small improvement.
Nevertheless, that is separate from this patch, is more ambiguous due to
the binary size increase, and requires a LUT to be generated offline.
Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Ganesh Ajjanagadde <gajjanagadde@gmail.com>
2015-11-09 02:39:32 +00:00
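The identity behind the precomputation can be checked in isolation (a sketch with an invented PHASES constant; the actual table layout in the patch differs): for factor == 1 the sinc numerator is sin(M_PI * (i + ph/PHASES)), which reduces to a per-phase value with a tap-parity sign flip.

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define PHASES 1024

int main(void)
{
    static double sin_lut[PHASES];
    int ph, i;

    /* One sin() per phase, independent of the number of taps. */
    for (ph = 0; ph < PHASES; ph++)
        sin_lut[ph] = sin(M_PI * ph / PHASES);

    /* sin(M_PI*(i + t)) == (-1)^i * sin(M_PI*t), so any tap's numerator
     * is a table lookup plus a sign flip: */
    ph = 37; i = 5;
    {
        double direct = sin(M_PI * (i + ph / (double)PHASES));
        double looked = (i & 1) ? -sin_lut[ph] : sin_lut[ph];
        printf("direct = %f, via LUT = %f\n", direct, looked);
    }
    return 0;
}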
|
|
|
fail:
|
2011-09-19 04:13:30 +00:00
|
|
|
av_free(tab);
|
swresample/resample: speed up upsampling by precomputing sines
2015-11-09 02:39:32 +00:00
|
|
|
av_free(sin_lut);
|
2011-09-19 04:13:30 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-12-11 17:36:58 +00:00
|
|
|
static ResampleContext *resample_init(ResampleContext *c, int out_rate, int in_rate, int filter_size, int phase_shift, int linear,
|
2015-11-07 15:16:27 +00:00
|
|
|
double cutoff0, enum AVSampleFormat format, enum SwrFilterType filter_type, double kaiser_beta,
|
2014-06-14 18:11:03 +00:00
|
|
|
double precision, int cheby)
|
|
|
|
{
|
2012-12-25 22:14:09 +00:00
|
|
|
double cutoff = cutoff0? cutoff0 : 0.97;
|
2011-09-19 04:13:30 +00:00
|
|
|
double factor= FFMIN(out_rate * cutoff / in_rate, 1.0);
|
|
|
|
int phase_count= 1<<phase_shift;
|
|
|
|
|
2011-10-10 13:54:09 +00:00
|
|
|
if (!c || c->phase_shift != phase_shift || c->linear!=linear || c->factor != factor
|
2012-05-26 18:50:02 +00:00
|
|
|
|| c->filter_length != FFMAX((int)ceil(filter_size/factor), 1) || c->format != format
|
|
|
|
|| c->filter_type != filter_type || c->kaiser_beta != kaiser_beta) {
|
2011-11-16 07:06:42 +00:00
|
|
|
c = av_mallocz(sizeof(*c));
|
2011-10-10 13:54:09 +00:00
|
|
|
if (!c)
|
|
|
|
return NULL;
|
|
|
|
|
2012-04-10 11:18:49 +00:00
|
|
|
c->format= format;
|
|
|
|
|
2012-04-11 07:57:38 +00:00
|
|
|
c->felem_size= av_get_bytes_per_sample(c->format);
|
|
|
|
|
2012-04-10 11:18:49 +00:00
|
|
|
switch(c->format){
|
2012-04-28 09:19:22 +00:00
|
|
|
case AV_SAMPLE_FMT_S16P:
|
2012-04-10 11:18:49 +00:00
|
|
|
c->filter_shift = 15;
|
|
|
|
break;
|
2012-04-28 09:19:22 +00:00
|
|
|
case AV_SAMPLE_FMT_S32P:
|
2012-04-10 11:18:49 +00:00
|
|
|
c->filter_shift = 30;
|
|
|
|
break;
|
2012-04-28 09:19:22 +00:00
|
|
|
case AV_SAMPLE_FMT_FLTP:
|
|
|
|
case AV_SAMPLE_FMT_DBLP:
|
2012-04-10 11:38:10 +00:00
|
|
|
c->filter_shift = 0;
|
|
|
|
break;
|
2012-04-10 11:18:49 +00:00
|
|
|
default:
|
|
|
|
av_log(NULL, AV_LOG_ERROR, "Unsupported sample format\n");
|
2012-10-11 02:36:23 +00:00
|
|
|
av_assert0(0);
|
2012-04-10 11:18:49 +00:00
|
|
|
}
|
|
|
|
|
2014-04-07 22:19:07 +00:00
|
|
|
if (filter_size/factor > INT32_MAX/256) {
|
|
|
|
av_log(NULL, AV_LOG_ERROR, "Filter length too large\n");
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2011-10-10 13:54:09 +00:00
|
|
|
c->phase_shift = phase_shift;
|
|
|
|
c->phase_mask = phase_count - 1;
|
|
|
|
c->linear = linear;
|
|
|
|
c->factor = factor;
|
|
|
|
c->filter_length = FFMAX((int)ceil(filter_size/factor), 1);
|
2012-06-19 01:06:40 +00:00
|
|
|
c->filter_alloc = FFALIGN(c->filter_length, 8);
|
2013-01-27 04:57:58 +00:00
|
|
|
c->filter_bank = av_calloc(c->filter_alloc, (phase_count+1)*c->felem_size);
|
2012-05-26 18:50:02 +00:00
|
|
|
c->filter_type = filter_type;
|
|
|
|
c->kaiser_beta = kaiser_beta;
|
2011-10-10 13:54:09 +00:00
|
|
|
if (!c->filter_bank)
|
|
|
|
goto error;
|
2012-05-26 18:50:02 +00:00
|
|
|
if (build_filter(c, (void*)c->filter_bank, factor, c->filter_length, c->filter_alloc, phase_count, 1<<c->filter_shift, filter_type, kaiser_beta))
|
2011-10-10 13:54:09 +00:00
|
|
|
goto error;
|
2012-06-19 01:06:40 +00:00
|
|
|
memcpy(c->filter_bank + (c->filter_alloc*phase_count+1)*c->felem_size, c->filter_bank, (c->filter_alloc-1)*c->felem_size);
|
|
|
|
memcpy(c->filter_bank + (c->filter_alloc*phase_count )*c->felem_size, c->filter_bank + (c->filter_alloc - 1)*c->felem_size, c->felem_size);
|
2011-09-19 04:13:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
c->compensation_distance= 0;
|
2011-10-27 12:31:53 +00:00
|
|
|
if(!av_reduce(&c->src_incr, &c->dst_incr, out_rate, in_rate * (int64_t)phase_count, INT32_MAX/2))
|
|
|
|
goto error;
|
2015-11-11 15:49:21 +00:00
|
|
|
while (c->dst_incr < (1<<20) && c->src_incr < (1<<20)) {
|
|
|
|
c->dst_incr *= 2;
|
|
|
|
c->src_incr *= 2;
|
|
|
|
}
|
2014-06-18 11:26:03 +00:00
|
|
|
c->ideal_dst_incr = c->dst_incr;
|
|
|
|
c->dst_incr_div = c->dst_incr / c->src_incr;
|
|
|
|
c->dst_incr_mod = c->dst_incr % c->src_incr;
|
2011-10-27 12:31:53 +00:00
|
|
|
|
2011-09-19 04:13:30 +00:00
|
|
|
c->index= -phase_count*((c->filter_length-1)/2);
|
|
|
|
c->frac= 0;
|
|
|
|
|
2014-07-02 03:05:08 +00:00
|
|
|
swri_resample_dsp_init(c);
|
2014-06-14 18:11:03 +00:00
|
|
|
|
2011-09-19 04:13:30 +00:00
|
|
|
return c;
|
|
|
|
error:
|
2013-09-16 20:44:15 +00:00
|
|
|
av_freep(&c->filter_bank);
|
2011-09-19 04:13:30 +00:00
|
|
|
av_free(c);
|
|
|
|
return NULL;
|
|
|
|
}
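For reference, the knobs that reach resample_init() are exposed as AVOptions on SwrContext. A hedged configuration sketch (pre-5.1 channel-layout API; option names as documented for libswresample, the concrete values here are invented):

#include <libswresample/swresample.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>

/* Build a 44.1 kHz -> 48 kHz planar-float resampler and tune the filter. */
static struct SwrContext *make_resampler(void)
{
    struct SwrContext *swr = swr_alloc();
    if (!swr)
        return NULL;
    av_opt_set_int       (swr, "in_channel_layout",  AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int       (swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int       (swr, "in_sample_rate",     44100,               0);
    av_opt_set_int       (swr, "out_sample_rate",    48000,               0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt",      AV_SAMPLE_FMT_FLTP,  0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt",     AV_SAMPLE_FMT_FLTP,  0);
    av_opt_set_int       (swr, "filter_size",        32,                  0); /* filter_size argument */
    av_opt_set_int       (swr, "phase_shift",        10,                  0); /* 1024 phases          */
    av_opt_set_double    (swr, "cutoff",             0.97,                0); /* default seen above   */
    if (swr_init(swr) < 0) {
        swr_free(&swr);
        return NULL;
    }
    return swr;
}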
|
|
|
|
|
2012-12-11 17:36:58 +00:00
|
|
|
static void resample_free(ResampleContext **c){
|
2011-09-19 04:13:30 +00:00
|
|
|
if(!*c)
|
|
|
|
return;
|
|
|
|
av_freep(&(*c)->filter_bank);
|
|
|
|
av_freep(c);
|
|
|
|
}
|
|
|
|
|
2012-12-11 17:36:58 +00:00
|
|
|
static int set_compensation(ResampleContext *c, int sample_delta, int compensation_distance){
|
2011-09-19 04:13:30 +00:00
|
|
|
c->compensation_distance= compensation_distance;
|
2012-01-09 00:42:38 +00:00
|
|
|
if (compensation_distance)
|
|
|
|
c->dst_incr = c->ideal_dst_incr - c->ideal_dst_incr * (int64_t)sample_delta / compensation_distance;
|
|
|
|
else
|
|
|
|
c->dst_incr = c->ideal_dst_incr;
|
2014-06-18 11:26:03 +00:00
|
|
|
|
|
|
|
c->dst_incr_div = c->dst_incr / c->src_incr;
|
|
|
|
c->dst_incr_mod = c->dst_incr % c->src_incr;
|
|
|
|
|
2012-01-09 00:42:38 +00:00
|
|
|
return 0;
|
2011-09-19 04:13:30 +00:00
|
|
|
}
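A back-of-the-envelope check of the adjustment above (standalone sketch with hypothetical numbers): lowering dst_incr by ideal_dst_incr * sample_delta / compensation_distance shifts the stream by roughly sample_delta samples over the compensation window.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t ideal_dst_incr = 1 << 20;      /* hypothetical increment    */
    int     sample_delta = 100;            /* samples to absorb         */
    int     compensation_distance = 48000; /* over this many outputs    */

    int64_t dst_incr = ideal_dst_incr
                     - ideal_dst_incr * (int64_t)sample_delta / compensation_distance;

    /* Accumulated position difference after the window, in samples: */
    double shift = (double)(ideal_dst_incr - dst_incr) * compensation_distance
                 / ideal_dst_incr;
    printf("shift = %f samples (~ sample_delta = %d)\n", shift, sample_delta);
    return 0;
}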
|
|
|
|
|
2014-06-14 18:11:03 +00:00
|
|
|
static int swri_resample(ResampleContext *c,
|
|
|
|
uint8_t *dst, const uint8_t *src, int *consumed,
|
|
|
|
int src_size, int dst_size, int update_ctx)
|
|
|
|
{
|
|
|
|
if (c->filter_length == 1 && c->phase_shift == 0) {
|
|
|
|
int index= c->index;
|
|
|
|
int frac= c->frac;
|
|
|
|
int64_t index2= (1LL<<32)*c->frac/c->src_incr + (1LL<<32)*index;
|
|
|
|
int64_t incr= (1LL<<32) * c->dst_incr / c->src_incr;
|
|
|
|
int new_size = (src_size * (int64_t)c->src_incr - frac + c->dst_incr - 1) / c->dst_incr;
|
|
|
|
|
|
|
|
dst_size= FFMIN(dst_size, new_size);
|
2014-07-03 01:16:48 +00:00
|
|
|
c->dsp.resample_one(dst, src, dst_size, index2, incr);
|
2014-06-14 18:11:03 +00:00
|
|
|
|
2014-06-18 11:26:03 +00:00
|
|
|
index += dst_size * c->dst_incr_div;
|
|
|
|
index += (frac + dst_size * (int64_t)c->dst_incr_mod) / c->src_incr;
|
2014-06-14 18:11:03 +00:00
|
|
|
av_assert2(index >= 0);
|
|
|
|
*consumed= index;
|
|
|
|
if (update_ctx) {
|
2014-06-18 11:26:03 +00:00
|
|
|
c->frac = (frac + dst_size * (int64_t)c->dst_incr_mod) % c->src_incr;
|
2014-06-14 18:11:03 +00:00
|
|
|
c->index = 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
int64_t end_index = (1LL + src_size - c->filter_length) << c->phase_shift;
|
|
|
|
int64_t delta_frac = (end_index - c->index) * c->src_incr - c->frac;
|
|
|
|
int delta_n = (delta_frac + c->dst_incr - 1) / c->dst_incr;
|
|
|
|
|
|
|
|
dst_size = FFMIN(dst_size, delta_n);
|
2014-06-28 12:00:05 +00:00
|
|
|
if (dst_size > 0) {
|
2014-07-03 01:16:48 +00:00
|
|
|
*consumed = c->dsp.resample(c, dst, src, dst_size, update_ctx);
|
2014-06-14 18:11:03 +00:00
|
|
|
} else {
|
2014-06-28 12:00:05 +00:00
|
|
|
*consumed = 0;
|
2014-06-14 18:11:03 +00:00
|
|
|
}
|
|
|
|
}
|
2014-05-15 22:54:00 +00:00
|
|
|
|
2014-06-14 18:11:03 +00:00
|
|
|
return dst_size;
|
|
|
|
}
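The filter_length == 1, phase_shift == 0 fast path above steps through the input in 32.32 fixed point. A stripped-down sketch of that stepping (nearest-sample selection on a double array; names and rates are invented, and this is not c->dsp.resample_one itself):

#include <stdint.h>
#include <stdio.h>

/* Nearest-sample resampling with one 32.32 fixed-point position and one
 * add per output sample, mirroring the index2/incr stepping above. */
static void resample_one_sketch(double *dst, const double *src,
                                int n, int64_t index2, int64_t incr)
{
    for (int i = 0; i < n; i++) {
        dst[i] = src[index2 >> 32]; /* integer part picks the source sample */
        index2 += incr;             /* fraction accumulates without drift   */
    }
}

int main(void)
{
    double src[8] = {0, 1, 2, 3, 4, 5, 6, 7}, dst[5];
    int src_incr = 3, dst_incr = 5; /* advance 5/3 input samples per output */
    int64_t incr = (1LL << 32) * dst_incr / src_incr;

    resample_one_sketch(dst, src, 5, 0, incr);
    for (int i = 0; i < 5; i++)
        printf("%g ", dst[i]);      /* prints: 0 1 3 4 6 */
    printf("\n");
    return 0;
}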
|
2011-09-19 04:13:30 +00:00
|
|
|
|
2012-12-11 17:36:58 +00:00
|
|
|
static int multiple_resample(ResampleContext *c, AudioData *dst, int dst_size, AudioData *src, int src_size, int *consumed){
|
2011-09-19 04:13:30 +00:00
|
|
|
int i, ret= -1;
|
2012-09-13 16:03:13 +00:00
|
|
|
int av_unused mm_flags = av_get_cpu_flags();
|
2014-06-14 18:11:03 +00:00
|
|
|
int need_emms = c->format == AV_SAMPLE_FMT_S16P && ARCH_X86_32 &&
|
|
|
|
(mm_flags & (AV_CPU_FLAG_MMX2 | AV_CPU_FLAG_SSE2)) == AV_CPU_FLAG_MMX2;
|
2014-06-09 12:09:49 +00:00
|
|
|
int64_t max_src_size = (INT64_MAX >> (c->phase_shift+1)) / c->src_incr;
|
2011-09-19 04:13:30 +00:00
|
|
|
|
2014-05-27 12:39:12 +00:00
|
|
|
if (c->compensation_distance)
|
|
|
|
dst_size = FFMIN(dst_size, c->compensation_distance);
|
2014-06-09 12:09:49 +00:00
|
|
|
src_size = FFMIN(src_size, max_src_size);
|
2014-05-27 12:39:12 +00:00
|
|
|
|
2011-09-19 04:13:30 +00:00
|
|
|
for(i=0; i<dst->ch_count; i++){
|
2014-06-14 18:11:03 +00:00
|
|
|
ret= swri_resample(c, dst->ch[i], src->ch[i],
|
|
|
|
consumed, src_size, dst_size, i+1==dst->ch_count);
|
2011-09-19 04:13:30 +00:00
|
|
|
}
|
2012-06-30 19:27:39 +00:00
|
|
|
if(need_emms)
|
|
|
|
emms_c();
|
2014-05-27 12:39:12 +00:00
|
|
|
|
|
|
|
if (c->compensation_distance) {
|
|
|
|
c->compensation_distance -= ret;
|
2014-06-18 11:26:03 +00:00
|
|
|
if (!c->compensation_distance) {
|
|
|
|
c->dst_incr = c->ideal_dst_incr;
|
|
|
|
c->dst_incr_div = c->dst_incr / c->src_incr;
|
|
|
|
c->dst_incr_mod = c->dst_incr % c->src_incr;
|
|
|
|
}
|
2014-05-27 12:39:12 +00:00
|
|
|
}
|
|
|
|
|
2011-09-19 04:13:30 +00:00
|
|
|
return ret;
|
|
|
|
}
|
2012-05-18 21:02:10 +00:00
|
|
|
|
2012-12-11 17:36:58 +00:00
|
|
|
static int64_t get_delay(struct SwrContext *s, int64_t base){
|
2012-05-18 21:02:10 +00:00
|
|
|
ResampleContext *c = s->resample;
|
2012-12-11 17:36:58 +00:00
|
|
|
int64_t num = s->in_buffer_count - (c->filter_length-1)/2;
|
2015-03-14 00:15:37 +00:00
|
|
|
num *= 1 << c->phase_shift;
|
2012-12-11 17:36:58 +00:00
|
|
|
num -= c->index;
|
|
|
|
num *= c->src_incr;
|
|
|
|
num -= c->frac;
|
|
|
|
return av_rescale(num, base, s->in_sample_rate*(int64_t)c->src_incr << c->phase_shift);
|
2012-05-18 21:02:10 +00:00
|
|
|
}
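This is what backs the public swr_get_delay(); the base argument selects the unit. A small usage fragment (assuming an already-initialized SwrContext *swr):

#include <inttypes.h>
#include <libswresample/swresample.h>
#include <libavutil/log.h>

/* Report the buffered delay in input samples, output samples and ms. */
static void report_delay(struct SwrContext *swr, int in_rate, int out_rate)
{
    int64_t d_in  = swr_get_delay(swr, in_rate);
    int64_t d_out = swr_get_delay(swr, out_rate);
    int64_t d_ms  = swr_get_delay(swr, 1000);
    av_log(NULL, AV_LOG_INFO,
           "delay: %"PRId64" in-samples, %"PRId64" out-samples, %"PRId64" ms\n",
           d_in, d_out, d_ms);
}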
|
2012-12-11 17:36:58 +00:00
|
|
|
|
2015-06-02 23:22:25 +00:00
|
|
|
static int64_t get_out_samples(struct SwrContext *s, int in_samples) {
|
|
|
|
ResampleContext *c = s->resample;
|
2015-06-04 11:04:09 +00:00
|
|
|
// The + 2 are added to allow implementations to be slightly inaccurate; they should not be needed currently.
|
2015-06-02 23:22:25 +00:00
|
|
|
// They also make it easier to prove that changes and optimizations do not
|
2015-06-04 11:04:09 +00:00
|
|
|
// break the upper bound.
|
2015-06-02 23:22:25 +00:00
|
|
|
int64_t num = s->in_buffer_count + 2LL + in_samples;
|
|
|
|
num *= 1 << c->phase_shift;
|
|
|
|
num -= c->index;
|
|
|
|
num = av_rescale_rnd(num, s->out_sample_rate, ((int64_t)s->in_sample_rate) << c->phase_shift, AV_ROUND_UP) + 2;
|
|
|
|
|
|
|
|
if (c->compensation_distance) {
|
|
|
|
if (num > INT_MAX)
|
|
|
|
return AVERROR(EINVAL);
|
|
|
|
|
|
|
|
num = FFMAX(num, (num * c->ideal_dst_incr - 1) / c->dst_incr + 1);
|
|
|
|
}
|
|
|
|
return num;
|
|
|
|
}
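The bound is exposed as swr_get_out_samples(); a common allocation pattern built on it (a sketch, assuming an initialized context; it may overallocate, since the value is an upper bound):

#include <libswresample/swresample.h>
#include <libavutil/samplefmt.h>

/* Allocate planar output big enough for anything swr_convert() can
 * produce from in_samples input frames. Caller frees data[0] and data
 * with av_freep(), as usual for av_samples_alloc_array_and_samples(). */
static uint8_t **alloc_out(struct SwrContext *swr, int in_samples, int out_ch,
                           enum AVSampleFormat out_fmt, int *out_samples)
{
    uint8_t **data = NULL;
    int linesize;

    *out_samples = swr_get_out_samples(swr, in_samples);
    if (*out_samples < 0)
        return NULL;
    if (av_samples_alloc_array_and_samples(&data, &linesize, out_ch,
                                           *out_samples, out_fmt, 0) < 0)
        return NULL;
    return data;
}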
|
|
|
|
|
2012-12-11 20:19:39 +00:00
|
|
|
static int resample_flush(struct SwrContext *s) {
|
|
|
|
AudioData *a= &s->in_buffer;
|
|
|
|
int i, j, ret;
|
|
|
|
if((ret = swri_realloc_audio(a, s->in_buffer_index + 2*s->in_buffer_count)) < 0)
|
|
|
|
return ret;
|
|
|
|
av_assert0(a->planar);
|
|
|
|
for(i=0; i<a->ch_count; i++){
|
|
|
|
for(j=0; j<s->in_buffer_count; j++){
|
|
|
|
memcpy(a->ch[i] + (s->in_buffer_index+s->in_buffer_count+j )*a->bps,
|
|
|
|
a->ch[i] + (s->in_buffer_index+s->in_buffer_count-j-1)*a->bps, a->bps);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s->in_buffer_count += (s->in_buffer_count+1)/2;
|
|
|
|
return 0;
|
|
|
|
}
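The copy loop above mirror-pads the tail of the input so the filter can be drained past the last real sample. The padding rule in isolation (a toy sketch, not the per-channel AudioData code):

#include <stdio.h>

int main(void)
{
    double buf[8] = {1, 2, 3, 4}; /* 4 valid samples, room for the mirror */
    int count = 4, j;

    /* Append a time-reversed copy: buf[count+j] = buf[count-j-1]. */
    for (j = 0; j < count; j++)
        buf[count + j] = buf[count - j - 1];

    for (j = 0; j < 2 * count; j++)
        printf("%g ", buf[j]);    /* prints: 1 2 3 4 4 3 2 1 */
    printf("\n");
    return 0;
}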
|
|
|
|
|
2014-06-13 23:06:30 +00:00
|
|
|
// in fact, the whole handling of multiple ridiculously small buffers might need more thinking...
|
|
|
|
static int invert_initial_buffer(ResampleContext *c, AudioData *dst, const AudioData *src,
|
|
|
|
int in_count, int *out_idx, int *out_sz)
|
|
|
|
{
|
|
|
|
int n, ch, num = FFMIN(in_count + *out_sz, c->filter_length + 1), res;
|
|
|
|
|
|
|
|
if (c->index >= 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if ((res = swri_realloc_audio(dst, c->filter_length * 2 + 1)) < 0)
|
|
|
|
return res;
|
|
|
|
|
|
|
|
// copy
|
|
|
|
for (n = *out_sz; n < num; n++) {
|
|
|
|
for (ch = 0; ch < src->ch_count; ch++) {
|
|
|
|
memcpy(dst->ch[ch] + ((c->filter_length + n) * c->felem_size),
|
|
|
|
src->ch[ch] + ((n - *out_sz) * c->felem_size), c->felem_size);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// if not enough data is in, return and wait for more
|
|
|
|
if (num < c->filter_length + 1) {
|
|
|
|
*out_sz = num;
|
|
|
|
*out_idx = c->filter_length;
|
|
|
|
return INT_MAX;
|
|
|
|
}
|
|
|
|
|
|
|
|
// else invert
|
|
|
|
for (n = 1; n <= c->filter_length; n++) {
|
|
|
|
for (ch = 0; ch < src->ch_count; ch++) {
|
|
|
|
memcpy(dst->ch[ch] + ((c->filter_length - n) * c->felem_size),
|
|
|
|
dst->ch[ch] + ((c->filter_length + n) * c->felem_size),
|
|
|
|
c->felem_size);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
res = num - *out_sz;
|
|
|
|
*out_idx = c->filter_length + (c->index >> c->phase_shift);
|
2014-10-16 19:18:15 +00:00
|
|
|
*out_sz = FFMAX(*out_sz + c->filter_length,
|
|
|
|
1 + c->filter_length * 2) - *out_idx;
|
2014-06-13 23:06:30 +00:00
|
|
|
c->index &= c->phase_mask;
|
|
|
|
|
2014-10-16 19:18:15 +00:00
|
|
|
return FFMAX(res, 0);
|
2014-06-13 23:06:30 +00:00
|
|
|
}
|
|
|
|
|
2012-12-11 17:36:58 +00:00
|
|
|
struct Resampler const swri_resampler={
|
|
|
|
resample_init,
|
|
|
|
resample_free,
|
|
|
|
multiple_resample,
|
2012-12-11 20:19:39 +00:00
|
|
|
resample_flush,
|
2012-12-11 17:36:58 +00:00
|
|
|
set_compensation,
|
|
|
|
get_delay,
|
2014-06-13 23:06:30 +00:00
|
|
|
invert_initial_buffer,
|
2015-06-02 23:22:25 +00:00
|
|
|
get_out_samples,
|
2012-12-11 17:36:58 +00:00
|
|
|
};
|