avfilter/af_dynaudnorm: allow to filter subset of channels
commit a9124a75b0
parent b9f91a7cbc
doc/filters.texi
@@ -4389,6 +4389,9 @@ If input frame volume is above this value frame will be normalized.
 Otherwise frame may not be normalized at all. The default value is set
 to 0, which means all input frames will be normalized.
 This option is mostly useful if digital noise is not wanted to be amplified.
+
+@item channels, h
+Specify which channels to filter, by default all available channels are filtered.
 @end table
 
 @subsection Commands
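Illustrative usage of the new option (not part of the patch; assumes the usual channel-layout string syntax accepted by AV_OPT_TYPE_CHANNEL_LAYOUT options):

    ffmpeg -i in.wav -af dynaudnorm=channels=FL+FR out.wav

Here only the front-left and front-right channels would be normalized and any remaining channels pass through untouched; "h" is the short alias for the same option.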
libavfilter/af_dynaudnorm.c
@@ -27,6 +27,7 @@
 #include <float.h>
 
 #include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
 #include "libavutil/opt.h"
 
 #define MIN_FILTER_SIZE 3
@@ -76,6 +77,7 @@ typedef struct DynamicAudioNormalizerContext {
 
     int channels;
     int eof;
+    uint64_t channels_to_filter;
     int64_t pts;
 
     cqueue **gain_history_original;
@@ -110,6 +112,8 @@ static const AVOption dynaudnorm_options[] = {
     { "s", "set the compress factor", OFFSET(compress_factor), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 30.0, FLAGS },
     { "threshold", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
     { "t", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
+    { "channels", "set channels to filter", OFFSET(channels_to_filter),AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS },
+    { "h", "set channels to filter", OFFSET(channels_to_filter),AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS },
     { NULL }
 };
 
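A note on the default: {.i64=-1} stores the channel mask with every bit set, so no channel is ever bypassed unless the user narrows the layout. A minimal standalone C sketch of that bit test (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t channels_to_filter = (uint64_t)-1; /* option default: all bits set */
    const uint64_t front_left_bit     = 0x1;          /* AV_CH_FRONT_LEFT */
    const uint64_t low_freq_bit       = 0x8;          /* AV_CH_LOW_FREQUENCY */

    /* A channel is bypassed only when its layout bit is absent from the mask;
     * with the all-ones default the test is always false, i.e. every channel
     * gets filtered. */
    printf("bypass FL:  %d\n", !(front_left_bit & channels_to_filter)); /* prints 0 */
    printf("bypass LFE: %d\n", !(low_freq_bit & channels_to_filter));   /* prints 0 */
    return 0;
}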
@@ -514,6 +518,11 @@ static inline double update_value(double new, double old, double aggressiveness)
     return aggressiveness * new + (1.0 - aggressiveness) * old;
 }
 
+static inline int bypass_channel(DynamicAudioNormalizerContext *s, AVFrame *frame, int ch)
+{
+    return !(av_channel_layout_extract_channel(frame->channel_layout, ch) & s->channels_to_filter);
+}
+
 static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame)
 {
     const double diff = 1.0 / frame->nb_samples;
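For context, av_channel_layout_extract_channel() from libavutil/channel_layout.h returns the layout bit occupied by the index-th channel of a layout, which is what lets bypass_channel() compare a channel index against the user-supplied mask. A small standalone sketch (illustrative only; assumes the libavutil headers and library are available):

#include <stdio.h>
#include <stdint.h>
#include "libavutil/channel_layout.h"

int main(void)
{
    const uint64_t frame_layout       = AV_CH_LAYOUT_STEREO; /* ch 0 = FL, ch 1 = FR */
    const uint64_t channels_to_filter = AV_CH_FRONT_LEFT;    /* filter FL only */

    for (int ch = 0; ch < 2; ch++) {
        const uint64_t bit = av_channel_layout_extract_channel(frame_layout, ch);
        const int bypass   = !(bit & channels_to_filter);    /* same test as the patch */
        printf("channel %d: %s\n", ch, bypass ? "bypassed" : "normalized");
    }
    return 0;
}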
@@ -521,6 +530,7 @@ static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *fra
     int c, i;
 
     for (c = 0; c < s->channels; c++) {
+        const int bypass = bypass_channel(s, frame, c);
         double *dst_ptr = (double *)frame->extended_data[c];
         double current_average_value = 0.0;
         double prev_value;
@@ -531,7 +541,7 @@ static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *fra
         prev_value = is_first_frame ? current_average_value : s->dc_correction_value[c];
         s->dc_correction_value[c] = is_first_frame ? current_average_value : update_value(current_average_value, s->dc_correction_value[c], 0.1);
 
-        for (i = 0; i < frame->nb_samples; i++) {
+        for (i = 0; i < frame->nb_samples && !bypass; i++) {
             dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, frame->nb_samples);
         }
     }
@@ -604,6 +614,11 @@ static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame
 
         for (c = 0; c < s->channels; c++) {
             double *const dst_ptr = (double *)frame->extended_data[c];
+            const int bypass = bypass_channel(s, frame, c);
+
+            if (bypass)
+                continue;
+
             for (i = 0; i < frame->nb_samples; i++) {
                 const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
                 dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
@@ -611,19 +626,20 @@ static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame
         }
     } else {
         for (c = 0; c < s->channels; c++) {
+            const int bypass = bypass_channel(s, frame, c);
             const double standard_deviation = compute_frame_std_dev(s, frame, c);
             const double current_threshold = setup_compress_thresh(FFMIN(1.0, s->compress_factor * standard_deviation));
             const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[c];
             double prev_actual_thresh, curr_actual_thresh;
             double *dst_ptr;
 
             s->compress_threshold[c] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[c], 1.0/3.0);
 
             prev_actual_thresh = setup_compress_thresh(prev_value);
             curr_actual_thresh = setup_compress_thresh(s->compress_threshold[c]);
 
             dst_ptr = (double *)frame->extended_data[c];
-            for (i = 0; i < frame->nb_samples; i++) {
+            for (i = 0; i < frame->nb_samples && !bypass; i++) {
                 const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
                 dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
             }
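Worth noting: the two branches gate the bypass differently. The coupled path above skips a bypassed channel entirely with an early continue, since its threshold is computed once for all channels, whereas this per-channel path still updates the statistics and compress_threshold[c] state and only guards the sample-rewriting loop with !bypass, presumably so the per-channel threshold history keeps advancing even for channels that are currently left untouched.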
@@ -668,13 +684,14 @@ static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *in,
     int c, i;
 
     for (c = 0; c < s->channels; c++) {
+        const int bypass = bypass_channel(s, frame, c);
        const double *src_ptr = (const double *)in->extended_data[c];
        double *dst_ptr = (double *)frame->extended_data[c];
        double current_amplification_factor;
 
        cqueue_dequeue(s->gain_history_smoothed[c], &current_amplification_factor);
 
-        for (i = 0; i < frame->nb_samples && enabled; i++) {
+        for (i = 0; i < frame->nb_samples && enabled && !bypass; i++) {
            const double amplification_factor = fade(s->prev_amplification_factor[c],
                                                     current_amplification_factor, i,
                                                     frame->nb_samples);