Index: webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc |
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc |
index 1e766875caedc519004077e4a2ebfc1f993c9262..ffe8c5bc0f74133f44a424f8bdab50803f590186 100644 |
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc |
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc |
@@ -19,13 +19,12 @@ |
#include <math.h> |
#include <stdlib.h> |
- |
#include <algorithm> |
#include <numeric> |
#include "webrtc/base/checks.h" |
-#include "webrtc/common_audio/vad/include/webrtc_vad.h" |
#include "webrtc/common_audio/window_generator.h" |
+#include "webrtc/common_audio/include/audio_util.h" |
namespace webrtc { |
@@ -39,6 +38,7 @@ const float kConfigRho = 0.02f; // Default production and interpretation SNR. |
const float kKbdAlpha = 1.5f; |
const float kLambdaBot = -1.0f; // Extreme values in bisection |
const float kLambdaTop = -10e-18f; // search for lamda. |
+const float kMinNoise = 10e-18f; |
} // namespace |
@@ -65,56 +65,54 @@ void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock( |
} |
} |
-IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution, |
- int sample_rate_hz, |
- int channels, |
- int cv_type, |
- float cv_alpha, |
- int cv_win, |
- int analysis_rate, |
- int variance_rate, |
- float gain_limit) |
+IntelligibilityEnhancer::IntelligibilityEnhancer() |
+ : IntelligibilityEnhancer(IntelligibilityEnhancer::Config()) { |
+} |
+ |
+IntelligibilityEnhancer::IntelligibilityEnhancer(const Config& config) |
: freqs_(RealFourier::ComplexLength( |
- RealFourier::FftOrder(sample_rate_hz * kWindowSizeMs / 1000))), |
+ RealFourier::FftOrder(config.sample_rate_hz * kWindowSizeMs / 1000))), |
window_size_(1 << RealFourier::FftOrder(freqs_)), |
- chunk_length_(sample_rate_hz * kChunkSizeMs / 1000), |
- bank_size_(GetBankSize(sample_rate_hz, erb_resolution)), |
- sample_rate_hz_(sample_rate_hz), |
- erb_resolution_(erb_resolution), |
- channels_(channels), |
- analysis_rate_(analysis_rate), |
- variance_rate_(variance_rate), |
+ chunk_length_(config.sample_rate_hz * kChunkSizeMs / 1000), |
+ bank_size_(GetBankSize(config.sample_rate_hz, kErbResolution)), |
+ sample_rate_hz_(config.sample_rate_hz), |
+ erb_resolution_(kErbResolution), |
+ channels_(config.channels), |
+ analysis_rate_(config.analysis_rate), |
+ capture_vad_thresh_(config.capture_vad_thresh), |
+ render_vad_thresh_(config.render_vad_thresh), |
+ activate_snr_thresh_(config.activate_snr_thresh), |
+ deactivate_snr_thresh_(config.deactivate_snr_thresh), |
+ active_(false), |
+ deactivating_(false), |
clear_variance_(freqs_, |
- static_cast<VarianceType>(cv_type), |
- cv_win, |
- cv_alpha), |
- noise_variance_(freqs_, VarianceType::kStepInfinite, 475, 0.01f), |
+ config.var_type, |
+ config.var_window_size, |
+ config.var_decay_rate), |
+ noise_variance_(freqs_, |
+ config.var_type, |
+ config.var_window_size, |
+ config.var_decay_rate), |
filtered_clear_var_(new float[bank_size_]), |
filtered_noise_var_(new float[bank_size_]), |
filter_bank_(bank_size_), |
center_freqs_(new float[bank_size_]), |
rho_(new float[bank_size_]), |
gains_eq_(new float[bank_size_]), |
- gain_applier_(freqs_, gain_limit), |
+ gain_applier_(freqs_, config.gain_change_limit), |
temp_out_buffer_(nullptr), |
- input_audio_(new float* [channels]), |
kbd_window_(new float[window_size_]), |
render_callback_(this, AudioSource::kRenderStream), |
capture_callback_(this, AudioSource::kCaptureStream), |
block_count_(0), |
analysis_step_(0), |
- vad_high_(WebRtcVad_Create()), |
- vad_low_(WebRtcVad_Create()), |
+ using_capture_vad_(true), |
+ using_render_vad_(true), |
vad_tmp_buffer_(new int16_t[chunk_length_]) { |
- DCHECK_LE(kConfigRho, 1.0f); |
+ DCHECK_LE(config.rho, 1.0f); |
CreateErbBank(); |
- WebRtcVad_Init(vad_high_); |
- WebRtcVad_set_mode(vad_high_, 0); // High likelihood of speech. |
- WebRtcVad_Init(vad_low_); |
- WebRtcVad_set_mode(vad_low_, 3); // Low likelihood of speech. |
- |
temp_out_buffer_ = static_cast<float**>( |
malloc(sizeof(*temp_out_buffer_) * channels_ + |
sizeof(**temp_out_buffer_) * chunk_length_ * channels_)); |
@@ -126,13 +124,13 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution, |
// Assumes all rho equal. |
for (int i = 0; i < bank_size_; ++i) { |
- rho_[i] = kConfigRho * kConfigRho; |
+ rho_[i] = config.rho * config.rho; |
} |
float freqs_khz = kClipFreq / 1000.0f; |
int erb_index = static_cast<int>(ceilf( |
11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f)); |
- start_freq_ = max(1, erb_index * kErbResolution); |
+ start_freq_ = max(1, erb_index * erb_resolution_); |
WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_, |
kbd_window_.get()); |
@@ -145,20 +143,34 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution, |
} |
IntelligibilityEnhancer::~IntelligibilityEnhancer() { |
- WebRtcVad_Free(vad_low_); |
- WebRtcVad_Free(vad_high_); |
free(temp_out_buffer_); |
} |
-void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio) { |
- for (int i = 0; i < chunk_length_; ++i) { |
- vad_tmp_buffer_[i] = (int16_t)audio[0][i]; |
+void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio, |
+ int sample_rate_hz, |
+ int num_channels, |
+ float voice_probability) { |
+ render_voice_probability_ = voice_probability; |
+ using_render_vad_ = false; |
+ ProcessRenderAudio(audio, sample_rate_hz, num_channels); |
+} |
+ |
+void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio, |
+ int sample_rate_hz, |
+ int num_channels) { |
+ CHECK_EQ(sample_rate_hz, sample_rate_hz_); |
+ CHECK_EQ(num_channels, channels_); |
aluebs-webrtc
2015/07/20 19:33:43
Use num_channels_ for the member variable as well?
ekm
2015/07/21 01:02:44
Done.
+ |
+ if (using_render_vad_) { |
+ FloatToS16(audio[0], chunk_length_, vad_tmp_buffer_.get()); |
+ render_vad_.ProcessChunk(vad_tmp_buffer_.get(), chunk_length_, |
+ sample_rate_hz_); |
+ render_voice_probability_ = render_vad_.last_voice_probability(); |
} |
- has_voice_low_ = WebRtcVad_Process(vad_low_, sample_rate_hz_, |
- vad_tmp_buffer_.get(), chunk_length_) == 1; |
- // Process and enhance chunk of |audio| |
- render_mangler_->ProcessChunk(audio, temp_out_buffer_); |
+ if (render_voice_probability_ >= render_vad_thresh_ || active_) { |
aluebs-webrtc
2015/07/20 19:33:43
I am not sure why the voice probability affects if the chunk gets processed or not.
ekm
2015/07/21 01:02:44
If we're pretty sure the far-end chunk contains speech, or the enhancer is still applying gains, the chunk needs to be processed.
aluebs-webrtc
2015/07/21 01:50:55
Oh! ProcessChunk doesn't really processes the chunk, it only analyzes it, right?
ekm
2015/07/21 19:22:13
ProcessChunk transforms audio to frequency domain, applies the gains through the block callback, and transforms back to the time domain.
aluebs-webrtc
2015/07/21 21:30:22
Oh, I see. Thanks for clarifying.
+ render_mangler_->ProcessChunk(audio, temp_out_buffer_); |
+ } |
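To make the gating this thread settles on easier to scan, here is a small illustrative sketch; the free function and its parameter names are mine, not part of the CL. The far-end chunk goes through the lapped transform whenever speech is likely or the enhancer is still active, because ProcessChunk is the step that runs the forward transform, the per-block gain application, and the inverse transform.

    // Illustrative only: mirrors the condition guarding
    // render_mangler_->ProcessChunk() above.
    bool ShouldTransformRenderChunk(float voice_probability,
                                    float render_vad_thresh,
                                    bool enhancer_active) {
      // Transform when the far end likely contains speech, or while the
      // enhancer is active and gains still need to be applied or ramped down.
      return voice_probability >= render_vad_thresh || enhancer_active;
    }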
for (int i = 0; i < channels_; ++i) { |
memcpy(audio[i], temp_out_buffer_[i], |
@@ -166,23 +178,31 @@ void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio) { |
} |
} |
-void IntelligibilityEnhancer::ProcessCaptureAudio(float* const* audio) { |
- for (int i = 0; i < chunk_length_; ++i) { |
- vad_tmp_buffer_[i] = (int16_t)audio[0][i]; |
+void IntelligibilityEnhancer::AnalyzeCaptureAudio(float* const* audio, |
+ int sample_rate_hz, |
+ int num_channels, |
+ float voice_probability) { |
+ capture_voice_probability_ = voice_probability; |
+ using_capture_vad_ = false; |
+ AnalyzeCaptureAudio(audio, sample_rate_hz, num_channels); |
+} |
+ |
+void IntelligibilityEnhancer::AnalyzeCaptureAudio(float* const* audio, |
+ int sample_rate_hz, |
+ int num_channels) { |
+ CHECK_EQ(sample_rate_hz, sample_rate_hz_); |
+ CHECK_EQ(num_channels, channels_); |
+ |
+ if (using_capture_vad_) { |
+ FloatToS16(audio[0], chunk_length_, vad_tmp_buffer_.get()); |
+ capture_vad_.ProcessChunk(vad_tmp_buffer_.get(), chunk_length_, |
+ sample_rate_hz_); |
+ capture_voice_probability_ = capture_vad_.last_voice_probability(); |
} |
- // TODO(bercic): The VAD was always detecting voice in the noise stream, |
- // no matter what the aggressiveness, so it was temporarily disabled here. |
- |
- #if 0 |
- if (WebRtcVad_Process(vad_high_, sample_rate_hz_, vad_tmp_buffer_.get(), |
- chunk_length_) == 1) { |
- printf("capture HAS speech\n"); |
- return; |
- } |
- printf("capture NO speech\n"); |
- #endif |
- capture_mangler_->ProcessChunk(audio, temp_out_buffer_); |
+ if (capture_voice_probability_ <= capture_vad_thresh_) { |
aluebs-webrtc
2015/07/20 19:33:43
I am not sure why the voice probability affects if the chunk gets analyzed or not.
ekm
2015/07/21 01:02:44
If we're pretty sure the near-end chunk contains noise only, we analyze it to update the noise estimate.
aluebs-webrtc
2015/07/21 01:50:55
That makes sense. I was not aware that ProcessChunk was only used for analysis on the capture side.
ekm
2015/07/21 19:22:13
Yeah, in the capture case, the freq data is only used to update the noise estimate; the capture audio itself is not modified.
+ capture_mangler_->ProcessChunk(audio, temp_out_buffer_); |
+ } |
} |
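As a usage sketch of the new entry points; the wrapper function, sample rate, and buffer sizes below are assumptions for illustration, not code from this CL. A caller can either let the enhancer run its own VoiceActivityDetector, or pass in a voice probability computed elsewhere, which switches off the internal VAD for that stream.

    // Sketch only: assumes 16 kHz mono and 10 ms chunks; the Config defaults
    // are defined in the header, which is not part of this diff.
    #include <vector>
    #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h"

    void ExampleUsage() {
      webrtc::IntelligibilityEnhancer::Config config;
      config.sample_rate_hz = 16000;  // must match the rate passed below (CHECK_EQ)
      config.channels = 1;
      webrtc::IntelligibilityEnhancer enhancer(config);

      std::vector<float> render_chunk(160);   // 10 ms at 16 kHz
      std::vector<float> capture_chunk(160);
      float* render[] = {render_chunk.data()};
      float* capture[] = {capture_chunk.data()};

      // Let the enhancer's internal VADs estimate the voice probabilities:
      enhancer.ProcessRenderAudio(render, 16000, 1);
      enhancer.AnalyzeCaptureAudio(capture, 16000, 1);

      // Or reuse probabilities computed elsewhere; this disables the internal
      // VAD for the corresponding stream:
      enhancer.ProcessRenderAudio(render, 16000, 1, /*voice_probability=*/0.9f);
      enhancer.AnalyzeCaptureAudio(capture, 16000, 1, /*voice_probability=*/0.1f);
    }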
void IntelligibilityEnhancer::DispatchAudio( |
@@ -207,28 +227,23 @@ void IntelligibilityEnhancer::ProcessClearBlock(const complex<float>* in_block, |
return; |
} |
- // For now, always assumes enhancement is necessary. |
- // TODO(ekmeyerson): Change to only enhance if necessary, |
- // based on experiments with different cutoffs. |
- if (has_voice_low_ || true) { |
+ if (render_voice_probability_ >= render_vad_thresh_) { |
aluebs-webrtc
2015/07/20 19:33:43
This changed from has_voice_low to checking if the voice probability is above render_vad_thresh_. Why?
ekm
2015/07/21 01:02:44
The has_voice_low_ was referring to detection of voice by the old binary VAD (vad_low_); the probability threshold plays the same role here.
aluebs-webrtc
2015/07/21 01:50:55
Oh, I see. Then it makes sense.
clear_variance_.Step(in_block, false); |
- const float power_target = std::accumulate( |
- clear_variance_.variance(), clear_variance_.variance() + freqs_, 0.0f); |
- |
- if (block_count_ % analysis_rate_ == analysis_rate_ - 1) { |
+ if (active_ && !deactivating_ && |
+ block_count_ % analysis_rate_ == analysis_rate_ - 1) { |
+ const float power_target = std::accumulate( |
+ clear_variance_.variance(), clear_variance_.variance() + freqs_, 0.f); |
AnalyzeClearBlock(power_target); |
++analysis_step_; |
- if (analysis_step_ == variance_rate_) { |
- analysis_step_ = 0; |
- clear_variance_.Clear(); |
- noise_variance_.Clear(); |
- } |
} |
++block_count_; |
} |
- /* efidata(n,:) = sqrt(b(n)) * fidata(n,:) */ |
- gain_applier_.Apply(in_block, out_block); |
+ UpdateActivity(); |
+ if (active_) { |
+ // efidata(n,:) = sqrt(b(n)) * fidata(n,:) |
+ gain_applier_.Apply(in_block, out_block); |
+ } |
} |
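For readers comparing against the old behavior, a compact sketch of the change discussed in the thread above; the helper name and the threshold value are invented for illustration. The effectively-always-true binary check is replaced by comparing the render voice probability against a configurable threshold, so the clear-speech statistics only update on blocks that are likely speech.

    // Illustrative only; 0.7f is an invented threshold, the real value comes
    // from the Config.
    //   Before: has_voice_low_ = WebRtcVad_Process(...) == 1;
    //           if (has_voice_low_ || true) { ... }  // effectively always taken
    //   After:
    bool ShouldUpdateClearStats(float render_voice_probability,
                                float render_vad_thresh = 0.7f) {
      return render_voice_probability >= render_vad_thresh;
    }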
void IntelligibilityEnhancer::AnalyzeClearBlock(float power_target) { |
@@ -395,6 +410,29 @@ void IntelligibilityEnhancer::FilterVariance(const float* var, float* result) { |
} |
} |
+float IntelligibilityEnhancer::SNR() { |
+ float total_clear_var = std::accumulate( |
+ clear_variance_.variance(), clear_variance_.variance() + freqs_, 0.f); |
+ float total_noise_var = |
+ std::accumulate(noise_variance_.variance(), |
+ noise_variance_.variance() + freqs_, kMinNoise); |
+ return total_clear_var / total_noise_var; |
+} |
+ |
+void IntelligibilityEnhancer::UpdateActivity() { |
+ const float snr = SNR(); |
+ if (snr <= activate_snr_thresh_) { |
+ active_ = true; |
+ deactivating_ = false; |
+ } else if (active_ && !deactivating_ && snr >= deactivate_snr_thresh_) { |
+ gain_applier_.Clear(); |
+ deactivating_ = true; |
+ } else if (deactivating_ && gain_applier_.IsIdentity()) { |
+ active_ = false; |
+ deactivating_ = false; |
+ } |
+} |
+ |
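Since the diff carries no prose for the new activation logic, here is a standalone replica with a short trace; the threshold and SNR values are invented, the real defaults live in the Config. Enhancement turns on when the clear-to-noise SNR drops to activate_snr_thresh_, starts ramping gains back to identity once the SNR reaches deactivate_snr_thresh_ (gain_applier_.Clear() begins the ramp), and only reports inactive after the GainApplier reaches identity.

    // Standalone replica of the UpdateActivity() hysteresis, for illustration.
    #include <cstdio>

    struct Activity {
      bool active = false;
      bool deactivating = false;
    };

    void Update(Activity* a, float snr, bool gains_are_identity,
                float activate_thresh, float deactivate_thresh) {
      if (snr <= activate_thresh) {
        a->active = true;        // noisy enough: enhance
        a->deactivating = false;
      } else if (a->active && !a->deactivating && snr >= deactivate_thresh) {
        a->deactivating = true;  // clean again: start ramping gains to identity
      } else if (a->deactivating && gains_are_identity) {
        a->active = false;       // ramp finished: fully off
        a->deactivating = false;
      }
    }

    int main() {
      Activity a;
      const float kActivate = 2.f, kDeactivate = 4.f;      // invented thresholds
      const float snrs[] = {1.5f, 3.f, 5.f, 5.f};
      const bool gains_identity[] = {false, false, false, true};
      for (int i = 0; i < 4; ++i) {
        Update(&a, snrs[i], gains_identity[i], kActivate, kDeactivate);
        std::printf("snr=%.1f active=%d deactivating=%d\n", snrs[i], a.active,
                    a.deactivating);
      }
      // Activates below 2, holds between 2 and 4, starts deactivating at 4 or
      // above, and fully deactivates once the gains have ramped to identity.
      return 0;
    }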
float IntelligibilityEnhancer::DotProduct(const float* a, |
const float* b, |
int length) { |