Chromium Code Reviews | Index: webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc |
| diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc |
| index b4cf294a9e0170ca769852518795807cb9550782..79ee888c78e4c1869f7f98d6ee39e4c27e30f1a2 100644 |
| --- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc |
| +++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc |
| @@ -31,6 +31,13 @@ const float kConfigRho = 0.02f; // Default production and interpretation SNR. |
| const float kKbdAlpha = 1.5f; |
| const float kLambdaBot = -1.0f; // Extreme values in bisection |
| const float kLambdaTop = -10e-18f; // search for lambda. |
| +const float kVoiceProbabilityThreshold = 0.02; |
| +// Number of chunks after voice activity that are still considered speech. |
| +const size_t kSpeechOffsetDelay = 80; |
| +const float kDecayRate = 0.97f; // Power estimation decay rate. |
|
hlundin-webrtc
2016/02/15 13:05:11
Two spaces before comment.
hlundin-webrtc
2016/02/15 13:05:11
You change this value from 0.9 to 0.97. Can you ex
aluebs-webrtc
2016/02/19 03:56:30
For this algorithm we care about the long-time psd
aluebs-webrtc
2016/02/19 03:56:31
Done.
|
| +const float kGainChangeLimit = 0.1f; // Maximum change in gain. |
|
turaj
2016/02/13 00:09:42
Is kGainChangeLimit relative to current value, or
hlundin-webrtc
2016/02/15 13:05:11
Two spaces before comment.
aluebs-webrtc
2016/02/19 03:56:30
It was an absolute limit, but I agree that a relat
aluebs-webrtc
2016/02/19 03:56:31
Done.
|
| +const float kRho = 0.0004f; |
|
hlundin-webrtc
2016/02/15 13:05:11
This value is also changed...
aluebs-webrtc
2016/02/19 03:56:30
It changed to be squared, which is the only way it
|
| + |
| // Returns dot product of vectors |a| and |b| with size |length|. |
| float DotProduct(const float* a, const float* b, size_t length) { |
| @@ -71,38 +78,27 @@ void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock( |
| } |
| } |
| -IntelligibilityEnhancer::IntelligibilityEnhancer() |
| - : IntelligibilityEnhancer(IntelligibilityEnhancer::Config()) { |
| -} |
| - |
| -IntelligibilityEnhancer::IntelligibilityEnhancer(const Config& config) |
| +IntelligibilityEnhancer::IntelligibilityEnhancer(int sample_rate_hz, |
| + size_t num_render_channels) |
| : freqs_(RealFourier::ComplexLength( |
| - RealFourier::FftOrder(config.sample_rate_hz * kWindowSizeMs / 1000))), |
| - window_size_(static_cast<size_t>(1 << RealFourier::FftOrder(freqs_))), |
| - chunk_length_( |
| - static_cast<size_t>(config.sample_rate_hz * kChunkSizeMs / 1000)), |
| - bank_size_(GetBankSize(config.sample_rate_hz, kErbResolution)), |
| - sample_rate_hz_(config.sample_rate_hz), |
| - erb_resolution_(kErbResolution), |
| - num_capture_channels_(config.num_capture_channels), |
| - num_render_channels_(config.num_render_channels), |
| - analysis_rate_(config.analysis_rate), |
| - active_(true), |
| - clear_power_(freqs_, config.decay_rate), |
| - noise_power_(freqs_, 0.f), |
| + RealFourier::FftOrder(sample_rate_hz * kWindowSizeMs / 1000))), |
| + chunk_length_(static_cast<size_t>(sample_rate_hz * kChunkSizeMs / 1000)), |
| + bank_size_(GetBankSize(sample_rate_hz, kErbResolution)), |
| + sample_rate_hz_(sample_rate_hz), |
| + num_render_channels_(num_render_channels), |
| + clear_power_estimator_(freqs_, kDecayRate), |
| filtered_clear_pow_(new float[bank_size_]), |
| filtered_noise_pow_(new float[bank_size_]), |
| center_freqs_(new float[bank_size_]), |
| render_filter_bank_(CreateErbBank(freqs_)), |
| - rho_(new float[bank_size_]), |
| gains_eq_(new float[bank_size_]), |
| - gain_applier_(freqs_, config.gain_change_limit), |
| + gain_applier_(freqs_, kGainChangeLimit), |
| temp_render_out_buffer_(chunk_length_, num_render_channels_), |
| - kbd_window_(new float[window_size_]), |
| render_callback_(this), |
| - block_count_(0), |
| - analysis_step_(0) { |
| - RTC_DCHECK_LE(config.rho, 1.0f); |
| + audio_s16_(chunk_length_), |
| + chunks_since_voice_(kSpeechOffsetDelay), |
| + is_speech_(false) { |
| + RTC_DCHECK_LE(kRho, 1.f); |
| memset(filtered_clear_pow_.get(), |
| 0, |
| @@ -111,21 +107,17 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(const Config& config) |
| 0, |
| bank_size_ * sizeof(filtered_noise_pow_[0])); |
| - // Assumes all rho equal. |
| - for (size_t i = 0; i < bank_size_; ++i) { |
| - rho_[i] = config.rho * config.rho; |
| - } |
| - |
| - float freqs_khz = kClipFreq / 1000.0f; |
| + float freqs_khz = kClipFreq / 1000.f; |
|
hlundin-webrtc
2016/02/15 13:05:11
This is const too. And it should probably be named
aluebs-webrtc
2016/02/19 03:56:30
I removed it and updated the constant directly.
|
| size_t erb_index = static_cast<size_t>(ceilf( |
|
hlundin-webrtc
2016/02/15 13:05:11
const
aluebs-webrtc
2016/02/19 03:56:30
Done.
|
| - 11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f)); |
| - start_freq_ = std::max(static_cast<size_t>(1), erb_index * erb_resolution_); |
| + 11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.f)); |
| + start_freq_ = std::max(static_cast<size_t>(1), erb_index * kErbResolution); |
| - WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_, |
| - kbd_window_.get()); |
| + size_t window_size = static_cast<size_t>(1 << RealFourier::FftOrder(freqs_)); |
| + std::vector<float> kbd_window(window_size); |
| + WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size, &kbd_window[0]); |
| render_mangler_.reset(new LappedTransform( |
| num_render_channels_, num_render_channels_, chunk_length_, |
| - kbd_window_.get(), window_size_, window_size_ / 2, &render_callback_)); |
| + &kbd_window[0], window_size, window_size / 2, &render_callback_)); |
| } |
| void IntelligibilityEnhancer::SetCaptureNoiseEstimate( |
| @@ -133,13 +125,9 @@ void IntelligibilityEnhancer::SetCaptureNoiseEstimate( |
| if (capture_filter_bank_.size() != bank_size_ || |
| capture_filter_bank_[0].size() != noise.size()) { |
| capture_filter_bank_ = CreateErbBank(noise.size()); |
| + noise_power_estimator_.reset(new PowerEstimator(noise.size(), kDecayRate)); |
| } |
| - if (noise.size() != noise_power_.size()) { |
| - noise_power_.resize(noise.size()); |
| - } |
| - for (size_t i = 0; i < noise.size(); ++i) { |
| - noise_power_[i] = noise[i] * noise[i]; |
| - } |
| + noise_power_estimator_->Step(&noise[0]); |
| } |
| void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio, |
| @@ -147,54 +135,31 @@ void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio, |
| size_t num_channels) { |
| RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz); |
| RTC_CHECK_EQ(num_render_channels_, num_channels); |
| - |
| - if (active_) { |
| - render_mangler_->ProcessChunk(audio, temp_render_out_buffer_.channels()); |
| - } |
| - |
| - if (active_) { |
| - for (size_t i = 0; i < num_render_channels_; ++i) { |
| - memcpy(audio[i], temp_render_out_buffer_.channels()[i], |
| - chunk_length_ * sizeof(**audio)); |
| - } |
| + is_speech_ = IsSpeech(audio[0]); |
| + render_mangler_->ProcessChunk(audio, temp_render_out_buffer_.channels()); |
| + for (size_t i = 0; i < num_render_channels_; ++i) { |
| + memcpy(audio[i], temp_render_out_buffer_.channels()[i], |
| + chunk_length_ * sizeof(**audio)); |
| } |
| } |
| void IntelligibilityEnhancer::ProcessClearBlock( |
| const std::complex<float>* in_block, |
| std::complex<float>* out_block) { |
| - if (block_count_ < 2) { |
| - memset(out_block, 0, freqs_ * sizeof(*out_block)); |
| - ++block_count_; |
| - return; |
| - } |
| - |
| - // TODO(ekm): Use VAD to |Step| and |AnalyzeClearBlock| only if necessary. |
| - if (true) { |
| - clear_power_.Step(in_block); |
| - if (block_count_ % analysis_rate_ == analysis_rate_ - 1) { |
| - AnalyzeClearBlock(); |
| - ++analysis_step_; |
| - } |
| - ++block_count_; |
| + if (is_speech_) { |
| + clear_power_estimator_.Step(in_block); |
| } |
| - |
| - if (active_) { |
| - gain_applier_.Apply(in_block, out_block); |
| - } |
| -} |
| - |
| -void IntelligibilityEnhancer::AnalyzeClearBlock() { |
| - const float* clear_power = clear_power_.Power(); |
| - MapToErbBands(clear_power, |
| + MapToErbBands(clear_power_estimator_.power(), |
| render_filter_bank_, |
| filtered_clear_pow_.get()); |
| - MapToErbBands(&noise_power_[0], |
| + MapToErbBands(noise_power_estimator_->power(), |
|
turaj
2016/02/13 00:09:42
I'm confused that why we are back to using PowerEs
aluebs-webrtc
2016/02/19 03:56:30
To be consistent with the PSD estimation from the
|
| capture_filter_bank_, |
| filtered_noise_pow_.get()); |
| SolveForGainsGivenLambda(kLambdaTop, start_freq_, gains_eq_.get()); |
| const float power_target = std::accumulate( |
| - clear_power, clear_power + freqs_, 0.f); |
| + clear_power_estimator_.power(), |
| + clear_power_estimator_.power() + freqs_, |
| + 0.f); |
| const float power_top = |
| DotProduct(gains_eq_.get(), filtered_clear_pow_.get(), bank_size_); |
| SolveForGainsGivenLambda(kLambdaBot, start_freq_, gains_eq_.get()); |
| @@ -204,6 +169,7 @@ void IntelligibilityEnhancer::AnalyzeClearBlock() { |
| SolveForLambda(power_target, power_bot, power_top); |
| UpdateErbGains(); |
| } // Else experiencing power underflow, so do nothing. |
| + gain_applier_.Apply(in_block, out_block); |
| } |
| void IntelligibilityEnhancer::SolveForLambda(float power_target, |
| @@ -215,11 +181,11 @@ void IntelligibilityEnhancer::SolveForLambda(float power_target, |
| const float reciprocal_power_target = 1.f / power_target; |
| float lambda_bot = kLambdaBot; |
| float lambda_top = kLambdaTop; |
| - float power_ratio = 2.0f; // Ratio of achieved power to target power. |
| + float power_ratio = 2.f; // Ratio of achieved power to target power. |
| int iters = 0; |
| - while (std::fabs(power_ratio - 1.0f) > kConvergeThresh && |
| + while (std::fabs(power_ratio - 1.f) > kConvergeThresh && |
| iters <= kMaxIters) { |
| - const float lambda = lambda_bot + (lambda_top - lambda_bot) / 2.0f; |
| + const float lambda = lambda_bot + (lambda_top - lambda_bot) / 2.f; |
| SolveForGainsGivenLambda(lambda, start_freq_, gains_eq_.get()); |
| const float power = |
| DotProduct(gains_eq_.get(), filtered_clear_pow_.get(), bank_size_); |
| @@ -237,7 +203,7 @@ void IntelligibilityEnhancer::UpdateErbGains() { |
| // (ERB gain) = filterbank' * (freq gain) |
| float* gains = gain_applier_.target(); |
| for (size_t i = 0; i < freqs_; ++i) { |
| - gains[i] = 0.0f; |
| + gains[i] = 0.f; |
| for (size_t j = 0; j < bank_size_; ++j) { |
| gains[i] = fmaf(render_filter_bank_[j][i], gains_eq_[j], gains[i]); |
| } |
| @@ -246,9 +212,9 @@ void IntelligibilityEnhancer::UpdateErbGains() { |
| size_t IntelligibilityEnhancer::GetBankSize(int sample_rate, |
| size_t erb_resolution) { |
| - float freq_limit = sample_rate / 2000.0f; |
| + float freq_limit = sample_rate / 2000.f; |
| size_t erb_scale = static_cast<size_t>(ceilf( |
| - 11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) + 43.0f)); |
| + 11.17f * logf((freq_limit + 0.312f) / (freq_limit + 14.6575f)) + 43.f)); |
| return erb_scale * erb_resolution; |
| } |
| @@ -258,7 +224,7 @@ std::vector<std::vector<float>> IntelligibilityEnhancer::CreateErbBank( |
| size_t lf = 1, rf = 4; |
| for (size_t i = 0; i < bank_size_; ++i) { |
| - float abs_temp = fabsf((i + 1.0f) / static_cast<float>(erb_resolution_)); |
| + float abs_temp = fabsf((i + 1.f) / static_cast<float>(kErbResolution)); |
| center_freqs_[i] = 676170.4f / (47.06538f - expf(0.08950404f * abs_temp)); |
| center_freqs_[i] -= 14678.49f; |
| } |
| @@ -273,15 +239,14 @@ std::vector<std::vector<float>> IntelligibilityEnhancer::CreateErbBank( |
| for (size_t i = 1; i <= bank_size_; ++i) { |
| size_t lll, ll, rr, rrr; |
| - static const size_t kOne = 1; // Avoids repeated static_cast<>s below. |
| lll = static_cast<size_t>(round( |
| - center_freqs_[std::max(kOne, i - lf) - 1] * num_freqs / |
| + center_freqs_[std::max(1ul, i - lf) - 1] * num_freqs / |
| (0.5f * sample_rate_hz_))); |
| ll = static_cast<size_t>(round( |
| - center_freqs_[std::max(kOne, i) - 1] * num_freqs / |
| + center_freqs_[std::max(1ul, i) - 1] * num_freqs / |
| (0.5f * sample_rate_hz_))); |
| - lll = std::min(num_freqs, std::max(lll, kOne)) - 1; |
| - ll = std::min(num_freqs, std::max(ll, kOne)) - 1; |
| + lll = std::min(num_freqs, std::max(lll, 1ul)) - 1; |
| + ll = std::min(num_freqs, std::max(ll, 1ul)) - 1; |
| rrr = static_cast<size_t>(round( |
| center_freqs_[std::min(bank_size_, i + rf) - 1] * num_freqs / |
| @@ -289,31 +254,31 @@ std::vector<std::vector<float>> IntelligibilityEnhancer::CreateErbBank( |
| rr = static_cast<size_t>(round( |
| center_freqs_[std::min(bank_size_, i + 1) - 1] * num_freqs / |
| (0.5f * sample_rate_hz_))); |
| - rrr = std::min(num_freqs, std::max(rrr, kOne)) - 1; |
| - rr = std::min(num_freqs, std::max(rr, kOne)) - 1; |
| + rrr = std::min(num_freqs, std::max(rrr, 1ul)) - 1; |
| + rr = std::min(num_freqs, std::max(rr, 1ul)) - 1; |
| float step, element; |
| - step = 1.0f / (ll - lll); |
| - element = 0.0f; |
| + step = 1.f / (ll - lll); |
| + element = 0.f; |
| for (size_t j = lll; j <= ll; ++j) { |
| filter_bank[i - 1][j] = element; |
| element += step; |
| } |
| - step = 1.0f / (rrr - rr); |
| - element = 1.0f; |
| + step = 1.f / (rrr - rr); |
| + element = 1.f; |
| for (size_t j = rr; j <= rrr; ++j) { |
| filter_bank[i - 1][j] = element; |
| element -= step; |
| } |
| for (size_t j = ll; j <= rr; ++j) { |
| - filter_bank[i - 1][j] = 1.0f; |
| + filter_bank[i - 1][j] = 1.f; |
| } |
| } |
| float sum; |
| for (size_t i = 0; i < num_freqs; ++i) { |
| - sum = 0.0f; |
| + sum = 0.f; |
| for (size_t j = 0; j < bank_size_; ++j) { |
| sum += filter_bank[j][i]; |
| } |
| @@ -327,22 +292,22 @@ std::vector<std::vector<float>> IntelligibilityEnhancer::CreateErbBank( |
| void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda, |
| size_t start_freq, |
| float* sols) { |
| - bool quadratic = (kConfigRho < 1.0f); |
| + bool quadratic = (kConfigRho < 1.f); |
| const float* pow_x0 = filtered_clear_pow_.get(); |
| const float* pow_n0 = filtered_noise_pow_.get(); |
| for (size_t n = 0; n < start_freq; ++n) { |
| - sols[n] = 1.0f; |
| + sols[n] = 1.f; |
| } |
| // Analytic solution for optimal gains. See paper for derivation. |
| for (size_t n = start_freq - 1; n < bank_size_; ++n) { |
| float alpha0, beta0, gamma0; |
| - gamma0 = 0.5f * rho_[n] * pow_x0[n] * pow_n0[n] + |
| + gamma0 = 0.5f * kRho * pow_x0[n] * pow_n0[n] + |
| lambda * pow_x0[n] * pow_n0[n] * pow_n0[n]; |
| - beta0 = lambda * pow_x0[n] * (2 - rho_[n]) * pow_x0[n] * pow_n0[n]; |
| + beta0 = lambda * pow_x0[n] * (2 - kRho) * pow_x0[n] * pow_n0[n]; |
| if (quadratic) { |
| - alpha0 = lambda * pow_x0[n] * (1 - rho_[n]) * pow_x0[n] * pow_x0[n]; |
| + alpha0 = lambda * pow_x0[n] * (1 - kRho) * pow_x0[n] * pow_x0[n]; |
| sols[n] = |
| (-beta0 - sqrtf(beta0 * beta0 - 4 * alpha0 * gamma0)) / (2 * alpha0); |
| } else { |
| @@ -352,8 +317,15 @@ void IntelligibilityEnhancer::SolveForGainsGivenLambda(float lambda, |
| } |
| } |
| -bool IntelligibilityEnhancer::active() const { |
| - return active_; |
| +bool IntelligibilityEnhancer::IsSpeech(const float* audio) { |
| + FloatToS16(audio, chunk_length_, &audio_s16_[0]); |
| + vad_.ProcessChunk(&audio_s16_[0], chunk_length_, sample_rate_hz_); |
| + if (vad_.last_voice_probability() > kVoiceProbabilityThreshold) { |
|
turaj
2016/02/13 00:09:42
I thought we gonna use the energy-based VAD with h
aluebs-webrtc
2016/02/19 03:56:30
As discussed offline, having the pitch-based VAD w
|
| + chunks_since_voice_ = 0; |
| + } else if (chunks_since_voice_ < kSpeechOffsetDelay) { |
|
turaj
2016/02/13 00:09:42
If energy-based VAD is used, do we still need this
aluebs-webrtc
2016/02/19 03:56:30
No, but I think we should use the pitch-based VAD.
|
| + ++chunks_since_voice_; |
| + } |
| + return chunks_since_voice_ < kSpeechOffsetDelay; |
| } |
| } // namespace webrtc |