Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 // | 11 // |
| 12 // Implements core class for intelligibility enhancer. | 12 // Implements core class for intelligibility enhancer. |
| 13 // | 13 // |
| 14 // Details of the model and algorithm can be found in the original paper: | 14 // Details of the model and algorithm can be found in the original paper: |
| 15 // http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6882788 | 15 // http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6882788 |
| 16 // | 16 // |
| 17 | 17 |
| 18 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h" | 18 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h" |
| 19 | 19 |
| 20 #include <math.h> | 20 #include <math.h> |
| 21 #include <stdlib.h> | 21 #include <stdlib.h> |
| 22 | |
| 23 #include <algorithm> | 22 #include <algorithm> |
| 24 #include <numeric> | 23 #include <numeric> |
| 25 | 24 |
| 26 #include "webrtc/base/checks.h" | 25 #include "webrtc/base/checks.h" |
| 27 #include "webrtc/common_audio/vad/include/webrtc_vad.h" | |
| 28 #include "webrtc/common_audio/window_generator.h" | 26 #include "webrtc/common_audio/window_generator.h" |
| 27 #include "webrtc/common_audio/include/audio_util.h" | |
| 29 | 28 |
| 30 namespace webrtc { | 29 namespace webrtc { |
| 31 | 30 |
| 32 namespace { | 31 namespace { |
| 33 | 32 |
| 34 const int kErbResolution = 2; | 33 const int kErbResolution = 2; |
| 35 const int kWindowSizeMs = 2; | 34 const int kWindowSizeMs = 2; |
| 36 const int kChunkSizeMs = 10; // Size provided by APM. | 35 const int kChunkSizeMs = 10; // Size provided by APM. |
| 37 const float kClipFreq = 200.0f; | 36 const float kClipFreq = 200.0f; |
| 38 const float kConfigRho = 0.02f; // Default production and interpretation SNR. | 37 const float kConfigRho = 0.02f; // Default production and interpretation SNR. |
| 39 const float kKbdAlpha = 1.5f; | 38 const float kKbdAlpha = 1.5f; |
| 40 const float kLambdaBot = -1.0f; // Extreme values in bisection | 39 const float kLambdaBot = -1.0f; // Extreme values in bisection |
| 41 const float kLambdaTop = -10e-18f; // search for lambda. | 40 const float kLambdaTop = -10e-18f; // search for lambda. |
| 41 const float kMinNoise = 10e-18f; | |
| 42 | 42 |
| 43 } // namespace | 43 } // namespace |
| 44 | 44 |
| 45 using std::complex; | 45 using std::complex; |
| 46 using std::max; | 46 using std::max; |
| 47 using std::min; | 47 using std::min; |
| 48 using VarianceType = intelligibility::VarianceArray::StepType; | 48 using VarianceType = intelligibility::VarianceArray::StepType; |
| 49 | 49 |
| 50 IntelligibilityEnhancer::TransformCallback::TransformCallback( | 50 IntelligibilityEnhancer::TransformCallback::TransformCallback( |
| 51 IntelligibilityEnhancer* parent, | 51 IntelligibilityEnhancer* parent, |
| 52 IntelligibilityEnhancer::AudioSource source) | 52 IntelligibilityEnhancer::AudioSource source) |
| 53 : parent_(parent), source_(source) { | 53 : parent_(parent), source_(source) { |
| 54 } | 54 } |
| 55 | 55 |
| 56 void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock( | 56 void IntelligibilityEnhancer::TransformCallback::ProcessAudioBlock( |
| 57 const complex<float>* const* in_block, | 57 const complex<float>* const* in_block, |
| 58 int in_channels, | 58 int in_channels, |
| 59 int frames, | 59 int frames, |
| 60 int /* out_channels */, | 60 int /* out_channels */, |
| 61 complex<float>* const* out_block) { | 61 complex<float>* const* out_block) { |
| 62 DCHECK_EQ(parent_->freqs_, frames); | 62 DCHECK_EQ(parent_->freqs_, frames); |
| 63 for (int i = 0; i < in_channels; ++i) { | 63 for (int i = 0; i < in_channels; ++i) { |
| 64 parent_->DispatchAudio(source_, in_block[i], out_block[i]); | 64 parent_->DispatchAudio(source_, in_block[i], out_block[i]); |
| 65 } | 65 } |
| 66 } | 66 } |
| 67 | 67 |
| 68 IntelligibilityEnhancer::IntelligibilityEnhancer(int erb_resolution, | 68 IntelligibilityEnhancer::IntelligibilityEnhancer() |
| 69 int sample_rate_hz, | 69 : IntelligibilityEnhancer(IntelligibilityEnhancer::Config()) { |
| 70 int channels, | 70 } |
| 71 int cv_type, | 71 |
| 72 float cv_alpha, | 72 IntelligibilityEnhancer::IntelligibilityEnhancer(const Config& config) |
| 73 int cv_win, | |
| 74 int analysis_rate, | |
| 75 int variance_rate, | |
| 76 float gain_limit) | |
| 77 : freqs_(RealFourier::ComplexLength( | 73 : freqs_(RealFourier::ComplexLength( |
| 78 RealFourier::FftOrder(sample_rate_hz * kWindowSizeMs / 1000))), | 74 RealFourier::FftOrder(config.sample_rate_hz * kWindowSizeMs / 1000))), |
| 79 window_size_(1 << RealFourier::FftOrder(freqs_)), | 75 window_size_(1 << RealFourier::FftOrder(freqs_)), |
| 80 chunk_length_(sample_rate_hz * kChunkSizeMs / 1000), | 76 chunk_length_(config.sample_rate_hz * kChunkSizeMs / 1000), |
| 81 bank_size_(GetBankSize(sample_rate_hz, erb_resolution)), | 77 bank_size_(GetBankSize(config.sample_rate_hz, kErbResolution)), |
| 82 sample_rate_hz_(sample_rate_hz), | 78 sample_rate_hz_(config.sample_rate_hz), |
| 83 erb_resolution_(erb_resolution), | 79 erb_resolution_(kErbResolution), |
| 84 channels_(channels), | 80 num_capture_channels_(config.num_capture_channels), |
| 85 analysis_rate_(analysis_rate), | 81 num_render_channels_(config.num_render_channels), |
| 86 variance_rate_(variance_rate), | 82 analysis_rate_(config.analysis_rate), |
| 83 capture_vad_thresh_(config.capture_vad_thresh), | |
| 84 render_vad_thresh_(config.render_vad_thresh), | |
| 85 activate_snr_thresh_(config.activate_snr_thresh), | |
| 86 deactivate_snr_thresh_(config.deactivate_snr_thresh), | |
| 87 active_(false), | |
| 88 deactivating_(false), | |
| 87 clear_variance_(freqs_, | 89 clear_variance_(freqs_, |
| 88 static_cast<VarianceType>(cv_type), | 90 config.var_type, |
| 89 cv_win, | 91 config.var_window_size, |
| 90 cv_alpha), | 92 config.var_decay_rate), |
| 91 noise_variance_(freqs_, VarianceType::kStepInfinite, 475, 0.01f), | 93 noise_variance_(freqs_, |
| 94 config.var_type, | |
| 95 config.var_window_size, | |
| 96 config.var_decay_rate), | |
| 92 filtered_clear_var_(new float[bank_size_]), | 97 filtered_clear_var_(new float[bank_size_]), |
| 93 filtered_noise_var_(new float[bank_size_]), | 98 filtered_noise_var_(new float[bank_size_]), |
| 94 filter_bank_(bank_size_), | 99 filter_bank_(bank_size_), |
| 95 center_freqs_(new float[bank_size_]), | 100 center_freqs_(new float[bank_size_]), |
| 96 rho_(new float[bank_size_]), | 101 rho_(new float[bank_size_]), |
| 97 gains_eq_(new float[bank_size_]), | 102 gains_eq_(new float[bank_size_]), |
| 98 gain_applier_(freqs_, gain_limit), | 103 gain_applier_(freqs_, config.gain_change_limit), |
| 99 temp_out_buffer_(nullptr), | 104 temp_render_out_buffer_(nullptr), |
| 100 input_audio_(new float* [channels]), | 105 temp_capture_out_buffer_(nullptr), |
| 101 kbd_window_(new float[window_size_]), | 106 kbd_window_(new float[window_size_]), |
| 102 render_callback_(this, AudioSource::kRenderStream), | 107 render_callback_(this, AudioSource::kRenderStream), |
| 103 capture_callback_(this, AudioSource::kCaptureStream), | 108 capture_callback_(this, AudioSource::kCaptureStream), |
| 104 block_count_(0), | 109 block_count_(0), |
| 105 analysis_step_(0), | 110 analysis_step_(0), |
| 106 vad_high_(WebRtcVad_Create()), | 111 using_capture_vad_(true), |
| 107 vad_low_(WebRtcVad_Create()), | 112 using_render_vad_(true), |
| 108 vad_tmp_buffer_(new int16_t[chunk_length_]) { | 113 vad_tmp_buffer_(new int16_t[chunk_length_]) { |
| 109 DCHECK_LE(kConfigRho, 1.0f); | 114 DCHECK_LE(config.rho, 1.0f); |
| 110 | 115 |
| 111 CreateErbBank(); | 116 CreateErbBank(); |
| 112 | 117 |
| 113 WebRtcVad_Init(vad_high_); | 118 temp_render_out_buffer_ = static_cast<float**>( |
| 114 WebRtcVad_set_mode(vad_high_, 0); // High likelihood of speech. | 119 malloc(sizeof(*temp_render_out_buffer_) * num_render_channels_ + |
|
aluebs-webrtc 2015/07/21 21:27:00: There is probably a smarter way of allocating this.
ekm 2015/07/23 00:26:28: Definitely. Changed to ChannelBuffer.
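
For context, a minimal sketch of what the ChannelBuffer-based allocation mentioned above could look like (assuming the `ChannelBuffer<float>(num_frames, num_channels)` constructor and `channels()` accessor from `webrtc/common_audio/channel_buffer.h`; the follow-up patch set itself is not shown in this diff, and the class name below is hypothetical):

```cpp
// Hypothetical sketch only: replaces the manual malloc() pointer-table layout
// with a ChannelBuffer member, so no free() or pointer arithmetic is needed.
#include "webrtc/common_audio/channel_buffer.h"

class RenderBufferSketch {
 public:
  RenderBufferSketch(int chunk_length, int num_render_channels)
      : temp_render_out_buffer_(chunk_length, num_render_channels) {}

  // LappedTransform::ProcessChunk() can write straight into the buffer's
  // per-channel pointers, e.g.:
  //   render_mangler_->ProcessChunk(audio, temp_render_out_buffer_.channels());
  float* const* channels() { return temp_render_out_buffer_.channels(); }

 private:
  ChannelBuffer<float> temp_render_out_buffer_;  // Owns the per-channel storage.
};
```
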
| 115 WebRtcVad_Init(vad_low_); | 120 sizeof(**temp_render_out_buffer_) * chunk_length_ * |
| 116 WebRtcVad_set_mode(vad_low_, 3); // Low likelihood of speech. | 121 num_render_channels_)); |
| 122 for (int i = 0; i < num_render_channels_; ++i) { | |
| 123 temp_render_out_buffer_[i] = | |
| 124 reinterpret_cast<float*>(temp_render_out_buffer_ + | |
| 125 num_render_channels_) + | |
| 126 chunk_length_ * i; | |
| 127 } | |
| 117 | 128 |
| 118 temp_out_buffer_ = static_cast<float**>( | 129 temp_capture_out_buffer_ = static_cast<float**>( |
| 119 malloc(sizeof(*temp_out_buffer_) * channels_ + | 130 malloc(sizeof(*temp_capture_out_buffer_) * num_capture_channels_ + |
| 120 sizeof(**temp_out_buffer_) * chunk_length_ * channels_)); | 131 sizeof(**temp_capture_out_buffer_) * chunk_length_ * |
| 121 for (int i = 0; i < channels_; ++i) { | 132 num_capture_channels_)); |
| 122 temp_out_buffer_[i] = | 133 for (int i = 0; i < num_capture_channels_; ++i) { |
| 123 reinterpret_cast<float*>(temp_out_buffer_ + channels_) + | 134 temp_capture_out_buffer_[i] = |
| 135 reinterpret_cast<float*>(temp_capture_out_buffer_ + | |
| 136 num_capture_channels_) + | |
| 124 chunk_length_ * i; | 137 chunk_length_ * i; |
| 125 } | 138 } |
| 126 | 139 |
| 127 // Assumes all rho equal. | 140 // Assumes all rho equal. |
| 128 for (int i = 0; i < bank_size_; ++i) { | 141 for (int i = 0; i < bank_size_; ++i) { |
| 129 rho_[i] = kConfigRho * kConfigRho; | 142 rho_[i] = config.rho * config.rho; |
| 130 } | 143 } |
| 131 | 144 |
| 132 float freqs_khz = kClipFreq / 1000.0f; | 145 float freqs_khz = kClipFreq / 1000.0f; |
| 133 int erb_index = static_cast<int>(ceilf( | 146 int erb_index = static_cast<int>(ceilf( |
| 134 11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f)); | 147 11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f)); |
| 135 start_freq_ = max(1, erb_index * kErbResolution); | 148 start_freq_ = max(1, erb_index * erb_resolution_); |
| 136 | 149 |
| 137 WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_, | 150 WindowGenerator::KaiserBesselDerived(kKbdAlpha, window_size_, |
| 138 kbd_window_.get()); | 151 kbd_window_.get()); |
| 139 render_mangler_.reset(new LappedTransform( | 152 render_mangler_.reset(new LappedTransform( |
| 140 channels_, channels_, chunk_length_, kbd_window_.get(), window_size_, | 153 num_render_channels_, num_render_channels_, chunk_length_, |
| 141 window_size_ / 2, &render_callback_)); | 154 kbd_window_.get(), window_size_, window_size_ / 2, &render_callback_)); |
| 142 capture_mangler_.reset(new LappedTransform( | 155 capture_mangler_.reset(new LappedTransform( |
| 143 channels_, channels_, chunk_length_, kbd_window_.get(), window_size_, | 156 num_capture_channels_, num_capture_channels_, chunk_length_, |
| 144 window_size_ / 2, &capture_callback_)); | 157 kbd_window_.get(), window_size_, window_size_ / 2, &capture_callback_)); |
| 145 } | 158 } |
| 146 | 159 |
| 147 IntelligibilityEnhancer::~IntelligibilityEnhancer() { | 160 IntelligibilityEnhancer::~IntelligibilityEnhancer() { |
| 148 WebRtcVad_Free(vad_low_); | 161 free(temp_render_out_buffer_); |
| 149 WebRtcVad_Free(vad_high_); | 162 free(temp_capture_out_buffer_); |
| 150 free(temp_out_buffer_); | |
| 151 } | 163 } |
| 152 | 164 |
| 153 void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio) { | 165 void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio, |
| 154 for (int i = 0; i < chunk_length_; ++i) { | 166 int sample_rate_hz, |
| 155 vad_tmp_buffer_[i] = (int16_t)audio[0][i]; | 167 int num_channels, |
| 168 float voice_probability) { | |
| 169 render_voice_probability_ = voice_probability; | |
| 170 using_render_vad_ = false; | |
| 171 ProcessRenderAudio(audio, sample_rate_hz, num_channels); | |
| 172 } | |
| 173 | |
| 174 void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio, | |
| 175 int sample_rate_hz, | |
| 176 int num_channels) { | |
| 177 CHECK_EQ(sample_rate_hz_, sample_rate_hz); | |
| 178 CHECK_EQ(num_render_channels_, num_channels); | |
| 179 | |
| 180 if (using_render_vad_) { | |
| 181 FloatToS16(audio[0], chunk_length_, vad_tmp_buffer_.get()); | |
| 182 render_vad_.ProcessChunk(vad_tmp_buffer_.get(), chunk_length_, | |
| 183 sample_rate_hz_); | |
| 184 render_voice_probability_ = render_vad_.last_voice_probability(); | |
| 156 } | 185 } |
| 157 has_voice_low_ = WebRtcVad_Process(vad_low_, sample_rate_hz_, | |
| 158 vad_tmp_buffer_.get(), chunk_length_) == 1; | |
| 159 | 186 |
| 160 // Process and enhance chunk of |audio| | 187 if (render_voice_probability_ >= render_vad_thresh_ || active_) { |
| 161 render_mangler_->ProcessChunk(audio, temp_out_buffer_); | 188 render_mangler_->ProcessChunk(audio, temp_render_out_buffer_); |
| 189 } | |
| 162 | 190 |
| 163 for (int i = 0; i < channels_; ++i) { | 191 for (int i = 0; i < num_render_channels_; ++i) { |
| 164 memcpy(audio[i], temp_out_buffer_[i], | 192 memcpy(audio[i], temp_render_out_buffer_[i], |
| 165 chunk_length_ * sizeof(**temp_out_buffer_)); | 193 chunk_length_ * sizeof(**temp_render_out_buffer_)); |
| 166 } | 194 } |
| 167 } | 195 } |
| 168 | 196 |
| 169 void IntelligibilityEnhancer::ProcessCaptureAudio(float* const* audio) { | 197 void IntelligibilityEnhancer::AnalyzeCaptureAudio(float* const* audio, |
| 170 for (int i = 0; i < chunk_length_; ++i) { | 198 int sample_rate_hz, |
| 171 vad_tmp_buffer_[i] = (int16_t)audio[0][i]; | 199 int num_channels, |
| 200 float voice_probability) { | |
| 201 capture_voice_probability_ = voice_probability; | |
| 202 using_capture_vad_ = false; | |
| 203 AnalyzeCaptureAudio(audio, sample_rate_hz, num_channels); | |
| 204 } | |
| 205 | |
| 206 void IntelligibilityEnhancer::AnalyzeCaptureAudio(float* const* audio, | |
| 207 int sample_rate_hz, | |
| 208 int num_channels) { | |
| 209 CHECK_EQ(sample_rate_hz_, sample_rate_hz); | |
| 210 CHECK_EQ(num_capture_channels_, num_channels); | |
| 211 | |
| 212 if (using_capture_vad_) { | |
| 213 FloatToS16(audio[0], chunk_length_, vad_tmp_buffer_.get()); | |
| 214 capture_vad_.ProcessChunk(vad_tmp_buffer_.get(), chunk_length_, | |
| 215 sample_rate_hz_); | |
| 216 capture_voice_probability_ = capture_vad_.last_voice_probability(); | |
| 172 } | 217 } |
| 173 // TODO(bercic): The VAD was always detecting voice in the noise stream, | |
| 174 // no matter what the aggressiveness, so it was temporarily disabled here. | |
| 175 | 218 |
| 176 #if 0 | 219 if (capture_voice_probability_ <= capture_vad_thresh_) { |
| 177 if (WebRtcVad_Process(vad_high_, sample_rate_hz_, vad_tmp_buffer_.get(), | 220 capture_mangler_->ProcessChunk(audio, temp_capture_out_buffer_); |
| 178 chunk_length_) == 1) { | 221 } |
| 179 printf("capture HAS speech\n"); | |
| 180 return; | |
| 181 } | |
| 182 printf("capture NO speech\n"); | |
| 183 #endif | |
| 184 | |
| 185 capture_mangler_->ProcessChunk(audio, temp_out_buffer_); | |
| 186 } | 222 } |
| 187 | 223 |
| 188 void IntelligibilityEnhancer::DispatchAudio( | 224 void IntelligibilityEnhancer::DispatchAudio( |
| 189 IntelligibilityEnhancer::AudioSource source, | 225 IntelligibilityEnhancer::AudioSource source, |
| 190 const complex<float>* in_block, | 226 const complex<float>* in_block, |
| 191 complex<float>* out_block) { | 227 complex<float>* out_block) { |
| 192 switch (source) { | 228 switch (source) { |
| 193 case kRenderStream: | 229 case kRenderStream: |
| 194 ProcessClearBlock(in_block, out_block); | 230 ProcessClearBlock(in_block, out_block); |
| 195 break; | 231 break; |
| 196 case kCaptureStream: | 232 case kCaptureStream: |
| 197 ProcessNoiseBlock(in_block, out_block); | 233 ProcessNoiseBlock(in_block, out_block); |
| 198 break; | 234 break; |
| 199 } | 235 } |
| 200 } | 236 } |
| 201 | 237 |
| 202 void IntelligibilityEnhancer::ProcessClearBlock(const complex<float>* in_block, | 238 void IntelligibilityEnhancer::ProcessClearBlock(const complex<float>* in_block, |
| 203 complex<float>* out_block) { | 239 complex<float>* out_block) { |
| 204 if (block_count_ < 2) { | 240 if (block_count_ < 2) { |
| 205 memset(out_block, 0, freqs_ * sizeof(*out_block)); | 241 memset(out_block, 0, freqs_ * sizeof(*out_block)); |
| 206 ++block_count_; | 242 ++block_count_; |
| 207 return; | 243 return; |
| 208 } | 244 } |
| 209 | 245 |
| 210 // For now, always assumes enhancement is necessary. | 246 if (render_voice_probability_ >= render_vad_thresh_) { |
| 211 // TODO(ekmeyerson): Change to only enhance if necessary, | |
| 212 // based on experiments with different cutoffs. | |
| 213 if (has_voice_low_ || true) { | |
| 214 clear_variance_.Step(in_block, false); | 247 clear_variance_.Step(in_block, false); |
| 215 const float power_target = std::accumulate( | 248 if (active_ && !deactivating_ && |
| 216 clear_variance_.variance(), clear_variance_.variance() + freqs_, 0.0f); | 249 block_count_ % analysis_rate_ == analysis_rate_ - 1) { |
| 217 | 250 const float power_target = std::accumulate( |
| 218 if (block_count_ % analysis_rate_ == analysis_rate_ - 1) { | 251 clear_variance_.variance(), clear_variance_.variance() + freqs_, 0.f); |
| 219 AnalyzeClearBlock(power_target); | 252 AnalyzeClearBlock(power_target); |
| 220 ++analysis_step_; | 253 ++analysis_step_; |
| 221 if (analysis_step_ == variance_rate_) { | |
| 222 analysis_step_ = 0; | |
| 223 clear_variance_.Clear(); | |
| 224 noise_variance_.Clear(); | |
| 225 } | |
| 226 } | 254 } |
| 227 ++block_count_; | 255 ++block_count_; |
| 228 } | 256 } |
| 229 | 257 |
| 230 /* efidata(n,:) = sqrt(b(n)) * fidata(n,:) */ | 258 UpdateActivity(); |
| 231 gain_applier_.Apply(in_block, out_block); | 259 if (active_) { |
| 260 // efidata(n,:) = sqrt(b(n)) * fidata(n,:) | |
| 261 gain_applier_.Apply(in_block, out_block); | |
| 262 } | |
| 232 } | 263 } |
| 233 | 264 |
| 234 void IntelligibilityEnhancer::AnalyzeClearBlock(float power_target) { | 265 void IntelligibilityEnhancer::AnalyzeClearBlock(float power_target) { |
| 235 FilterVariance(clear_variance_.variance(), filtered_clear_var_.get()); | 266 FilterVariance(clear_variance_.variance(), filtered_clear_var_.get()); |
| 236 FilterVariance(noise_variance_.variance(), filtered_noise_var_.get()); | 267 FilterVariance(noise_variance_.variance(), filtered_noise_var_.get()); |
| 237 | 268 |
| 238 SolveForGainsGivenLambda(kLambdaTop, start_freq_, gains_eq_.get()); | 269 SolveForGainsGivenLambda(kLambdaTop, start_freq_, gains_eq_.get()); |
| 239 const float power_top = | 270 const float power_top = |
| 240 DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_); | 271 DotProduct(gains_eq_.get(), filtered_clear_var_.get(), bank_size_); |
| 241 SolveForGainsGivenLambda(kLambdaBot, start_freq_, gains_eq_.get()); | 272 SolveForGainsGivenLambda(kLambdaBot, start_freq_, gains_eq_.get()); |
| (...skipping 146 matching lines...) | |
| 388 sols[n] = fmax(0, sols[n]); | 419 sols[n] = fmax(0, sols[n]); |
| 389 } | 420 } |
| 390 } | 421 } |
| 391 | 422 |
| 392 void IntelligibilityEnhancer::FilterVariance(const float* var, float* result) { | 423 void IntelligibilityEnhancer::FilterVariance(const float* var, float* result) { |
| 393 for (int i = 0; i < bank_size_; ++i) { | 424 for (int i = 0; i < bank_size_; ++i) { |
| 394 result[i] = DotProduct(filter_bank_[i].data(), var, freqs_); | 425 result[i] = DotProduct(filter_bank_[i].data(), var, freqs_); |
| 395 } | 426 } |
| 396 } | 427 } |
| 397 | 428 |
| 429 float IntelligibilityEnhancer::SNR() { | |
| 430 float total_clear_var = std::accumulate( | |
| 431 clear_variance_.variance(), clear_variance_.variance() + freqs_, 0.f); | |
| 432 float total_noise_var = | |
| 433 std::accumulate(noise_variance_.variance(), | |
| 434 noise_variance_.variance() + freqs_, kMinNoise); | |
| 435 return total_clear_var / total_noise_var; | |
| 436 } | |
| 437 | |
| 438 void IntelligibilityEnhancer::UpdateActivity() { | |
| 439 const float snr = SNR(); | |
| 440 if (snr <= activate_snr_thresh_) { | |
| 441 active_ = true; | |
| 442 deactivating_ = false; | |
| 443 } else if (active_ && !deactivating_ && snr >= deactivate_snr_thresh_) { | |
| 444 gain_applier_.Clear(); | |
| 445 deactivating_ = true; | |
| 446 } else if (deactivating_ && gain_applier_.IsIdentity()) { | |
| 447 active_ = false; | |
| 448 deactivating_ = false; | |
| 449 } | |
| 450 } | |
| 451 | |
| 398 float IntelligibilityEnhancer::DotProduct(const float* a, | 452 float IntelligibilityEnhancer::DotProduct(const float* a, |
| 399 const float* b, | 453 const float* b, |
| 400 int length) { | 454 int length) { |
| 401 float ret = 0.0f; | 455 float ret = 0.0f; |
| 402 | 456 |
| 403 for (int i = 0; i < length; ++i) { | 457 for (int i = 0; i < length; ++i) { |
| 404 ret = fmaf(a[i], b[i], ret); | 458 ret = fmaf(a[i], b[i], ret); |
| 405 } | 459 } |
| 406 return ret; | 460 return ret; |
| 407 } | 461 } |
| 408 | 462 |
| 409 } // namespace webrtc | 463 } // namespace webrtc |
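
As a side note on the `start_freq_` computation in the constructor: it maps `kClipFreq` onto the ERB-rate scale and then scales by the ERB resolution. Below is a standalone sketch with the constants copied from this file; the printed numbers are only an illustrative check, not part of the CL.

```cpp
#include <math.h>
#include <stdio.h>

#include <algorithm>

int main() {
  const float kClipFreq = 200.0f;  // Hz, as in this file.
  const int kErbResolution = 2;    // Filters per ERB, as in this file.
  const float freqs_khz = kClipFreq / 1000.0f;
  // Frequency in kHz -> ERB-rate band index, same expression as the constructor.
  const int erb_index = static_cast<int>(ceilf(
      11.17f * logf((freqs_khz + 0.312f) / (freqs_khz + 14.6575f)) + 43.0f));
  const int start_freq = std::max(1, erb_index * kErbResolution);
  // For 200 Hz this evaluates to erb_index == 6 and start_freq == 12, i.e. the
  // first filter-bank index passed to SolveForGainsGivenLambda().
  printf("erb_index=%d start_freq=%d\n", erb_index, start_freq);
  return 0;
}
```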