Index: webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
diff --git a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
index f3d023ec620212142d41458e9c04070ad3dd1aae..421a501dda67e08091898eafd7968bcf2a09b526 100644
--- a/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
+++ b/webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.cc
@@ -68,6 +68,7 @@ void MapToErbBands(const float* pow,

 IntelligibilityEnhancer::IntelligibilityEnhancer(int sample_rate_hz,
                                                  size_t num_render_channels,
+                                                 size_t num_bands,
                                                  size_t num_noise_bins)
     : freqs_(RealFourier::ComplexLength(
           RealFourier::FftOrder(sample_rate_hz * kWindowSizeMs / 1000))),
@@ -110,14 +111,24 @@ IntelligibilityEnhancer::IntelligibilityEnhancer(int sample_rate_hz,
   render_mangler_.reset(new LappedTransform(
       num_render_channels_, num_render_channels_, chunk_length_,
       kbd_window.data(), window_size, window_size / 2, this));
+
+  const size_t initial_delay = render_mangler_->initial_delay();
peah-webrtc 2016/09/13 13:29:59
Have you verified that this is indeed the delay in
aluebs-webrtc 2016/09/14 00:35:54
Yes.
+  for (size_t i = 0u; i < num_bands - 1; ++i) {
+    high_bands_buffers_.push_back(
+        std::unique_ptr<AudioRingBuffer>(new AudioRingBuffer(
+            num_render_channels_, chunk_length_ + initial_delay)));
+    high_bands_buffers_[i]->MoveReadPositionBackward(initial_delay);
+  }
 }

 IntelligibilityEnhancer::~IntelligibilityEnhancer() {
-  // Don't rely on this log, since the destructor isn't called when the app/tab
-  // is killed.
-  LOG(LS_INFO) << "Intelligibility Enhancer was active for "
-               << static_cast<float>(num_active_chunks_) / num_chunks_
-               << "% of the call.";
+  if (num_chunks_ > 0) {
+    // Don't rely on this log, since the destructor isn't called when the
+    // app/tab is killed.
+    LOG(LS_INFO) << "Intelligibility Enhancer was active for "
peah-webrtc 2016/09/13 13:29:59
I think it would be good to have a log line for th
aluebs-webrtc 2016/09/14 00:35:54
Done.
+                 << 100.f * static_cast<float>(num_active_chunks_) / num_chunks_
+                 << "% of the call.";
+  }
 }

 void IntelligibilityEnhancer::SetCaptureNoiseEstimate(
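The ring buffers added in the constructor above act as fixed delay lines: each one has room for chunk_length_ + initial_delay frames, and moving the read position back by initial_delay means every read lags the corresponding write by exactly initial_delay samples (zeros come out until real audio reaches the read position). A minimal self-contained sketch of that idea; the single-channel DelayLine below is hypothetical and only illustrates the behavior, it is not WebRTC's AudioRingBuffer:

  // Hedged sketch: a single-channel delay line analogous to what the CL sets
  // up per high band. The first `delay` output samples are zeros; afterwards
  // the output equals the input delayed by `delay` samples.
  #include <cstddef>
  #include <vector>

  class DelayLine {
   public:
    explicit DelayLine(size_t delay) : pending_(delay, 0.f) {}

    // Appends `in` and returns the same number of samples, delayed.
    std::vector<float> Process(const std::vector<float>& in) {
      pending_.insert(pending_.end(), in.begin(), in.end());
      std::vector<float> out(pending_.begin(), pending_.begin() + in.size());
      pending_.erase(pending_.begin(), pending_.begin() + in.size());
      return out;
    }

   private:
    std::vector<float> pending_;  // Always holds `delay` samples between calls.
  };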
@@ -132,16 +143,15 @@ void IntelligibilityEnhancer::SetCaptureNoiseEstimate(
   };
 }

-void IntelligibilityEnhancer::ProcessRenderAudio(float* const* audio,
-                                                 int sample_rate_hz,
-                                                 size_t num_channels) {
-  RTC_CHECK_EQ(sample_rate_hz_, sample_rate_hz);
-  RTC_CHECK_EQ(num_render_channels_, num_channels);
+void IntelligibilityEnhancer::ProcessRenderAudio(AudioBuffer* audio) {
+  RTC_DCHECK_EQ(num_render_channels_, audio->num_channels());
   while (noise_estimation_queue_.Remove(&noise_estimation_buffer_)) {
     noise_power_estimator_.Step(noise_estimation_buffer_.data());
   }
-  is_speech_ = IsSpeech(audio[0]);
-  render_mangler_->ProcessChunk(audio, audio);
+  float* const* low_band = audio->split_channels_f(kBand0To8kHz);
+  is_speech_ = IsSpeech(low_band[0]);
+  render_mangler_->ProcessChunk(low_band, low_band);
+  DelayHighBands(audio);
 }

 void IntelligibilityEnhancer::ProcessAudioBlock(
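With this change, ProcessRenderAudio() enhances only the lowest split band (kBand0To8kHz) through the lapped transform and calls DelayHighBands() so the remaining bands stay time aligned with it. For the common 48 kHz render path, WebRTC splits the signal into three 16 kHz sub-bands (0-8, 8-16 and 16-24 kHz), which is why the constructor builds num_bands - 1 ring buffers. A small hedged sketch of that bookkeeping, using illustrative values only:

  // Hedged sketch of the band bookkeeping this CL relies on, assuming the
  // usual three-way split of 48 kHz audio into 0-8, 8-16 and 16-24 kHz bands.
  #include <cstddef>

  constexpr int kFullBandRateHz = 48000;                  // Illustrative value.
  constexpr size_t kNumBands = kFullBandRateHz / 16000;   // Three 16 kHz bands.
  constexpr size_t kNumHighBandBuffers = kNumBands - 1;   // Ring buffers built.
  static_assert(kNumHighBandBuffers == 2,
                "48 kHz render audio leaves two high bands to delay");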
@@ -154,8 +164,6 @@ void IntelligibilityEnhancer::ProcessAudioBlock(
   if (is_speech_) {
     clear_power_estimator_.Step(in_block[0]);
   }
-  SnrBasedEffectActivation();
-  ++num_chunks_;
   if (is_active_) {
     ++num_active_chunks_;
     if (num_chunks_ % kGainUpdatePeriod == 0) {
@@ -179,6 +187,8 @@ void IntelligibilityEnhancer::ProcessAudioBlock(
       }  // Else experiencing power underflow, so do nothing.
     }
   }
+  SnrBasedEffectActivation();
peah-webrtc 2016/09/13 13:29:59
I think it makes sense. But please comment in the
aluebs-webrtc 2016/09/14 00:35:54
Moved it back. This was left from when is_active_
+  ++num_chunks_;
   for (size_t i = 0; i < in_channels; ++i) {
     gain_applier_.Apply(in_block[i], out_block[i]);
   }
@@ -369,4 +379,15 @@ bool IntelligibilityEnhancer::IsSpeech(const float* audio) {
   return chunks_since_voice_ < kSpeechOffsetDelay;
 }

+void IntelligibilityEnhancer::DelayHighBands(AudioBuffer* audio) {
+  RTC_DCHECK_EQ(audio->num_bands(), high_bands_buffers_.size() + 1u);
+  for (size_t i = 0u; i < high_bands_buffers_.size(); ++i) {
+    Band band = static_cast<Band>(i + 1);
+    high_bands_buffers_[i]->Write(audio->split_channels_const_f(band),
+                                  num_render_channels_, chunk_length_);
+    high_bands_buffers_[i]->Read(audio->split_channels_f(band),
+                                 num_render_channels_, chunk_length_);
+  }
+}
+
 }  // namespace webrtc
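The capacity requested in the constructor, chunk_length_ + initial_delay, is exactly what DelayHighBands() needs: each chunk it writes chunk_length_ frames and then reads chunk_length_ frames, and the read position starts initial_delay frames behind, so occupancy never exceeds that sum. A small hedged sketch checking the invariant with made-up numbers (the chunk length and delay below are placeholders, not the real values):

  // Hedged sketch: why a capacity of chunk_length_ + initial_delay suffices.
  #include <cassert>
  #include <cstddef>

  int main() {
    const size_t chunk_length = 480;   // E.g. 10 ms at 48 kHz (assumption).
    const size_t initial_delay = 128;  // Placeholder for the transform delay.
    const size_t capacity = chunk_length + initial_delay;
    // MoveReadPositionBackward(initial_delay) makes `initial_delay` zero
    // frames readable before any audio has been written.
    size_t readable = initial_delay;
    for (int chunk = 0; chunk < 1000; ++chunk) {
      readable += chunk_length;        // Write() one chunk.
      assert(readable <= capacity);    // Never exceeds the allocation.
      readable -= chunk_length;        // Read() one chunk of delayed audio.
    }
    return 0;
  }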