Chromium Code Reviews| Index: webrtc/modules/audio_processing/audio_processing_impl.cc |
| diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc |
| index 87b82a6a3509131adae9ed698cc0f896fd01d4c0..d22d24204e5703397c0f4a9b96309e4cbb2d6191 100644 |
| --- a/webrtc/modules/audio_processing/audio_processing_impl.cc |
| +++ b/webrtc/modules/audio_processing/audio_processing_impl.cc |
| @@ -7,7 +7,7 @@ |
| * in the file PATENTS. All contributing project authors may |
| * be found in the AUTHORS file in the root of the source tree. |
| */ |
| - |
| +#include <iostream> |
|
aluebs-webrtc
2015/07/21 01:50:55
Left from debugging, right?
ekm
2015/07/21 19:22:13
Yep. Done.
|
| #include "webrtc/modules/audio_processing/audio_processing_impl.h" |
| #include <assert.h> |
| @@ -28,6 +28,7 @@ extern "C" { |
| #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" |
| #include "webrtc/modules/audio_processing/gain_control_impl.h" |
| #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" |
| +#include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h" |
| #include "webrtc/modules/audio_processing/level_estimator_impl.h" |
| #include "webrtc/modules/audio_processing/noise_suppression_impl.h" |
| #include "webrtc/modules/audio_processing/processing_component.h" |
| @@ -195,7 +196,8 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
| beamformer_enabled_(config.Get<Beamforming>().enabled), |
| beamformer_(beamformer), |
| array_geometry_(config.Get<Beamforming>().array_geometry), |
| - supports_48kHz_(config.Get<AudioProcessing48kHzSupport>().enabled) { |
| + supports_48kHz_(config.Get<AudioProcessing48kHzSupport>().enabled), |
| + intelligibility_enabled_(config.Get<Intelligibility>().enabled) { |
| echo_cancellation_ = new EchoCancellationImpl(this, crit_); |
| component_list_.push_back(echo_cancellation_); |
| @@ -305,6 +307,8 @@ int AudioProcessingImpl::InitializeLocked() { |
| InitializeBeamformer(); |
| + InitializeIntelligibility(); |
| + |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| if (debug_file_->Open()) { |
| int err = WriteInitMessage(); |
| @@ -427,6 +431,11 @@ void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
| transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; |
| InitializeTransient(); |
| } |
| + |
| + if (intelligibility_enabled_ != config.Get<Intelligibility>().enabled) { |
|
Andrew MacDonald
2015/07/21 19:29:21
So, remove this.
ekm
2015/07/23 00:26:28
Done.
|
| + intelligibility_enabled_ = config.Get<Intelligibility>().enabled; |
| + InitializeIntelligibility(); |
| + } |
| } |
| int AudioProcessingImpl::input_sample_rate_hz() const { |
| @@ -599,6 +608,7 @@ int AudioProcessingImpl::ProcessStreamLocked() { |
| MaybeUpdateHistograms(); |
| AudioBuffer* ca = capture_audio_.get(); // For brevity. |
| + |
| if (use_new_agc_ && gain_control_->is_enabled()) { |
| agc_manager_->AnalyzePreProcess(ca->channels()[0], |
| ca->num_channels(), |
| @@ -610,6 +620,11 @@ int AudioProcessingImpl::ProcessStreamLocked() { |
| ca->SplitIntoFrequencyBands(); |
| } |
| + if (intelligibility_enabled_) { |
| + intelligibility_enhancer_->AnalyzeCaptureAudio( |
| + ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels()); |
| + } |
| + |
| if (beamformer_enabled_) { |
| beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f()); |
| ca->set_num_channels(1); |
| @@ -664,9 +679,22 @@ int AudioProcessingImpl::ProcessStreamLocked() { |
| return kNoError; |
| } |
| +int AudioProcessingImpl::ProcessReverseStream(float* const* data, |
| + int samples_per_channel, |
| + int rev_sample_rate_hz, |
| + ChannelLayout layout) { |
| + RETURN_ON_ERR(AnalyzeReverseStream(data, samples_per_channel, |
| + rev_sample_rate_hz, layout)); |
| + if (intelligibility_enabled_) { |
| + render_audio_->CopyTo(samples_per_channel, layout, data); |
| + } |
| + |
| + return kNoError; |
| +} |
| + |
| int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
| int samples_per_channel, |
| - int sample_rate_hz, |
| + int rev_sample_rate_hz, |
| ChannelLayout layout) { |
| CriticalSectionScoped crit_scoped(crit_); |
| if (data == NULL) { |
| @@ -674,12 +702,10 @@ int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
| } |
| const int num_channels = ChannelsFromLayout(layout); |
| - RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(), |
| - fwd_out_format_.rate(), |
| - sample_rate_hz, |
| - fwd_in_format_.num_channels(), |
| - fwd_out_format_.num_channels(), |
| - num_channels)); |
| + RETURN_ON_ERR( |
| + MaybeInitializeLocked(fwd_in_format_.rate(), fwd_out_format_.rate(), |
| + rev_sample_rate_hz, fwd_in_format_.num_channels(), |
| + fwd_out_format_.num_channels(), num_channels)); |
| if (samples_per_channel != rev_in_format_.samples_per_channel()) { |
| return kBadDataLengthError; |
| } |
| @@ -716,7 +742,6 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| if (frame->sample_rate_hz_ != fwd_in_format_.rate()) { |
| return kBadSampleRateError; |
| } |
| - |
| RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(), |
| fwd_out_format_.rate(), |
| frame->sample_rate_hz_, |
| @@ -738,9 +763,11 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| RETURN_ON_ERR(WriteMessageToDebugFile()); |
| } |
| #endif |
| - |
| render_audio_->DeinterleaveFrom(frame); |
| - return AnalyzeReverseStreamLocked(); |
| + RETURN_ON_ERR(AnalyzeReverseStreamLocked()); |
| + render_audio_->InterleaveTo(frame, intelligibility_enabled_); |
| + |
| + return kNoError; |
| } |
| int AudioProcessingImpl::AnalyzeReverseStreamLocked() { |
|
Andrew MacDonald
2015/07/21 19:29:22
So AnalyzeReverseStream is no longer just for analysis.
ekm
2015/07/23 00:26:28
Yep, sounds good. I've re-renamed AnalyzeReverseStream accordingly.
Andrew MacDonald
2015/07/24 23:50:39
Yes, mark AnalyzeReverseStream as deprecated in audio_processing.h.
ekm
2015/07/29 00:37:19
Done.
|
| @@ -749,12 +776,21 @@ int AudioProcessingImpl::AnalyzeReverseStreamLocked() { |
| ra->SplitIntoFrequencyBands(); |
| } |
| + if (intelligibility_enabled_) { |
| + intelligibility_enhancer_->ProcessRenderAudio( |
| + ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); |
| + } |
| + |
| RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); |
| RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); |
| if (!use_new_agc_) { |
| RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); |
| } |
| + if (rev_proc_format_.rate() == kSampleRate32kHz) { |
| + ra->MergeFrequencyBands(); |
| + } |
| + |
| return kNoError; |
| } |
| @@ -1001,6 +1037,15 @@ void AudioProcessingImpl::InitializeBeamformer() { |
| } |
| } |
| +void AudioProcessingImpl::InitializeIntelligibility() { |
| + if (intelligibility_enabled_) { |
| + IntelligibilityEnhancer::Config config; |
| + config.sample_rate_hz = split_rate_; |
| + config.num_channels = 1; // TODO(ekmeyerson): Handle multiple channels. |
| + intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config)); |
| + } |
| +} |
| + |
| void AudioProcessingImpl::MaybeUpdateHistograms() { |
| static const int kMinDiffDelayMs = 60; |