Chromium Code Reviews| Index: webrtc/modules/audio_processing/audio_processing_impl.cc |
| diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc |
| index 222f749fb7b876fc55e53588a6d0914a192f90f1..697b1f2ff0f896dcfdecd275d53e106f3dd49c99 100644 |
| --- a/webrtc/modules/audio_processing/audio_processing_impl.cc |
| +++ b/webrtc/modules/audio_processing/audio_processing_impl.cc |
| @@ -82,15 +82,6 @@ const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing:: |
| namespace { |
| -const int kInternalNativeRates[] = {AudioProcessing::kSampleRate8kHz, |
| - AudioProcessing::kSampleRate16kHz, |
| -#ifdef WEBRTC_ARCH_ARM_FAMILY |
| - AudioProcessing::kSampleRate32kHz}; |
| -#else |
| - AudioProcessing::kSampleRate32kHz, |
| - AudioProcessing::kSampleRate48kHz}; |
| -#endif // WEBRTC_ARCH_ARM_FAMILY |
| - |
| static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { |
| switch (layout) { |
| case AudioProcessing::kMono: |
| @@ -105,18 +96,32 @@ static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { |
| return false; |
| } |
| -bool is_multi_band(int sample_rate_hz) { |
| +bool SampleRateSupportsMultiBand(int sample_rate_hz) { |
| return sample_rate_hz == AudioProcessing::kSampleRate32kHz || |
| sample_rate_hz == AudioProcessing::kSampleRate48kHz; |
| } |
| -int ClosestHigherNativeRate(int min_proc_rate) { |
| - for (int rate : kInternalNativeRates) { |
| - if (rate >= min_proc_rate) { |
| +int NativeProcessRateToUse(int minimum_rate, bool band_splitting_required) { |
|
the sun
2016/09/08 08:10:15
FindNativeProcessRateToUse()
also, this is the ty[…]
peah-webrtc
2016/09/08 08:44:03
Changed the name.
It is indeed a good function to[…]
the sun
2016/09/08 09:20:48
I think so. Unless the functionality is covered by[…]
peah-webrtc
2016/09/08 18:57:50
This is indirectly tested by the tests that operat[…]
the sun
2016/09/08 20:36:38
Very well. I assume the non-bitexact changes canno[…]
peah-webrtc
2016/09/10 07:52:37
Of course, they can. But we have at least coverage[…]
|
| +#ifdef WEBRTC_ARCH_ARM_FAMILY |
| + const int kMaxSplittingNativeProcessRate = AudioProcessing::kSampleRate32kHz; |
| +#else |
| + const int kMaxSplittingNativeProcessRate = AudioProcessing::kSampleRate48kHz; |
| +#endif |
| + RTC_DCHECK_LE(kMaxSplittingNativeProcessRate, |
| + AudioProcessing::kMaxNativeSampleRateHz); |
| + const int uppermost_native_rate = band_splitting_required |
| + ? kMaxSplittingNativeProcessRate |
| + : AudioProcessing::kSampleRate48kHz; |
| + |
| + for (auto rate : AudioProcessing::kNativeSampleRatesHz) { |
| + if (rate >= uppermost_native_rate) { |
| + return uppermost_native_rate; |
| + } |
| + if (rate >= minimum_rate) { |
| return rate; |
| } |
| } |
| - return kInternalNativeRates[arraysize(kInternalNativeRates) - 1]; |
| + RTC_NOTREACHED(); |
| } |
| } // namespace |
| @@ -124,6 +129,79 @@ int ClosestHigherNativeRate(int min_proc_rate) { |
| // Throughout webrtc, it's assumed that success is represented by zero. |
| static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
| +AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} |
| + |
| +bool AudioProcessingImpl::ApmSubmoduleStates::Update( |
| + bool high_pass_filter_enabled, |
| + bool echo_canceller_enabled, |
| + bool mobile_echo_controller_enabled, |
| + bool noise_suppressor_enabled, |
| + bool intelligibility_enhancer_enabled, |
| + bool beamformer_enabled, |
| + bool adaptive_gain_controller_enabled, |
| + bool level_controller_enabled, |
| + bool voice_activity_detector_enabled, |
| + bool level_estimator_enabled, |
| + bool transient_suppressor_enabled) { |
| + bool changed = false; |
| + changed |= (high_pass_filter_enabled != high_pass_filter_enabled_); |
| + changed |= (echo_canceller_enabled != echo_canceller_enabled_); |
| + changed |= |
| + (mobile_echo_controller_enabled != mobile_echo_controller_enabled_); |
| + changed |= (noise_suppressor_enabled != noise_suppressor_enabled_); |
| + changed |= |
| + (intelligibility_enhancer_enabled != intelligibility_enhancer_enabled_); |
| + changed |= (beamformer_enabled != beamformer_enabled_); |
| + changed |= |
| + (adaptive_gain_controller_enabled != adaptive_gain_controller_enabled_); |
| + changed |= (level_controller_enabled != level_controller_enabled_); |
| + changed |= (level_estimator_enabled != level_estimator_enabled_); |
| + changed |= |
| + (voice_activity_detector_enabled != voice_activity_detector_enabled_); |
| + changed |= (transient_suppressor_enabled != transient_suppressor_enabled_); |
| + if (changed) { |
| + high_pass_filter_enabled_ = high_pass_filter_enabled; |
| + echo_canceller_enabled_ = echo_canceller_enabled; |
| + mobile_echo_controller_enabled_ = mobile_echo_controller_enabled; |
| + noise_suppressor_enabled_ = noise_suppressor_enabled; |
| + intelligibility_enhancer_enabled_ = intelligibility_enhancer_enabled; |
| + beamformer_enabled_ = beamformer_enabled; |
| + adaptive_gain_controller_enabled_ = adaptive_gain_controller_enabled; |
| + level_controller_enabled_ = level_controller_enabled; |
| + level_estimator_enabled_ = level_estimator_enabled; |
| + voice_activity_detector_enabled_ = voice_activity_detector_enabled; |
| + transient_suppressor_enabled_ = transient_suppressor_enabled; |
| + } |
| + |
| + changed |= first_update_; |
| + first_update_ = false; |
| + return changed; |
| +} |
| + |
| +bool AudioProcessingImpl::ApmSubmoduleStates::CaptureMultiBandModulesActive() |
|
the sun
2016/09/08 08:10:15
Is this distinction between "modules" and "effects[…]
peah-webrtc
2016/09/08 08:44:04
Good points!
It is definitely not an accepted ter[…]
|
| + const { |
| + return CaptureMultiBandEffectsActive() || intelligibility_enhancer_enabled_ || |
| + voice_activity_detector_enabled_; |
| +} |
| + |
| +bool AudioProcessingImpl::ApmSubmoduleStates::CaptureMultiBandEffectsActive() |
| + const { |
| + return high_pass_filter_enabled_ || echo_canceller_enabled_ || |
| + mobile_echo_controller_enabled_ || noise_suppressor_enabled_ || |
| + beamformer_enabled_ || adaptive_gain_controller_enabled_; |
| +} |
| + |
| +bool AudioProcessingImpl::ApmSubmoduleStates::RenderMultiBandModulesActive() |
| + const { |
| + return RenderMultiBandEffectsActive() || echo_canceller_enabled_ || |
| + mobile_echo_controller_enabled_ || adaptive_gain_controller_enabled_; |
| +} |
| + |
| +bool AudioProcessingImpl::ApmSubmoduleStates::RenderMultiBandEffectsActive() |
| + const { |
| + return intelligibility_enhancer_enabled_; |
| +} |
| + |
| struct AudioProcessingImpl::ApmPublicSubmodules { |
| ApmPublicSubmodules() {} |
| // Accessed externally of APM without any lock acquired. |
| @@ -275,12 +353,13 @@ int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
| int AudioProcessingImpl::MaybeInitializeRender( |
| const ProcessingConfig& processing_config) { |
| - return MaybeInitialize(processing_config); |
| + return MaybeInitialize(processing_config, false); |
| } |
| int AudioProcessingImpl::MaybeInitializeCapture( |
| - const ProcessingConfig& processing_config) { |
| - return MaybeInitialize(processing_config); |
| + const ProcessingConfig& processing_config, |
| + bool force_initialization) { |
| + return MaybeInitialize(processing_config, force_initialization); |
| } |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| @@ -300,9 +379,10 @@ AudioProcessingImpl::ApmDebugDumpState::~ApmDebugDumpState() {} |
| // Calls InitializeLocked() if any of the audio parameters have changed from |
| // their current values (needs to be called while holding the crit_render_lock). |
| int AudioProcessingImpl::MaybeInitialize( |
| - const ProcessingConfig& processing_config) { |
| + const ProcessingConfig& processing_config, |
| + bool force_initialization) { |
| // Called from both threads. Thread check is therefore not possible. |
| - if (processing_config == formats_.api_format) { |
| + if (processing_config == formats_.api_format && !force_initialization) { |
| return kNoError; |
| } |
| @@ -326,7 +406,8 @@ int AudioProcessingImpl::InitializeLocked() { |
| formats_.rev_proc_format.num_frames(), |
| formats_.rev_proc_format.num_channels(), |
| rev_audio_buffer_out_num_frames)); |
| - if (rev_conversion_needed()) { |
| + if (formats_.api_format.reverse_input_stream() != |
| + formats_.api_format.reverse_output_stream()) { |
| render_.render_converter = AudioConverter::Create( |
| formats_.api_format.reverse_input_stream().num_channels(), |
| formats_.api_format.reverse_input_stream().num_frames(), |
| @@ -397,17 +478,25 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| formats_.api_format = config; |
| - capture_nonlocked_.fwd_proc_format = StreamConfig(ClosestHigherNativeRate( |
| + int fwd_proc_rate = NativeProcessRateToUse( |
|
the sun
2016/09/08 08:10:15
I find it confusing that "fwd" and "rev" are used[…]
peah-webrtc
2016/09/08 08:44:03
I agree, and that is something that has been prese[…]
the sun
2016/09/08 09:20:48
Personally I think Render and Capture makes much m[…]
peah-webrtc
2016/09/08 18:57:50
That sounds great to me. Note that I named the var[…]
the sun
2016/09/08 20:36:38
sgtm
|
| std::min(formats_.api_format.input_stream().sample_rate_hz(), |
| - formats_.api_format.output_stream().sample_rate_hz()))); |
| + formats_.api_format.output_stream().sample_rate_hz()), |
| + submodule_states_.CaptureMultiBandModulesActive() || |
| + submodule_states_.RenderMultiBandModulesActive()); |
| - int rev_proc_rate = ClosestHigherNativeRate(std::min( |
| - formats_.api_format.reverse_input_stream().sample_rate_hz(), |
| - formats_.api_format.reverse_output_stream().sample_rate_hz())); |
| + capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate); |
| + |
| + int rev_proc_rate = NativeProcessRateToUse( |
| + std::min(formats_.api_format.reverse_input_stream().sample_rate_hz(), |
| + formats_.api_format.reverse_output_stream().sample_rate_hz()), |
| + submodule_states_.CaptureMultiBandModulesActive() || |
| + submodule_states_.RenderMultiBandModulesActive()); |
| // TODO(aluebs): Remove this restriction once we figure out why the 3-band |
| // splitting filter degrades the AEC performance. |
| if (rev_proc_rate > kSampleRate32kHz) { |
| - rev_proc_rate = is_rev_processed() ? kSampleRate32kHz : kSampleRate16kHz; |
| + rev_proc_rate = submodule_states_.RenderMultiBandEffectsActive() |
| + ? kSampleRate32kHz |
| + : kSampleRate16kHz; |
| } |
| // If the forward sample rate is 8 kHz, the reverse stream is also processed |
| // at this rate. |
| @@ -556,6 +645,7 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
| float* const* dest) { |
| TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_StreamConfig"); |
| ProcessingConfig processing_config; |
| + bool reinitialization_required; |
|
the sun
2016/09/08 08:10:15
Please, give me a default value, in case someone l[…]
peah-webrtc
2016/09/08 08:44:03
Done.
|
| { |
| // Acquire the capture lock in order to safely call the function |
| // that retrieves the render side data. This function accesses apm |
| @@ -570,6 +660,7 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
| } |
| processing_config = formats_.api_format; |
| + reinitialization_required = UpdateActiveSubmoduleStates(); |
| } |
| processing_config.input_stream() = input_config; |
| @@ -578,7 +669,8 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
| { |
| // Do conditional reinitialization. |
| rtc::CritScope cs_render(&crit_render_); |
| - RETURN_ON_ERR(MaybeInitializeCapture(processing_config)); |
| + RETURN_ON_ERR( |
| + MaybeInitializeCapture(processing_config, reinitialization_required)); |
| } |
| rtc::CritScope cs_capture(&crit_capture_); |
| assert(processing_config.input_stream().num_frames() == |
| @@ -646,6 +738,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| } |
| ProcessingConfig processing_config; |
| + bool reinitialization_required; |
|
the sun
2016/09/08 08:10:15
default init pls
peah-webrtc
2016/09/08 08:44:04
Done.
|
| { |
| // Acquire lock for the access of api_format. |
| // The lock is released immediately due to the conditional |
| @@ -654,6 +747,8 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| // TODO(ajm): The input and output rates and channels are currently |
| // constrained to be identical in the int16 interface. |
| processing_config = formats_.api_format; |
| + |
| + reinitialization_required = UpdateActiveSubmoduleStates(); |
| } |
| processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| processing_config.input_stream().set_num_channels(frame->num_channels_); |
| @@ -663,7 +758,8 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| { |
| // Do conditional reinitialization. |
| rtc::CritScope cs_render(&crit_render_); |
| - RETURN_ON_ERR(MaybeInitializeCapture(processing_config)); |
| + RETURN_ON_ERR( |
| + MaybeInitializeCapture(processing_config, reinitialization_required)); |
| } |
| rtc::CritScope cs_capture(&crit_capture_); |
| if (frame->samples_per_channel_ != |
| @@ -685,7 +781,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| capture_.capture_audio->DeinterleaveFrom(frame); |
| RETURN_ON_ERR(ProcessStreamLocked()); |
| - capture_.capture_audio->InterleaveTo(frame, output_copy_needed()); |
| + capture_.capture_audio->InterleaveTo(frame, true); |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| if (debug_dump_.debug_file->is_open()) { |
| @@ -731,7 +827,9 @@ int AudioProcessingImpl::ProcessStreamLocked() { |
| capture_nonlocked_.fwd_proc_format.num_frames()); |
| } |
| - if (fwd_analysis_needed()) { |
| + if (submodule_states_.CaptureMultiBandModulesActive() && |
| + SampleRateSupportsMultiBand( |
| + capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { |
| ca->SplitIntoFrequencyBands(); |
| } |
| @@ -802,7 +900,9 @@ int AudioProcessingImpl::ProcessStreamLocked() { |
| RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio( |
| ca, echo_cancellation()->stream_has_echo())); |
| - if (fwd_synthesis_needed()) { |
| + if (submodule_states_.CaptureMultiBandEffectsActive() && |
| + SampleRateSupportsMultiBand( |
| + capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { |
| ca->MergeFrequencyBands(); |
| } |
| @@ -856,10 +956,11 @@ int AudioProcessingImpl::ProcessReverseStream( |
| rtc::CritScope cs(&crit_render_); |
| RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, |
| reverse_output_config)); |
| - if (is_rev_processed()) { |
| + if (submodule_states_.RenderMultiBandEffectsActive()) { |
| render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(), |
| dest); |
| - } else if (render_check_rev_conversion_needed()) { |
| + } else if (formats_.api_format.reverse_input_stream() != |
| + formats_.api_format.reverse_output_stream()) { |
| render_.render_converter->Convert(src, reverse_input_config.num_samples(), |
| dest, |
| reverse_output_config.num_samples()); |
| @@ -961,15 +1062,14 @@ int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| #endif |
| render_.render_audio->DeinterleaveFrom(frame); |
| RETURN_ON_ERR(ProcessReverseStreamLocked()); |
| - if (is_rev_processed()) { |
| - render_.render_audio->InterleaveTo(frame, true); |
| - } |
| + render_.render_audio->InterleaveTo(frame, true); |
| return kNoError; |
| } |
| int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| AudioBuffer* ra = render_.render_audio.get(); // For brevity. |
| - if (rev_analysis_needed()) { |
| + if (submodule_states_.RenderMultiBandModulesActive() && |
| + SampleRateSupportsMultiBand(formats_.rev_proc_format.sample_rate_hz())) { |
| ra->SplitIntoFrequencyBands(); |
| } |
| @@ -988,7 +1088,8 @@ int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); |
| } |
| - if (rev_synthesis_needed()) { |
| + if (submodule_states_.RenderMultiBandEffectsActive() && |
| + SampleRateSupportsMultiBand(formats_.rev_proc_format.sample_rate_hz())) { |
| ra->MergeFrequencyBands(); |
| } |
| @@ -1122,20 +1223,14 @@ int AudioProcessingImpl::StopDebugRecording() { |
| } |
| EchoCancellation* AudioProcessingImpl::echo_cancellation() const { |
| - // Adding a lock here has no effect as it allows any access to the submodule |
| - // from the returned pointer. |
| return public_submodules_->echo_cancellation.get(); |
| } |
| EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { |
| - // Adding a lock here has no effect as it allows any access to the submodule |
| - // from the returned pointer. |
| return public_submodules_->echo_control_mobile.get(); |
| } |
| GainControl* AudioProcessingImpl::gain_control() const { |
| - // Adding a lock here has no effect as it allows any access to the submodule |
| - // from the returned pointer. |
| if (constants_.use_experimental_agc) { |
| return public_submodules_->gain_control_for_experimental_agc.get(); |
| } |
| @@ -1143,103 +1238,34 @@ GainControl* AudioProcessingImpl::gain_control() const { |
| } |
| HighPassFilter* AudioProcessingImpl::high_pass_filter() const { |
| - // Adding a lock here has no effect as it allows any access to the submodule |
| - // from the returned pointer. |
| return public_submodules_->high_pass_filter.get(); |
| } |
| LevelEstimator* AudioProcessingImpl::level_estimator() const { |
| - // Adding a lock here has no effect as it allows any access to the submodule |
| - // from the returned pointer. |
| return public_submodules_->level_estimator.get(); |
| } |
| NoiseSuppression* AudioProcessingImpl::noise_suppression() const { |
| - // Adding a lock here has no effect as it allows any access to the submodule |
| - // from the returned pointer. |
| return public_submodules_->noise_suppression.get(); |
| } |
| VoiceDetection* AudioProcessingImpl::voice_detection() const { |
| - // Adding a lock here has no effect as it allows any access to the submodule |
| - // from the returned pointer. |
| return public_submodules_->voice_detection.get(); |
| } |
| -bool AudioProcessingImpl::is_fwd_processed() const { |
| - // The beamformer, noise suppressor and highpass filter |
| - // modify the data. |
| - if (capture_nonlocked_.beamformer_enabled || |
| - public_submodules_->high_pass_filter->is_enabled() || |
| - public_submodules_->noise_suppression->is_enabled() || |
| - public_submodules_->echo_cancellation->is_enabled() || |
| - public_submodules_->echo_control_mobile->is_enabled() || |
| - public_submodules_->gain_control->is_enabled()) { |
| - return true; |
| - } |
| - |
| - // The capture data is otherwise unchanged. |
| - return false; |
| -} |
| - |
| -bool AudioProcessingImpl::output_copy_needed() const { |
| - // Check if we've upmixed or downmixed the audio. |
| - return ((formats_.api_format.output_stream().num_channels() != |
| - formats_.api_format.input_stream().num_channels()) || |
| - is_fwd_processed() || capture_.transient_suppressor_enabled || |
| - capture_nonlocked_.level_controller_enabled); |
| -} |
| - |
| -bool AudioProcessingImpl::fwd_synthesis_needed() const { |
| - return (is_fwd_processed() && |
| - is_multi_band(capture_nonlocked_.fwd_proc_format.sample_rate_hz())); |
| -} |
| - |
| -bool AudioProcessingImpl::fwd_analysis_needed() const { |
| - if (!is_fwd_processed() && |
| - !public_submodules_->voice_detection->is_enabled() && |
| - !capture_.transient_suppressor_enabled) { |
| - // Only public_submodules_->level_estimator is enabled. |
| - return false; |
| - } else if (is_multi_band( |
| - capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { |
| - // Something besides public_submodules_->level_estimator is enabled, and we |
| - // have super-wb. |
| - return true; |
| - } |
| - return false; |
| -} |
| - |
| -bool AudioProcessingImpl::is_rev_processed() const { |
| -#if WEBRTC_INTELLIGIBILITY_ENHANCER |
| - return capture_nonlocked_.intelligibility_enabled; |
|
the sun
2016/09/08 08:10:15
This was previously if-deffed but now is not - int[…]
peah-webrtc
2016/09/08 08:44:04
Good point!
Done.
|
| -#else |
| - return false; |
| -#endif |
| -} |
| - |
| -bool AudioProcessingImpl::rev_synthesis_needed() const { |
| - return (is_rev_processed() && |
| - is_multi_band(formats_.rev_proc_format.sample_rate_hz())); |
| -} |
| - |
| -bool AudioProcessingImpl::rev_analysis_needed() const { |
| - return is_multi_band(formats_.rev_proc_format.sample_rate_hz()) && |
| - (is_rev_processed() || |
| - public_submodules_->echo_cancellation |
| - ->is_enabled_render_side_query() || |
| - public_submodules_->echo_control_mobile |
| - ->is_enabled_render_side_query() || |
| - public_submodules_->gain_control->is_enabled_render_side_query()); |
| -} |
| - |
| -bool AudioProcessingImpl::render_check_rev_conversion_needed() const { |
| - return rev_conversion_needed(); |
| -} |
| - |
| -bool AudioProcessingImpl::rev_conversion_needed() const { |
| - return (formats_.api_format.reverse_input_stream() != |
| - formats_.api_format.reverse_output_stream()); |
| +bool AudioProcessingImpl::UpdateActiveSubmoduleStates() { |
| + return submodule_states_.Update( |
| + public_submodules_->high_pass_filter->is_enabled(), |
| + public_submodules_->echo_cancellation->is_enabled(), |
| + public_submodules_->echo_control_mobile->is_enabled(), |
| + public_submodules_->noise_suppression->is_enabled(), |
| + capture_nonlocked_.intelligibility_enabled, |
| + capture_nonlocked_.beamformer_enabled, |
| + public_submodules_->gain_control->is_enabled(), |
| + capture_nonlocked_.level_controller_enabled, |
| + public_submodules_->voice_detection->is_enabled(), |
| + public_submodules_->level_estimator->is_enabled(), |
| + capture_.transient_suppressor_enabled); |
| } |
| void AudioProcessingImpl::InitializeExperimentalAgc() { |