| Index: webrtc/modules/audio_processing/audio_processing_impl.cc |
| diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc |
| index d6795a518705b54c6627caaeb4fe62e9ff2bbcec..8e46e6296cd972eccbda8adf31c320de517b683a 100644 |
| --- a/webrtc/modules/audio_processing/audio_processing_impl.cc |
| +++ b/webrtc/modules/audio_processing/audio_processing_impl.cc |
| @@ -37,7 +37,6 @@ extern "C" { |
| #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" |
| #include "webrtc/modules/audio_processing/voice_detection_impl.h" |
| #include "webrtc/modules/interface/module_common_types.h" |
| -#include "webrtc/system_wrappers/interface/critical_section_wrapper.h" |
| #include "webrtc/system_wrappers/interface/file_wrapper.h" |
| #include "webrtc/system_wrappers/interface/logging.h" |
| #include "webrtc/system_wrappers/interface/metrics.h" |
| @@ -190,7 +189,6 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
| level_estimator_(NULL), |
| noise_suppression_(NULL), |
| voice_detection_(NULL), |
| - crit_(CriticalSectionWrapper::CreateCriticalSection()), |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| debug_file_(FileWrapper::Create()), |
| event_msg_(new audioproc::Event()), |
| @@ -222,28 +220,32 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
| beamformer_(beamformer), |
| array_geometry_(config.Get<Beamforming>().array_geometry), |
| intelligibility_enabled_(config.Get<Intelligibility>().enabled) { |
| - echo_cancellation_ = |
| - new EchoCancellationImpl(this, crit_, &render_thread_, &capture_thread_); |
| + |
| + echo_cancellation_ = new EchoCancellationImpl( |
| + this, &crit_render_, &crit_capture_, &render_thread_, &capture_thread_); |
| component_list_.push_back(echo_cancellation_); |
| - echo_control_mobile_ = |
| - new EchoControlMobileImpl(this, crit_, &render_thread_, &capture_thread_); |
| + echo_control_mobile_ = new EchoControlMobileImpl( |
| + this, &crit_render_, &crit_capture_, &render_thread_, &capture_thread_); |
| component_list_.push_back(echo_control_mobile_); |
| - gain_control_ = |
| - new GainControlImpl(this, crit_, &render_thread_, &capture_thread_); |
| + gain_control_ = new GainControlImpl(this, &crit_render_, &crit_capture_, |
| + &render_thread_, &capture_thread_); |
| + |
| component_list_.push_back(gain_control_); |
| - high_pass_filter_ = new HighPassFilterImpl(this, crit_); |
| + high_pass_filter_ = new HighPassFilterImpl(this); |
| component_list_.push_back(high_pass_filter_); |
| - level_estimator_ = new LevelEstimatorImpl(this, crit_); |
| + level_estimator_ = new LevelEstimatorImpl(this); |
| component_list_.push_back(level_estimator_); |
| - noise_suppression_ = new NoiseSuppressionImpl(this, crit_, &capture_thread_); |
| + noise_suppression_ = |
| + new NoiseSuppressionImpl(this, &crit_capture_, &capture_thread_); |
| component_list_.push_back(noise_suppression_); |
| - voice_detection_ = new VoiceDetectionImpl(this, crit_, &capture_thread_); |
| + voice_detection_ = |
| + new VoiceDetectionImpl(this, &crit_capture_, &capture_thread_); |
| component_list_.push_back(voice_detection_); |
| gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_)); |
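The constructor hunk above switches every component from the single shared crit_ pointer to non-owned pointers to the render and/or capture critical sections. A hedged sketch of that dependency-injection shape is below; the component name and members are illustrative, while rtc::CriticalSection and rtc::CritScope are the primitives the patch itself uses.

```cpp
#include "webrtc/base/criticalsection.h"

// Illustrative component taking non-owned pointers to the two locks,
// mirroring the constructor signatures introduced in the hunk above.
class ExampleComponent {
 public:
  ExampleComponent(rtc::CriticalSection* crit_render,
                   rtc::CriticalSection* crit_capture)
      : crit_render_(crit_render), crit_capture_(crit_capture) {}

  void OnRenderAudio() { rtc::CritScope cs(crit_render_); /* render work */ }
  void OnCaptureAudio() { rtc::CritScope cs(crit_capture_); /* capture work */ }

 private:
  rtc::CriticalSection* const crit_render_;   // Not owned.
  rtc::CriticalSection* const crit_capture_;  // Not owned.
};
```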
| @@ -256,7 +258,8 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
| AudioProcessingImpl::~AudioProcessingImpl() { |
| { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| // Depends on gain_control_ and gain_control_for_new_agc_. |
| agc_manager_.reset(); |
| // Depends on gain_control_. |
| @@ -274,12 +277,12 @@ AudioProcessingImpl::~AudioProcessingImpl() { |
| } |
| #endif |
| } |
| - delete crit_; |
| - crit_ = NULL; |
| } |
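The destructor no longer deletes crit_, which implies that the accompanying header change (not part of this excerpt) declares the two critical sections as value members instead of a heap-allocated pointer. The sketch below shows that assumed member layout alongside the thread checkers this file already dereferences; it is an assumption about audio_processing_impl.h, not code from the diff.

```cpp
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/thread_checker.h"

// Assumed member layout for audio_processing_impl.h (not shown in this diff):
class AudioProcessingImplMembersSketch {
 private:
  rtc::CriticalSection crit_render_;   // Guards render-side state.
  rtc::CriticalSection crit_capture_;  // Guards capture-side state.
  rtc::ThreadChecker render_thread_;   // Checked via CalledOnValidThread().
  rtc::ThreadChecker capture_thread_;
};
```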
| int AudioProcessingImpl::Initialize() { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner during initialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| return InitializeLocked(); |
| } |
| @@ -289,6 +292,10 @@ int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
| ChannelLayout input_layout, |
| ChannelLayout output_layout, |
| ChannelLayout reverse_layout) { |
| + // Run in a single-threaded manner during initialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + |
| const ProcessingConfig processing_config = { |
| {{input_sample_rate_hz, |
| ChannelsFromLayout(input_layout), |
| @@ -307,17 +314,23 @@ int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
| } |
| int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner during initialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| return InitializeLocked(processing_config); |
| } |
| // Calls InitializeLocked() if any of the audio parameters have changed from |
| -// their current values. |
| -int AudioProcessingImpl::MaybeInitializeLocked( |
| +// their current values (needs to be called while holding the crit_render_ |
| +// lock). |
| +int AudioProcessingImpl::MaybeInitialize( |
| const ProcessingConfig& processing_config) { |
| + RTC_DCHECK(crit_render_.IsLocked()); |
|
kwiberg-webrtc
2015/10/27 13:15:56
Is this check useful? Doesn't it just check that *
peah-webrtc
2015/11/05 11:47:58
I added the check as a threadchecker instead which
|
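The reply above mentions replacing the lock-held DCHECK with a thread checker. Below is a minimal illustration of that pattern; the class and method names are hypothetical and this is not the actual follow-up patch, but RTC_DCHECK and rtc::ThreadChecker::CalledOnValidThread() match the API already used elsewhere in this file.

```cpp
#include "webrtc/base/checks.h"
#include "webrtc/base/thread_checker.h"

// Hypothetical helper: a thread checker DCHECKs (in debug builds) that all
// calls arrive on the thread it was created on, instead of asserting that a
// particular lock is currently held.
class RenderSideHelper {
 public:
  void OnRenderData() {
    RTC_DCHECK(render_thread_checker_.CalledOnValidThread());
    // ... render-side work ...
  }

 private:
  rtc::ThreadChecker render_thread_checker_;
};
```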
| if (processing_config == shared_state_.api_format_) { |
| return kNoError; |
| } |
| + |
| + rtc::CritScope cs_capture(&crit_capture_); |
| return InitializeLocked(processing_config); |
| } |
| @@ -459,7 +472,9 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| } |
| void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner when setting ehe extra options. |
|
kwiberg-webrtc
2015/10/27 13:15:57
the
peah-webrtc
2015/11/05 11:47:58
Done.
|
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| for (auto item : component_list_) { |
| item->SetExtraOptions(config); |
| } |
| @@ -492,7 +507,7 @@ int AudioProcessingImpl::num_output_channels() const { |
| } |
| void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
| - CriticalSectionScoped lock(crit_); |
| + rtc::CritScope cs(&crit_capture_); |
| RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
| output_will_be_muted_ = muted; |
| if (agc_manager_.get()) { |
| @@ -508,14 +523,22 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
| int output_sample_rate_hz, |
| ChannelLayout output_layout, |
| float* const* dest) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
| - StreamConfig input_stream = shared_state_.api_format_.input_stream(); |
| + StreamConfig input_stream; |
| + StreamConfig output_stream; |
| + { |
| + // Access shared_state_.api_format_.input_stream under the capture |
| + // lock. |
| + // The lock must be released, as it is acquired again later in the |
| + // call to ProcessStream(...). |
| + rtc::CritScope cs(&crit_capture_); |
| + input_stream = shared_state_.api_format_.input_stream(); |
| + output_stream = shared_state_.api_format_.output_stream(); |
| + } |
| + |
| input_stream.set_sample_rate_hz(input_sample_rate_hz); |
| input_stream.set_num_channels(ChannelsFromLayout(input_layout)); |
| input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); |
| - |
| - StreamConfig output_stream = shared_state_.api_format_.output_stream(); |
| output_stream.set_sample_rate_hz(output_sample_rate_hz); |
| output_stream.set_num_channels(ChannelsFromLayout(output_layout)); |
| output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); |
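The hunk above replaces direct reads of shared_state_.api_format_ with copies taken inside a short-lived critical-section scope. Below is a hedged sketch of that snapshot-under-lock pattern; the FormatState and SnapshotFormat names are illustrative only.

```cpp
#include <cstddef>

#include "webrtc/base/criticalsection.h"

// Illustrative only: copy shared format state while holding the capture lock,
// then let the scoped lock release before the copy is modified and used.
struct FormatState {
  rtc::CriticalSection crit_capture;
  int sample_rate_hz = 48000;
  size_t num_channels = 2;
};

void SnapshotFormat(FormatState* state, int* rate, size_t* channels) {
  rtc::CritScope cs(&state->crit_capture);  // Released when cs leaves scope.
  *rate = state->sample_rate_hz;
  *channels = state->num_channels;
}
```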
| @@ -530,21 +553,31 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
| const StreamConfig& input_config, |
| const StreamConfig& output_config, |
| float* const* dest) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
| + { |
| + // Acquire the capture lock in order to safely call the functions |
| + // that retrieve the render side data. These functions access apm |
| + // getters that need the capture lock held when called. |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + echo_cancellation_->ReadQueuedRenderData(); |
| + echo_control_mobile_->ReadQueuedRenderData(); |
| + gain_control_->ReadQueuedRenderData(); |
| + } |
| if (!src || !dest) { |
| return kNullPointerError; |
| } |
| - echo_cancellation_->ReadQueuedRenderData(); |
| - echo_control_mobile_->ReadQueuedRenderData(); |
| - gain_control_->ReadQueuedRenderData(); |
| - |
| ProcessingConfig processing_config = shared_state_.api_format_; |
| processing_config.input_stream() = input_config; |
| processing_config.output_stream() = output_config; |
| - RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| + { |
| + // Do conditional reinitialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| + } |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + |
| assert(processing_config.input_stream().num_frames() == |
| shared_state_.api_format_.input_stream().num_frames()); |
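Both ProcessStream() paths above follow the same discipline as Initialize() and SetExtraOptions(): when both locks are needed, crit_render_ is taken before crit_capture_, and on the capture path the render lock is scoped narrowly around the conditional reinitialization. The sketch below illustrates that ordering with hypothetical names; it is not the APM code itself.

```cpp
#include "webrtc/base/criticalsection.h"

// Hypothetical illustration of the lock ordering in this CL: render lock
// before capture lock, with the render lock held as briefly as possible on
// the capture-side call path.
class TwoLockExample {
 public:
  void Reconfigure() {
    rtc::CritScope cs_render(&crit_render_);    // Always render first...
    rtc::CritScope cs_capture(&crit_capture_);  // ...then capture.
  }

  void ProcessCapture() {
    {
      rtc::CritScope cs_render(&crit_render_);  // Only for reconfiguration.
      // A MaybeReconfigure() step would run here.
    }
    rtc::CritScope cs_capture(&crit_capture_);  // Capture-only work follows.
  }

 private:
  rtc::CriticalSection crit_render_;
  rtc::CriticalSection crit_capture_;
};
```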
| @@ -582,11 +615,18 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
| } |
| int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
| - echo_cancellation_->ReadQueuedRenderData(); |
| - echo_control_mobile_->ReadQueuedRenderData(); |
| - gain_control_->ReadQueuedRenderData(); |
| + { |
| + // Acquire the capture lock in order to safely call the functions |
| + // that retrieve the render side data. These functions access apm |
| + // getters that need the capture lock held when called. |
| + // The lock needs to be released, as |
| + // echo_control_mobile_->is_enabled() acquires this lock as well. |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + echo_cancellation_->ReadQueuedRenderData(); |
| + echo_control_mobile_->ReadQueuedRenderData(); |
| + gain_control_->ReadQueuedRenderData(); |
| + } |
| if (!frame) { |
| return kNullPointerError; |
| @@ -605,15 +645,27 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| return kUnsupportedComponentError; |
| } |
| - // TODO(ajm): The input and output rates and channels are currently |
| - // constrained to be identical in the int16 interface. |
| - ProcessingConfig processing_config = shared_state_.api_format_; |
| + ProcessingConfig processing_config; |
| + { |
| + // Acquire the lock for the access of api_format. |
| + // The lock is released immediately before the conditional |
| + // reinitialization below. |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + // TODO(ajm): The input and output rates and channels are currently |
| + // constrained to be identical in the int16 interface. |
| + processing_config = shared_state_.api_format_; |
| + } |
| processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| processing_config.input_stream().set_num_channels(frame->num_channels_); |
| processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| processing_config.output_stream().set_num_channels(frame->num_channels_); |
| - RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| + { |
| + // Do conditional reinitialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| + } |
| + rtc::CritScope cs_capture(&crit_capture_); |
| if (frame->samples_per_channel_ != |
| shared_state_.api_format_.input_stream().num_frames()) { |
| return kBadDataLengthError; |
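The ReadQueuedRenderData() calls in the hunks above are what let the render thread hand analysis data to the capture thread without both paths sharing one global lock. A hedged sketch of such a producer/consumer hand-over is below; the queue class and its methods are illustrative and not the components' actual implementation.

```cpp
#include <utility>
#include <vector>

#include "webrtc/base/criticalsection.h"

// Illustrative hand-over buffer: the render thread appends frames under a
// short-lived lock, and the capture thread later drains the buffer in one go.
class RenderDataQueue {
 public:
  void PushFromRenderThread(const std::vector<float>& frame) {
    rtc::CritScope cs(&crit_);
    queued_.push_back(frame);
  }

  void DrainOnCaptureThread(std::vector<std::vector<float>>* out) {
    rtc::CritScope cs(&crit_);
    *out = std::move(queued_);
    queued_.clear();
  }

 private:
  rtc::CriticalSection crit_;
  std::vector<std::vector<float>> queued_;
};
```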
| @@ -729,13 +781,14 @@ int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
| int rev_sample_rate_hz, |
| ChannelLayout layout) { |
| RTC_DCHECK(render_thread_.CalledOnValidThread()); |
| + rtc::CritScope cs(&crit_render_); |
| const StreamConfig reverse_config = { |
| rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), |
| }; |
| if (samples_per_channel != reverse_config.num_frames()) { |
| return kBadDataLengthError; |
| } |
| - return AnalyzeReverseStream(data, reverse_config, reverse_config); |
| + return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config); |
| } |
| int AudioProcessingImpl::ProcessReverseStream( |
| @@ -744,8 +797,9 @@ int AudioProcessingImpl::ProcessReverseStream( |
| const StreamConfig& reverse_output_config, |
| float* const* dest) { |
| RTC_DCHECK(render_thread_.CalledOnValidThread()); |
| - RETURN_ON_ERR( |
| - AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); |
| + rtc::CritScope cs(&crit_render_); |
| + RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, |
| + reverse_output_config)); |
| if (is_rev_processed()) { |
| render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), |
| dest); |
| @@ -760,11 +814,10 @@ int AudioProcessingImpl::ProcessReverseStream( |
| return kNoError; |
| } |
| -int AudioProcessingImpl::AnalyzeReverseStream( |
| +int AudioProcessingImpl::AnalyzeReverseStreamLocked( |
| const float* const* src, |
| const StreamConfig& reverse_input_config, |
| const StreamConfig& reverse_output_config) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| if (src == NULL) { |
| return kNullPointerError; |
| } |
| @@ -777,7 +830,7 @@ int AudioProcessingImpl::AnalyzeReverseStream( |
| processing_config.reverse_input_stream() = reverse_input_config; |
| processing_config.reverse_output_stream() = reverse_output_config; |
| - RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| + RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| assert(reverse_input_config.num_frames() == |
| shared_state_.api_format_.reverse_input_stream().num_frames()); |
| @@ -804,6 +857,7 @@ int AudioProcessingImpl::AnalyzeReverseStream( |
| int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| RTC_DCHECK(render_thread_.CalledOnValidThread()); |
| RETURN_ON_ERR(AnalyzeReverseStream(frame)); |
| + rtc::CritScope cs(&crit_render_); |
| if (is_rev_processed()) { |
| render_audio_->InterleaveTo(frame, true); |
| } |
| @@ -813,7 +867,7 @@ int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| RTC_DCHECK(render_thread_.CalledOnValidThread()); |
| - CriticalSectionScoped crit_scoped(crit_); |
| + rtc::CritScope cs(&crit_render_); |
| if (frame == NULL) { |
| return kNullPointerError; |
| } |
| @@ -844,7 +898,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| processing_config.reverse_output_stream().set_num_channels( |
| frame->num_channels_); |
| - RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| + RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| if (frame->samples_per_channel_ != |
| shared_state_.api_format_.reverse_input_stream().num_frames()) { |
| return kBadDataLengthError; |
| @@ -871,6 +925,10 @@ int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| } |
| if (intelligibility_enabled_) { |
| + // Currently run in single-threaded mode when the intelligibility |
| + // enhancer is activated. |
| + // TODO(peah): Fix to be properly multi-threaded. |
| + rtc::CritScope cs(&crit_capture_); |
| intelligibility_enhancer_->ProcessRenderAudio( |
| ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); |
| } |
| @@ -891,6 +949,7 @@ int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
| RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
| + rtc::CritScope cs(&crit_capture_); |
| Error retval = kNoError; |
| was_stream_delay_set_ = true; |
| delay += delay_offset_ms_; |
| @@ -912,6 +971,7 @@ int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
| int AudioProcessingImpl::stream_delay_ms() const { |
| RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
| + rtc::CritScope cs(&crit_capture_); |
| return stream_delay_ms_; |
| } |
| @@ -921,23 +981,28 @@ bool AudioProcessingImpl::was_stream_delay_set() const { |
| void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { |
| RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
| + rtc::CritScope cs(&crit_capture_); |
| key_pressed_ = key_pressed; |
| } |
| void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
| RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
| - CriticalSectionScoped crit_scoped(crit_); |
| + rtc::CritScope cs(&crit_capture_); |
| delay_offset_ms_ = offset; |
| } |
| int AudioProcessingImpl::delay_offset_ms() const { |
| RTC_DCHECK(capture_thread_.CalledOnValidThread()); |
| + rtc::CritScope cs(&crit_capture_); |
| return delay_offset_ms_; |
| } |
| int AudioProcessingImpl::StartDebugRecording( |
| const char filename[AudioProcessing::kMaxFilenameSize]) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + |
| static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
| if (filename == NULL) { |
| @@ -966,7 +1031,9 @@ int AudioProcessingImpl::StartDebugRecording( |
| } |
| int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| if (handle == NULL) { |
| return kNullPointerError; |
| @@ -994,12 +1061,17 @@ int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
| int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
| rtc::PlatformFile handle) { |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
| return StartDebugRecording(stream); |
| } |
| int AudioProcessingImpl::StopDebugRecording() { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| // We just return if recording hasn't started. |
| @@ -1197,7 +1269,10 @@ void AudioProcessingImpl::MaybeUpdateHistograms() { |
| } |
| void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + |
| if (stream_delay_jumps_ > -1) { |
| RTC_HISTOGRAM_ENUMERATION( |
| "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", |