Index: webrtc/modules/audio_processing/audio_processing_impl.cc |
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc |
index f4977d4b01f8a507545dc46ccc17a16a1ea60c43..500e0f4e27d22f2a05ceb3faee3f2e1c21a3313d 100644 |
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc |
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc |
@@ -223,13 +223,19 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
array_geometry_(config.Get<Beamforming>().array_geometry), |
target_direction_(config.Get<Beamforming>().target_direction), |
intelligibility_enabled_(config.Get<Intelligibility>().enabled) { |
- echo_cancellation_ = new EchoCancellationImpl(this, crit_); |
+ render_thread_checker_.DetachFromThread(); |
+ capture_thread_checker_.DetachFromThread(); |
+ |
+ echo_cancellation_ = |
+ new EchoCancellationImpl(this, crit_, &render_thread_checker_); |
component_list_.push_back(echo_cancellation_); |
- echo_control_mobile_ = new EchoControlMobileImpl(this, crit_); |
+ echo_control_mobile_ = |
+ new EchoControlMobileImpl(this, crit_, &render_thread_checker_); |
component_list_.push_back(echo_control_mobile_); |
- gain_control_ = new GainControlImpl(this, crit_); |
+ gain_control_ = new GainControlImpl(this, crit_, &render_thread_checker_, |
+ &capture_thread_checker_); |
component_list_.push_back(gain_control_); |
high_pass_filter_ = new HighPassFilterImpl(this, crit_); |
@@ -250,6 +256,7 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
} |
AudioProcessingImpl::~AudioProcessingImpl() { |
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
{ |
CriticalSectionScoped crit_scoped(crit_); |
// Depends on gain_control_ and gain_control_for_new_agc_. |
@@ -274,6 +281,7 @@ AudioProcessingImpl::~AudioProcessingImpl() { |
} |
int AudioProcessingImpl::Initialize() { |
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
CriticalSectionScoped crit_scoped(crit_); |
return InitializeLocked(); |
} |
@@ -284,6 +292,7 @@ int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
ChannelLayout input_layout, |
ChannelLayout output_layout, |
ChannelLayout reverse_layout) { |
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
const ProcessingConfig processing_config = { |
{{input_sample_rate_hz, |
ChannelsFromLayout(input_layout), |
@@ -302,10 +311,23 @@ int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
} |
int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
CriticalSectionScoped crit_scoped(crit_); |
return InitializeLocked(processing_config); |
} |
+int AudioProcessingImpl::MaybeInitializeLockedRender( |
+ const ProcessingConfig& processing_config) { |
+ RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
+ return MaybeInitializeLocked(processing_config); |
+} |
+ |
+int AudioProcessingImpl::MaybeInitializeLockedCapture( |
+ const ProcessingConfig& processing_config) { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
+ return MaybeInitializeLocked(processing_config); |
+} |
+ |
// Calls InitializeLocked() if any of the audio parameters have changed from |
// their current values. |
int AudioProcessingImpl::MaybeInitializeLocked( |
@@ -379,6 +401,10 @@ int AudioProcessingImpl::InitializeLocked() { |
} |
int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
+ // This is called from the initialization functionality which is shared |
+ // between the render and capture parts, and also during the APM creation. |
+ // Therefore it is neither possible to do thread checks nor to separate |
+ // into different thread-specific implementations. |
[review] the sun (2015/11/25 08:53:56): I'm not a fan of repeated, verbose comments. Can y[ou …] |
[review] peah-webrtc (2015/11/25 15:40:17): Could not be happier to oblige! :-) Done. |
for (const auto& stream : config.streams) { |
if (stream.num_channels() < 0) { |
return kBadNumberChannelsError; |
@@ -453,9 +479,9 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
return InitializeLocked(); |
} |
- |
void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
CriticalSectionScoped crit_scoped(crit_); |
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
for (auto item : component_list_) { |
item->SetExtraOptions(config); |
} |
@@ -468,27 +494,45 @@ void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
int AudioProcessingImpl::proc_sample_rate_hz() const { |
+ // This is called from the initialization functionality which is shared |
+ // between the render and capture parts, and also during the APM creation. |
+ // Therefore it is neither possible to do thread checks nor to separate |
+ // into different thread-specific implementations. |
return fwd_proc_format_.sample_rate_hz(); |
} |
int AudioProcessingImpl::proc_split_sample_rate_hz() const { |
+ // This is called from within the VAD submodule. It is used from code that is |
+ // run by the capture and creation threads and is called via the |
+ // public AudioProcessing API. Therefore it is not possible to do thread |
+ // checks on these calls without extending the public APM API. |
return split_rate_; |
} |
int AudioProcessingImpl::num_reverse_channels() const { |
+ // This is called from within the submodules. It is used from code that is |
+ // run by the render, capture and creation threads and is called via the |
+ // public AudioProcessing API. Therefore it is not possible to do thread |
+ // checks on these calls without extending the public APM API. |
return rev_proc_format_.num_channels(); |
} |
int AudioProcessingImpl::num_input_channels() const { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
return shared_state_.api_format_.input_stream().num_channels(); |
} |
int AudioProcessingImpl::num_output_channels() const { |
+ // This is called from within the submodules. It is used from code that is |
+ // run by the render, capture and creation threads and is called via the |
+ // public AudioProcessing API. Therefore it is not possible to do thread |
+ // checks on these calls without extending the public APM API. |
return shared_state_.api_format_.output_stream().num_channels(); |
} |
void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
CriticalSectionScoped lock(crit_); |
+ RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
output_will_be_muted_ = muted; |
if (agc_manager_.get()) { |
agc_manager_->SetCaptureMuted(output_will_be_muted_); |
@@ -504,6 +548,7 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
ChannelLayout output_layout, |
float* const* dest) { |
CriticalSectionScoped crit_scoped(crit_); |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
StreamConfig input_stream = shared_state_.api_format_.input_stream(); |
input_stream.set_sample_rate_hz(input_sample_rate_hz); |
input_stream.set_num_channels(ChannelsFromLayout(input_layout)); |
@@ -525,6 +570,7 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
const StreamConfig& output_config, |
float* const* dest) { |
CriticalSectionScoped crit_scoped(crit_); |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
if (!src || !dest) { |
return kNullPointerError; |
} |
@@ -537,7 +583,7 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
processing_config.input_stream() = input_config; |
processing_config.output_stream() = output_config; |
- RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
+ RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config)); |
assert(processing_config.input_stream().num_frames() == |
shared_state_.api_format_.input_stream().num_frames()); |
@@ -576,6 +622,7 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
CriticalSectionScoped crit_scoped(crit_); |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
echo_cancellation_->ReadQueuedRenderData(); |
echo_control_mobile_->ReadQueuedRenderData(); |
gain_control_->ReadQueuedRenderData(); |
@@ -605,7 +652,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
processing_config.output_stream().set_num_channels(frame->num_channels_); |
- RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
+ RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config)); |
if (frame->samples_per_channel_ != |
shared_state_.api_format_.input_stream().num_frames()) { |
return kBadDataLengthError; |
@@ -639,6 +686,7 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
} |
int AudioProcessingImpl::ProcessStreamLocked() { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
if (debug_file_->Open()) { |
audioproc::Stream* msg = event_msg_->mutable_stream(); |
@@ -720,6 +768,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
size_t samples_per_channel, |
int rev_sample_rate_hz, |
ChannelLayout layout) { |
+ RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
const StreamConfig reverse_config = { |
rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), |
}; |
@@ -734,12 +783,13 @@ int AudioProcessingImpl::ProcessReverseStream( |
const StreamConfig& reverse_input_config, |
const StreamConfig& reverse_output_config, |
float* const* dest) { |
+ RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
RETURN_ON_ERR( |
AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); |
if (is_rev_processed()) { |
render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), |
dest); |
- } else if (rev_conversion_needed()) { |
+ } else if (render_check_rev_conversion_needed()) { |
render_converter_->Convert(src, reverse_input_config.num_samples(), dest, |
reverse_output_config.num_samples()); |
} else { |
@@ -755,6 +805,7 @@ int AudioProcessingImpl::AnalyzeReverseStream( |
const StreamConfig& reverse_input_config, |
const StreamConfig& reverse_output_config) { |
CriticalSectionScoped crit_scoped(crit_); |
+ RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
if (src == NULL) { |
return kNullPointerError; |
} |
@@ -767,7 +818,7 @@ int AudioProcessingImpl::AnalyzeReverseStream( |
processing_config.reverse_input_stream() = reverse_input_config; |
processing_config.reverse_output_stream() = reverse_output_config; |
- RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
+ RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config)); |
assert(reverse_input_config.num_frames() == |
shared_state_.api_format_.reverse_input_stream().num_frames()); |
@@ -792,6 +843,7 @@ int AudioProcessingImpl::AnalyzeReverseStream( |
} |
int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
+ RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
RETURN_ON_ERR(AnalyzeReverseStream(frame)); |
if (is_rev_processed()) { |
render_audio_->InterleaveTo(frame, true); |
@@ -801,6 +853,7 @@ int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
} |
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
+ RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
CriticalSectionScoped crit_scoped(crit_); |
if (frame == NULL) { |
return kNullPointerError; |
@@ -832,7 +885,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
processing_config.reverse_output_stream().set_num_channels( |
frame->num_channels_); |
- RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
+ RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config)); |
if (frame->samples_per_channel_ != |
shared_state_.api_format_.reverse_input_stream().num_frames()) { |
return kBadDataLengthError; |
@@ -853,6 +906,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
} |
int AudioProcessingImpl::ProcessReverseStreamLocked() { |
+ RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
AudioBuffer* ra = render_audio_.get(); // For brevity. |
if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { |
ra->SplitIntoFrequencyBands(); |
@@ -878,6 +932,7 @@ int AudioProcessingImpl::ProcessReverseStreamLocked() { |
} |
int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
Error retval = kNoError; |
was_stream_delay_set_ = true; |
delay += delay_offset_ms_; |
@@ -898,29 +953,35 @@ int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
} |
int AudioProcessingImpl::stream_delay_ms() const { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
return stream_delay_ms_; |
} |
bool AudioProcessingImpl::was_stream_delay_set() const { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
return was_stream_delay_set_; |
} |
void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
key_pressed_ = key_pressed; |
} |
void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
CriticalSectionScoped crit_scoped(crit_); |
delay_offset_ms_ = offset; |
} |
int AudioProcessingImpl::delay_offset_ms() const { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
return delay_offset_ms_; |
} |
int AudioProcessingImpl::StartDebugRecording( |
const char filename[AudioProcessing::kMaxFilenameSize]) { |
CriticalSectionScoped crit_scoped(crit_); |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
if (filename == NULL) { |
@@ -950,6 +1011,7 @@ int AudioProcessingImpl::StartDebugRecording( |
int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
CriticalSectionScoped crit_scoped(crit_); |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
if (handle == NULL) { |
return kNullPointerError; |
@@ -977,12 +1039,14 @@ int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
rtc::PlatformFile handle) { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
return StartDebugRecording(stream); |
} |
int AudioProcessingImpl::StopDebugRecording() { |
CriticalSectionScoped crit_scoped(crit_); |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
// We just return if recording hasn't started. |
@@ -1029,6 +1093,7 @@ VoiceDetection* AudioProcessingImpl::voice_detection() const { |
} |
bool AudioProcessingImpl::is_data_processed() const { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
if (beamformer_enabled_) { |
return true; |
} |
@@ -1057,6 +1122,7 @@ bool AudioProcessingImpl::is_data_processed() const { |
} |
bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
// Check if we've upmixed or downmixed the audio. |
return ((shared_state_.api_format_.output_stream().num_channels() != |
shared_state_.api_format_.input_stream().num_channels()) || |
@@ -1064,12 +1130,14 @@ bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { |
} |
bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
return (is_data_processed && |
(fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); |
} |
bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
if (!is_data_processed && !voice_detection_->is_enabled() && |
!transient_suppressor_enabled_) { |
// Only level_estimator_ is enabled. |
@@ -1083,15 +1151,29 @@ bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { |
} |
bool AudioProcessingImpl::is_rev_processed() const { |
+ RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
return intelligibility_enabled_ && intelligibility_enhancer_->active(); |
} |
+bool AudioProcessingImpl::render_check_rev_conversion_needed() const { |
+ RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
+ return rev_conversion_needed(); |
+} |
+ |
bool AudioProcessingImpl::rev_conversion_needed() const { |
+ // This is called from the initialization functionality, which is shared |
+ // between the render and capture parts, and also during the APM creation. |
+ // Therefore it is neither possible to do thread checks nor to separate |
+ // into different thread-specific implementations. |
return (shared_state_.api_format_.reverse_input_stream() != |
shared_state_.api_format_.reverse_output_stream()); |
} |
void AudioProcessingImpl::InitializeExperimentalAgc() { |
+ // This is called from the initialization functionality, which is shared |
+ // between the render and capture parts, and also during the APM creation. |
+ // Therefore it is neither possible to do thread checks nor to separate |
+ // into different thread-specific implementations. |
if (use_new_agc_) { |
if (!agc_manager_.get()) { |
agc_manager_.reset(new AgcManagerDirect(gain_control_, |
@@ -1104,6 +1186,10 @@ void AudioProcessingImpl::InitializeExperimentalAgc() { |
} |
void AudioProcessingImpl::InitializeTransient() { |
+ // This is called from the initialization functionality, which is shared |
+ // between the render and capture parts, and also during the APM creation. |
+ // Therefore it is neither possible to do thread checks nor to separate |
+ // into different thread-specific implementations. |
if (transient_suppressor_enabled_) { |
if (!transient_suppressor_.get()) { |
transient_suppressor_.reset(new TransientSuppressor()); |
@@ -1115,6 +1201,10 @@ void AudioProcessingImpl::InitializeTransient() { |
} |
void AudioProcessingImpl::InitializeBeamformer() { |
+ // This is called from the initialization functionality, which is shared |
+ // between the render and capture parts, and also during the APM creation. |
+ // Therefore it is neither possible to do thread checks nor to separate |
+ // into different thread-specific implementations. |
if (beamformer_enabled_) { |
if (!beamformer_) { |
beamformer_.reset( |
@@ -1125,6 +1215,10 @@ void AudioProcessingImpl::InitializeBeamformer() { |
} |
void AudioProcessingImpl::InitializeIntelligibility() { |
+ // This is called from the initialization functionality, which is shared |
+ // between the render and capture parts, and also during the APM creation. |
+ // Therefore it is neither possible to do thread checks nor to separate |
+ // into different thread-specific implementations. |
if (intelligibility_enabled_) { |
IntelligibilityEnhancer::Config config; |
config.sample_rate_hz = split_rate_; |
@@ -1135,6 +1229,7 @@ void AudioProcessingImpl::InitializeIntelligibility() { |
} |
void AudioProcessingImpl::MaybeUpdateHistograms() { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
static const int kMinDiffDelayMs = 60; |
if (echo_cancellation()->is_enabled()) { |
@@ -1181,6 +1276,7 @@ void AudioProcessingImpl::MaybeUpdateHistograms() { |
} |
void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
CriticalSectionScoped crit_scoped(crit_); |
if (stream_delay_jumps_ > -1) { |
RTC_HISTOGRAM_ENUMERATION( |
@@ -1200,6 +1296,7 @@ void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { |
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
int AudioProcessingImpl::WriteMessageToDebugFile() { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
int32_t size = event_msg_->ByteSize(); |
if (size <= 0) { |
return kUnspecifiedError; |
@@ -1227,6 +1324,10 @@ int AudioProcessingImpl::WriteMessageToDebugFile() { |
} |
int AudioProcessingImpl::WriteInitMessage() { |
+ // This is called from the initialization functionality, which is shared |
+ // between the render and capture parts, and also during the APM creation. |
+ // Therefore it is neither possible to do thread checks nor to separate |
+ // into different thread-specific implementations. |
event_msg_->set_type(audioproc::Event::INIT); |
audioproc::Init* msg = event_msg_->mutable_init(); |
msg->set_sample_rate( |
@@ -1248,6 +1349,7 @@ int AudioProcessingImpl::WriteInitMessage() { |
} |
int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
+ RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
audioproc::Config config; |
config.set_aec_enabled(echo_cancellation_->is_enabled()); |