Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 19 matching lines...) | |
| 30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" | 30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" |
| 31 #include "webrtc/modules/audio_processing/gain_control_impl.h" | 31 #include "webrtc/modules/audio_processing/gain_control_impl.h" |
| 32 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" | 32 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" |
| 33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc er.h" | 33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc er.h" |
| 34 #include "webrtc/modules/audio_processing/level_estimator_impl.h" | 34 #include "webrtc/modules/audio_processing/level_estimator_impl.h" |
| 35 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" | 35 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" |
| 36 #include "webrtc/modules/audio_processing/processing_component.h" | 36 #include "webrtc/modules/audio_processing/processing_component.h" |
| 37 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" | 37 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" |
| 38 #include "webrtc/modules/audio_processing/voice_detection_impl.h" | 38 #include "webrtc/modules/audio_processing/voice_detection_impl.h" |
| 39 #include "webrtc/modules/include/module_common_types.h" | 39 #include "webrtc/modules/include/module_common_types.h" |
| 40 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | |
| 41 #include "webrtc/system_wrappers/include/file_wrapper.h" | 40 #include "webrtc/system_wrappers/include/file_wrapper.h" |
| 42 #include "webrtc/system_wrappers/include/logging.h" | 41 #include "webrtc/system_wrappers/include/logging.h" |
| 43 #include "webrtc/system_wrappers/include/metrics.h" | 42 #include "webrtc/system_wrappers/include/metrics.h" |
| 44 | 43 |
| 45 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 44 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 46 // Files generated at build-time by the protobuf compiler. | 45 // Files generated at build-time by the protobuf compiler. |
| 47 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD | 46 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD |
| 48 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h" | 47 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h" |
| 49 #else | 48 #else |
| 50 #include "webrtc/audio_processing/debug.pb.h" | 49 #include "webrtc/audio_processing/debug.pb.h" |
| (...skipping 17 matching lines...) | |
| 68 case AudioProcessing::kStereo: | 67 case AudioProcessing::kStereo: |
| 69 return false; | 68 return false; |
| 70 case AudioProcessing::kMonoAndKeyboard: | 69 case AudioProcessing::kMonoAndKeyboard: |
| 71 case AudioProcessing::kStereoAndKeyboard: | 70 case AudioProcessing::kStereoAndKeyboard: |
| 72 return true; | 71 return true; |
| 73 } | 72 } |
| 74 | 73 |
| 75 assert(false); | 74 assert(false); |
| 76 return false; | 75 return false; |
| 77 } | 76 } |
| 77 } // namespace | |
| 78 | 78 |
| 79 } // namespace | 79 struct ApmPublicSubmodules { |
| 80 ApmPublicSubmodules() | |
| 81 : echo_cancellation(NULL), | |
| 82 echo_control_mobile(NULL), | |
| 83 gain_control(NULL), | |
| 84 high_pass_filter(NULL), | |
| 85 level_estimator(NULL), | |
| 86 noise_suppression(NULL), | |
| 87 voice_detection(NULL) {} | |
| | kwiberg-webrtc 2015/11/23 22:15:10: nullptr |
| | peah-webrtc 2015/11/24 21:42:23: Agree, and in the spirit of a boyscout I did a sea |
| 88 // Accessed externally of APM without any lock acquired. | |
| 89 EchoCancellationImpl* echo_cancellation; | |
| 90 EchoControlMobileImpl* echo_control_mobile; | |
| 91 GainControlImpl* gain_control; | |
| 92 HighPassFilterImpl* high_pass_filter; | |
| 93 LevelEstimatorImpl* level_estimator; | |
| 94 NoiseSuppressionImpl* noise_suppression; | |
| 95 VoiceDetectionImpl* voice_detection; | |
| 96 rtc::scoped_ptr<GainControlForNewAgc> gain_control_for_new_agc; | |
| 97 | |
| 98 // Accessed internally from both render and capture. | |
| 99 rtc::scoped_ptr<TransientSuppressor> transient_suppressor; | |
| 100 rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer; | |
| 101 }; | |
| 102 | |
| 103 struct ApmPrivateSubmodules { | |
| 104 explicit ApmPrivateSubmodules(Beamformer<float>* beamformer) | |
| 105 : beamformer(beamformer) {} | |
| 106 // Accessed internally from capture or during initialization | |
| 107 std::list<ProcessingComponent*> component_list; | |
| 108 rtc::scoped_ptr<Beamformer<float>> beamformer; | |
| 109 rtc::scoped_ptr<AgcManagerDirect> agc_manager; | |
| 110 }; | |
| 80 | 111 |
| 81 // Throughout webrtc, it's assumed that success is represented by zero. | 112 // Throughout webrtc, it's assumed that success is represented by zero. |
| 82 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 113 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
| 83 | 114 |
| 84 // This class has two main functionalities: | 115 // This class has two main functionalities: |
| 85 // | 116 // |
| 86 // 1) It is returned instead of the real GainControl after the new AGC has been | 117 // 1) It is returned instead of the real GainControl after the new AGC has been |
| 87 // enabled in order to prevent an outside user from overriding compression | 118 // enabled in order to prevent an outside user from overriding compression |
| 88 // settings. It doesn't do anything in its implementation, except for | 119 // settings. It doesn't do anything in its implementation, except for |
| 89 // delegating the const methods and Enable calls to the real GainControl, so | 120 // delegating the const methods and Enable calls to the real GainControl, so |
| (...skipping 86 matching lines...) | |
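The comment above describes GainControlForNewAgc as a thin wrapper that is handed out in place of the real GainControl once the new AGC is enabled: it forwards Enable() and the const queries, but ignores attempts to change compression settings so an outside caller cannot override the new AGC. A minimal, self-contained sketch of that delegation pattern; the interface below is illustrative and much smaller than the real GainControl API:

```cpp
#include <iostream>

// Illustrative, cut-down stand-in for the GainControl interface.
class GainControl {
 public:
  virtual ~GainControl() {}
  virtual int Enable(bool enable) = 0;
  virtual bool is_enabled() const = 0;
  virtual int set_compression_gain_db(int gain_db) = 0;
  virtual int compression_gain_db() const = 0;
};

// Forwards Enable() and the const queries; ignores the setter so that the
// new AGC stays in control of the compression settings.
class GainControlWrapper : public GainControl {
 public:
  explicit GainControlWrapper(GainControl* real) : real_(real) {}
  int Enable(bool enable) override { return real_->Enable(enable); }
  bool is_enabled() const override { return real_->is_enabled(); }
  int set_compression_gain_db(int /*gain_db*/) override { return 0; }
  int compression_gain_db() const override {
    return real_->compression_gain_db();
  }

 private:
  GainControl* real_;  // Not owned.
};

// Trivial concrete implementation, just to make the sketch executable.
class RealGainControl : public GainControl {
 public:
  int Enable(bool enable) override { enabled_ = enable; return 0; }
  bool is_enabled() const override { return enabled_; }
  int set_compression_gain_db(int gain_db) override { gain_db_ = gain_db; return 0; }
  int compression_gain_db() const override { return gain_db_; }

 private:
  bool enabled_ = false;
  int gain_db_ = 9;
};

int main() {
  RealGainControl real;
  GainControlWrapper wrapper(&real);
  wrapper.Enable(true);
  wrapper.set_compression_gain_db(30);  // Silently ignored by the wrapper.
  std::cout << wrapper.is_enabled() << " " << wrapper.compression_gain_db() << "\n";  // 1 9
}
```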
| 176 } | 207 } |
| 177 | 208 |
| 178 return apm; | 209 return apm; |
| 179 } | 210 } |
| 180 | 211 |
| 181 AudioProcessingImpl::AudioProcessingImpl(const Config& config) | 212 AudioProcessingImpl::AudioProcessingImpl(const Config& config) |
| 182 : AudioProcessingImpl(config, nullptr) {} | 213 : AudioProcessingImpl(config, nullptr) {} |
| 183 | 214 |
| 184 AudioProcessingImpl::AudioProcessingImpl(const Config& config, | 215 AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
| 185 Beamformer<float>* beamformer) | 216 Beamformer<float>* beamformer) |
| 186 : echo_cancellation_(NULL), | 217 : public_submodules_(new ApmPublicSubmodules()), |
| 187 echo_control_mobile_(NULL), | 218 private_submodules_(new ApmPrivateSubmodules(beamformer)), |
| 188 gain_control_(NULL), | 219 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| 189 high_pass_filter_(NULL), | 220 constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
| 190 level_estimator_(NULL), | 221 config.Get<Beamforming>().array_geometry, |
| 191 noise_suppression_(NULL), | 222 config.Get<Beamforming>().target_direction, |
| 192 voice_detection_(NULL), | 223 false, |
| 193 crit_(CriticalSectionWrapper::CreateCriticalSection()), | 224 config.Get<Intelligibility>().enabled, |
| 194 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 225 config.Get<Beamforming>().enabled), |
| 195 debug_file_(FileWrapper::Create()), | 226 #else |
| 196 event_msg_(new audioproc::Event()), | 227 constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
| 228 config.Get<Beamforming>().array_geometry, | |
| 229 config.Get<Beamforming>().target_direction, | |
| 230 config.Get<ExperimentalAgc>().enabled, | |
| 231 config.Get<Intelligibility>().enabled, | |
| 232 config.Get<Beamforming>().enabled), | |
| 197 #endif | 233 #endif |
| | kwiberg-webrtc 2015/11/23 22:15:10: You can shrink the ifdef region to one line, right |
| | peah-webrtc 2015/11/24 21:42:23: Done. |
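The exchange above is about collapsing the duplicated constants_ initializer so that only the values which actually differ between platforms sit inside the #if. A small, self-contained sketch of that idea; the struct and flag names here are illustrative, not the ones used in the CL:

```cpp
#include <iostream>

// Only the platform-dependent value lives inside the #if; the initializer
// list itself is written once.
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
constexpr bool kPlatformAllowsExperimentalAgc = false;
#else
constexpr bool kPlatformAllowsExperimentalAgc = true;
#endif

struct ApmConstants {
  ApmConstants(int startup_min_volume, bool use_experimental_agc)
      : startup_min_volume(startup_min_volume),
        use_experimental_agc(use_experimental_agc) {}
  const int startup_min_volume;
  const bool use_experimental_agc;
};

int main() {
  const bool config_agc_enabled = true;  // Stand-in for config.Get<ExperimentalAgc>().enabled.
  ApmConstants constants(85, kPlatformAllowsExperimentalAgc && config_agc_enabled);
  std::cout << constants.use_experimental_agc << "\n";
}
```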
| 198 fwd_proc_format_(kSampleRate16kHz), | 234 |
| 199 rev_proc_format_(kSampleRate16kHz, 1), | |
| 200 split_rate_(kSampleRate16kHz), | |
| 201 stream_delay_ms_(0), | |
| 202 delay_offset_ms_(0), | |
| 203 was_stream_delay_set_(false), | |
| 204 last_stream_delay_ms_(0), | |
| 205 last_aec_system_delay_ms_(0), | |
| 206 stream_delay_jumps_(-1), | |
| 207 aec_system_delay_jumps_(-1), | |
| 208 output_will_be_muted_(false), | |
| 209 key_pressed_(false), | |
| 210 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 235 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| 211 use_new_agc_(false), | 236 capture_(false) |
| 212 #else | 237 #else |
| 213 use_new_agc_(config.Get<ExperimentalAgc>().enabled), | 238 capture_(config.Get<ExperimentalNs>().enabled) |
| 214 #endif | 239 #endif |
| 215 agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), | 240 { |
| 216 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | |
| 217 transient_suppressor_enabled_(false), | |
| 218 #else | |
| 219 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), | |
| 220 #endif | |
| 221 beamformer_enabled_(config.Get<Beamforming>().enabled), | |
| 222 beamformer_(beamformer), | |
| 223 array_geometry_(config.Get<Beamforming>().array_geometry), | |
| 224 target_direction_(config.Get<Beamforming>().target_direction), | |
| 225 intelligibility_enabled_(config.Get<Intelligibility>().enabled) { | |
| 226 render_thread_checker_.DetachFromThread(); | 241 render_thread_checker_.DetachFromThread(); |
| 227 signal_thread_checker_.DetachFromThread(); | 242 signal_thread_checker_.DetachFromThread(); |
| 228 capture_thread_checker_.DetachFromThread(); | 243 capture_thread_checker_.DetachFromThread(); |
| 229 | 244 |
| 230 echo_cancellation_ = | 245 { |
| 231 new EchoCancellationImpl(this, crit_, &render_thread_checker_); | 246 rtc::CritScope cs_render(&crit_render_); |
| 232 component_list_.push_back(echo_cancellation_); | 247 rtc::CritScope cs_capture(&crit_capture_); |
| 233 | 248 |
| 234 echo_control_mobile_ = | 249 public_submodules_->echo_cancellation = new EchoCancellationImpl( |
| 235 new EchoControlMobileImpl(this, crit_, &render_thread_checker_); | 250 this, &crit_render_, &crit_capture_, &render_thread_checker_); |
| 236 component_list_.push_back(echo_control_mobile_); | 251 public_submodules_->echo_control_mobile = new EchoControlMobileImpl( |
| 252 this, &crit_render_, &crit_capture_, &render_thread_checker_); | |
| 253 public_submodules_->gain_control = | |
| 254 new GainControlImpl(this, &crit_capture_, &crit_capture_, | |
| 255 &render_thread_checker_, &capture_thread_checker_); | |
| 256 public_submodules_->high_pass_filter = | |
| 257 new HighPassFilterImpl(this, &crit_capture_); | |
| 258 public_submodules_->level_estimator = new LevelEstimatorImpl(this); | |
| 259 public_submodules_->noise_suppression = | |
| 260 new NoiseSuppressionImpl(this, &crit_capture_); | |
| 261 public_submodules_->voice_detection = | |
| 262 new VoiceDetectionImpl(this, &crit_capture_); | |
| 263 public_submodules_->gain_control_for_new_agc.reset( | |
| 264 new GainControlForNewAgc(public_submodules_->gain_control)); | |
| 237 | 265 |
| 238 gain_control_ = new GainControlImpl(this, crit_, &render_thread_checker_, | 266 private_submodules_->component_list.push_back( |
| 239 &capture_thread_checker_); | 267 public_submodules_->echo_cancellation); |
| 240 component_list_.push_back(gain_control_); | 268 private_submodules_->component_list.push_back( |
| 241 | 269 public_submodules_->echo_control_mobile); |
| 242 high_pass_filter_ = new HighPassFilterImpl(this, crit_); | 270 private_submodules_->component_list.push_back( |
| 243 component_list_.push_back(high_pass_filter_); | 271 public_submodules_->gain_control); |
| 244 | 272 private_submodules_->component_list.push_back( |
| 245 level_estimator_ = new LevelEstimatorImpl(this, crit_); | 273 public_submodules_->high_pass_filter); |
| 246 component_list_.push_back(level_estimator_); | 274 private_submodules_->component_list.push_back( |
| 247 | 275 public_submodules_->level_estimator); |
| 248 noise_suppression_ = new NoiseSuppressionImpl(this, crit_); | 276 private_submodules_->component_list.push_back( |
| 249 component_list_.push_back(noise_suppression_); | 277 public_submodules_->noise_suppression); |
| 250 | 278 private_submodules_->component_list.push_back( |
| 251 voice_detection_ = new VoiceDetectionImpl(this, crit_); | 279 public_submodules_->voice_detection); |
| 252 component_list_.push_back(voice_detection_); | 280 } |
| 253 | |
| 254 gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_)); | |
| 255 | 281 |
| 256 SetExtraOptions(config); | 282 SetExtraOptions(config); |
| 257 } | 283 } |
| 258 | 284 |
| 259 AudioProcessingImpl::~AudioProcessingImpl() { | 285 AudioProcessingImpl::~AudioProcessingImpl() { |
| 260 { | 286 // Depends on gain_control_ and |
| 261 CriticalSectionScoped crit_scoped(crit_); | 287 // public_submodules_->gain_control_for_new_agc. |
| 262 // Depends on gain_control_ and gain_control_for_new_agc_. | 288 private_submodules_->agc_manager.reset(); |
| 263 agc_manager_.reset(); | 289 // Depends on gain_control_. |
| 264 // Depends on gain_control_. | 290 public_submodules_->gain_control_for_new_agc.reset(); |
| 265 gain_control_for_new_agc_.reset(); | 291 while (!private_submodules_->component_list.empty()) { |
| 266 while (!component_list_.empty()) { | 292 ProcessingComponent* component = |
| 267 ProcessingComponent* component = component_list_.front(); | 293 private_submodules_->component_list.front(); |
| 268 component->Destroy(); | 294 component->Destroy(); |
| 269 delete component; | 295 delete component; |
| 270 component_list_.pop_front(); | 296 private_submodules_->component_list.pop_front(); |
| 271 } | 297 } |
| 272 | 298 |
| 273 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 299 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 274 if (debug_file_->Open()) { | 300 if (debug_dump_.debug_file->Open()) { |
| 275 debug_file_->CloseFile(); | 301 debug_dump_.debug_file->CloseFile(); |
| 276 } | 302 } |
| 277 #endif | 303 #endif |
| 278 } | |
| 279 delete crit_; | |
| 280 crit_ = NULL; | |
| 281 } | 304 } |
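The destructor above resets agc_manager and gain_control_for_new_agc explicitly, most-dependent first, because implicit member destruction follows reverse declaration order rather than the dependency order spelled out in the comments. A small, self-contained sketch of that explicit-reset technique, with illustrative types:

```cpp
#include <iostream>
#include <memory>

struct GainControl {
  ~GainControl() { std::cout << "GainControl destroyed\n"; }
};

// Holds a non-owning pointer back into GainControl, so it must be torn down first.
struct AgcManager {
  explicit AgcManager(GainControl* gc) : gain_control(gc) {}
  ~AgcManager() { std::cout << "AgcManager destroyed\n"; }
  GainControl* gain_control;  // Not owned.
};

struct Apm {
  std::unique_ptr<AgcManager> agc_manager;
  std::unique_ptr<GainControl> gain_control;

  ~Apm() {
    // Enforce the dependency order explicitly instead of relying on member
    // declaration order: the object that borrows gain_control goes first.
    agc_manager.reset();
    gain_control.reset();
  }
};

int main() {
  Apm apm;
  apm.gain_control.reset(new GainControl());
  apm.agc_manager.reset(new AgcManager(apm.gain_control.get()));
}  // Prints "AgcManager destroyed" before "GainControl destroyed".
```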
| 282 | 305 |
| 283 int AudioProcessingImpl::Initialize() { | 306 int AudioProcessingImpl::Initialize() { |
| 307 // Run in a single-threaded manner during initialization. | |
| 308 rtc::CritScope cs_render(&crit_render_); | |
| 309 rtc::CritScope cs_capture(&crit_capture_); | |
| 284 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 310 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 285 CriticalSectionScoped crit_scoped(crit_); | |
| 286 return InitializeLocked(); | 311 return InitializeLocked(); |
| 287 } | 312 } |
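Initialize() takes both the render and the capture lock, always render first, so that initialization effectively runs single-threaded with respect to the two audio paths. The CL uses rtc::CritScope; below is a standard-library sketch of the same pattern, highlighting the consistent lock order that keeps two threads from deadlocking against each other:

```cpp
#include <mutex>

class Apm {
 public:
  int Initialize() {
    // Always render before capture, matching the order used elsewhere, so two
    // threads that each take both locks can never deadlock against each other.
    std::lock_guard<std::mutex> cs_render(crit_render_);
    std::lock_guard<std::mutex> cs_capture(crit_capture_);
    return InitializeLocked();
  }

 private:
  int InitializeLocked() { return 0; }  // Assumes both locks are held.

  std::mutex crit_render_;
  std::mutex crit_capture_;
};

int main() {
  Apm apm;
  return apm.Initialize();
}
```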
| 288 | 313 |
| 289 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, | 314 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
| 290 int output_sample_rate_hz, | 315 int output_sample_rate_hz, |
| 291 int reverse_sample_rate_hz, | 316 int reverse_sample_rate_hz, |
| 292 ChannelLayout input_layout, | 317 ChannelLayout input_layout, |
| 293 ChannelLayout output_layout, | 318 ChannelLayout output_layout, |
| 294 ChannelLayout reverse_layout) { | 319 ChannelLayout reverse_layout) { |
| 320 // Run in a single-threaded manner during initialization. | |
| 321 rtc::CritScope cs_render(&crit_render_); | |
| 322 rtc::CritScope cs_capture(&crit_capture_); | |
| 295 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 323 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 296 const ProcessingConfig processing_config = { | 324 const ProcessingConfig processing_config = { |
| 297 {{input_sample_rate_hz, | 325 {{input_sample_rate_hz, |
| 298 ChannelsFromLayout(input_layout), | 326 ChannelsFromLayout(input_layout), |
| 299 LayoutHasKeyboard(input_layout)}, | 327 LayoutHasKeyboard(input_layout)}, |
| 300 {output_sample_rate_hz, | 328 {output_sample_rate_hz, |
| 301 ChannelsFromLayout(output_layout), | 329 ChannelsFromLayout(output_layout), |
| 302 LayoutHasKeyboard(output_layout)}, | 330 LayoutHasKeyboard(output_layout)}, |
| 303 {reverse_sample_rate_hz, | 331 {reverse_sample_rate_hz, |
| 304 ChannelsFromLayout(reverse_layout), | 332 ChannelsFromLayout(reverse_layout), |
| 305 LayoutHasKeyboard(reverse_layout)}, | 333 LayoutHasKeyboard(reverse_layout)}, |
| 306 {reverse_sample_rate_hz, | 334 {reverse_sample_rate_hz, |
| 307 ChannelsFromLayout(reverse_layout), | 335 ChannelsFromLayout(reverse_layout), |
| 308 LayoutHasKeyboard(reverse_layout)}}}; | 336 LayoutHasKeyboard(reverse_layout)}}}; |
| 309 | 337 |
| 310 return Initialize(processing_config); | 338 return Initialize(processing_config); |
| 311 } | 339 } |
| 312 | 340 |
| 313 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { | 341 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
| 342 // Run in a single-threaded manner during initialization. | |
| 343 rtc::CritScope cs_render(&crit_render_); | |
| 344 rtc::CritScope cs_capture(&crit_capture_); | |
| 314 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 345 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 315 CriticalSectionScoped crit_scoped(crit_); | |
| 316 return InitializeLocked(processing_config); | 346 return InitializeLocked(processing_config); |
| 317 } | 347 } |
| 318 | 348 |
| 319 // Calls InitializeLocked() if any of the audio parameters have changed from | 349 // Calls InitializeLocked() if any of the audio parameters have changed from |
| 320 // their current values. | 350 // their current values (needs to be called while holding the crit_render_lock). |
| 321 int AudioProcessingImpl::MaybeInitializeLocked( | 351 int AudioProcessingImpl::MaybeInitialize( |
| 322 const ProcessingConfig& processing_config) { | 352 const ProcessingConfig& processing_config) { |
| 323 // Called from both threads. Thread check is therefore not possible. | 353 // Called from both threads. Thread check is therefore not possible. |
| 324 if (processing_config == shared_state_.api_format_) { | 354 if (processing_config == formats_.api_format) { |
| 325 return kNoError; | 355 return kNoError; |
| 326 } | 356 } |
| 357 | |
| 358 rtc::CritScope cs_capture(&crit_capture_); | |
| 327 return InitializeLocked(processing_config); | 359 return InitializeLocked(processing_config); |
| 328 } | 360 } |
| 329 | 361 |
| 330 int AudioProcessingImpl::InitializeLocked() { | 362 int AudioProcessingImpl::InitializeLocked() { |
| 331 const int fwd_audio_buffer_channels = | 363 const int fwd_audio_buffer_channels = |
| 332 beamformer_enabled_ | 364 constants_.beamformer_enabled |
| 333 ? shared_state_.api_format_.input_stream().num_channels() | 365 ? formats_.api_format.input_stream().num_channels() |
| 334 : shared_state_.api_format_.output_stream().num_channels(); | 366 : formats_.api_format.output_stream().num_channels(); |
| 335 const int rev_audio_buffer_out_num_frames = | 367 const int rev_audio_buffer_out_num_frames = |
| 336 shared_state_.api_format_.reverse_output_stream().num_frames() == 0 | 368 formats_.api_format.reverse_output_stream().num_frames() == 0 |
| 337 ? rev_proc_format_.num_frames() | 369 ? formats_.rev_proc_format.num_frames() |
| 338 : shared_state_.api_format_.reverse_output_stream().num_frames(); | 370 : formats_.api_format.reverse_output_stream().num_frames(); |
| 339 if (shared_state_.api_format_.reverse_input_stream().num_channels() > 0) { | 371 if (formats_.api_format.reverse_input_stream().num_channels() > 0) { |
| 340 render_audio_.reset(new AudioBuffer( | 372 render_.render_audio.reset(new AudioBuffer( |
| 341 shared_state_.api_format_.reverse_input_stream().num_frames(), | 373 formats_.api_format.reverse_input_stream().num_frames(), |
| 342 shared_state_.api_format_.reverse_input_stream().num_channels(), | 374 formats_.api_format.reverse_input_stream().num_channels(), |
| 343 rev_proc_format_.num_frames(), rev_proc_format_.num_channels(), | 375 formats_.rev_proc_format.num_frames(), |
| 376 formats_.rev_proc_format.num_channels(), | |
| 344 rev_audio_buffer_out_num_frames)); | 377 rev_audio_buffer_out_num_frames)); |
| 345 if (rev_conversion_needed()) { | 378 if (rev_conversion_needed()) { |
| 346 render_converter_ = AudioConverter::Create( | 379 render_.render_converter = AudioConverter::Create( |
| 347 shared_state_.api_format_.reverse_input_stream().num_channels(), | 380 formats_.api_format.reverse_input_stream().num_channels(), |
| 348 shared_state_.api_format_.reverse_input_stream().num_frames(), | 381 formats_.api_format.reverse_input_stream().num_frames(), |
| 349 shared_state_.api_format_.reverse_output_stream().num_channels(), | 382 formats_.api_format.reverse_output_stream().num_channels(), |
| 350 shared_state_.api_format_.reverse_output_stream().num_frames()); | 383 formats_.api_format.reverse_output_stream().num_frames()); |
| 351 } else { | 384 } else { |
| 352 render_converter_.reset(nullptr); | 385 render_.render_converter.reset(nullptr); |
| 353 } | 386 } |
| 354 } else { | 387 } else { |
| 355 render_audio_.reset(nullptr); | 388 render_.render_audio.reset(nullptr); |
| 356 render_converter_.reset(nullptr); | 389 render_.render_converter.reset(nullptr); |
| 357 } | 390 } |
| 358 capture_audio_.reset( | 391 capture_.capture_audio.reset( |
| 359 new AudioBuffer(shared_state_.api_format_.input_stream().num_frames(), | 392 new AudioBuffer(formats_.api_format.input_stream().num_frames(), |
| 360 shared_state_.api_format_.input_stream().num_channels(), | 393 formats_.api_format.input_stream().num_channels(), |
| 361 fwd_proc_format_.num_frames(), fwd_audio_buffer_channels, | 394 capture_nonlocked_.fwd_proc_format.num_frames(), |
| 362 shared_state_.api_format_.output_stream().num_frames())); | 395 fwd_audio_buffer_channels, |
| 396 formats_.api_format.output_stream().num_frames())); | |
| 363 | 397 |
| 364 // Initialize all components. | 398 // Initialize all components. |
| 365 for (auto item : component_list_) { | 399 for (auto item : private_submodules_->component_list) { |
| 366 int err = item->Initialize(); | 400 int err = item->Initialize(); |
| 367 if (err != kNoError) { | 401 if (err != kNoError) { |
| 368 return err; | 402 return err; |
| 369 } | 403 } |
| 370 } | 404 } |
| 371 | 405 |
| 372 InitializeExperimentalAgc(); | 406 InitializeExperimentalAgc(); |
| 373 | 407 |
| 374 InitializeTransient(); | 408 InitializeTransient(); |
| 375 | 409 |
| 376 InitializeBeamformer(); | 410 InitializeBeamformer(); |
| 377 | 411 |
| 378 InitializeIntelligibility(); | 412 InitializeIntelligibility(); |
| 379 | 413 |
| 380 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 414 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 381 if (debug_file_->Open()) { | 415 if (debug_dump_.debug_file->Open()) { |
| 382 int err = WriteInitMessage(); | 416 int err = WriteInitMessage(); |
| 383 if (err != kNoError) { | 417 if (err != kNoError) { |
| 384 return err; | 418 return err; |
| 385 } | 419 } |
| 386 } | 420 } |
| 387 #endif | 421 #endif |
| 388 | 422 |
| 389 return kNoError; | 423 return kNoError; |
| 390 } | 424 } |
| 391 | 425 |
| (...skipping 12 matching lines...) | |
| 404 const int num_in_channels = config.input_stream().num_channels(); | 438 const int num_in_channels = config.input_stream().num_channels(); |
| 405 const int num_out_channels = config.output_stream().num_channels(); | 439 const int num_out_channels = config.output_stream().num_channels(); |
| 406 | 440 |
| 407 // Need at least one input channel. | 441 // Need at least one input channel. |
| 408 // Need either one output channel or as many outputs as there are inputs. | 442 // Need either one output channel or as many outputs as there are inputs. |
| 409 if (num_in_channels == 0 || | 443 if (num_in_channels == 0 || |
| 410 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { | 444 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { |
| 411 return kBadNumberChannelsError; | 445 return kBadNumberChannelsError; |
| 412 } | 446 } |
| 413 | 447 |
| 414 if (beamformer_enabled_ && | 448 if (constants_.beamformer_enabled && (static_cast<size_t>(num_in_channels) != |
| 415 (static_cast<size_t>(num_in_channels) != array_geometry_.size() || | 449 constants_.array_geometry.size() || |
| 416 num_out_channels > 1)) { | 450 num_out_channels > 1)) { |
| 417 return kBadNumberChannelsError; | 451 return kBadNumberChannelsError; |
| 418 } | 452 } |
| 419 | 453 |
| 420 shared_state_.api_format_ = config; | 454 formats_.api_format = config; |
| 421 | 455 |
| 422 // We process at the closest native rate >= min(input rate, output rate)... | 456 // We process at the closest native rate >= min(input rate, output rate)... |
| 423 const int min_proc_rate = | 457 const int min_proc_rate = |
| 424 std::min(shared_state_.api_format_.input_stream().sample_rate_hz(), | 458 std::min(formats_.api_format.input_stream().sample_rate_hz(), |
| 425 shared_state_.api_format_.output_stream().sample_rate_hz()); | 459 formats_.api_format.output_stream().sample_rate_hz()); |
| 426 int fwd_proc_rate; | 460 int fwd_proc_rate; |
| 427 for (size_t i = 0; i < kNumNativeSampleRates; ++i) { | 461 for (size_t i = 0; i < kNumNativeSampleRates; ++i) { |
| 428 fwd_proc_rate = kNativeSampleRatesHz[i]; | 462 fwd_proc_rate = kNativeSampleRatesHz[i]; |
| 429 if (fwd_proc_rate >= min_proc_rate) { | 463 if (fwd_proc_rate >= min_proc_rate) { |
| 430 break; | 464 break; |
| 431 } | 465 } |
| 432 } | 466 } |
| 433 // ...with one exception. | 467 // ...with one exception. |
| 434 if (echo_control_mobile_->is_enabled() && | 468 if (public_submodules_->echo_control_mobile->is_enabled() && |
| 435 min_proc_rate > kMaxAECMSampleRateHz) { | 469 min_proc_rate > kMaxAECMSampleRateHz) { |
| 436 fwd_proc_rate = kMaxAECMSampleRateHz; | 470 fwd_proc_rate = kMaxAECMSampleRateHz; |
| 437 } | 471 } |
| 438 | 472 |
| 439 fwd_proc_format_ = StreamConfig(fwd_proc_rate); | 473 capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate); |
| 440 | 474 |
| 441 // We normally process the reverse stream at 16 kHz. Unless... | 475 // We normally process the reverse stream at 16 kHz. Unless... |
| 442 int rev_proc_rate = kSampleRate16kHz; | 476 int rev_proc_rate = kSampleRate16kHz; |
| 443 if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) { | 477 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { |
| 444 // ...the forward stream is at 8 kHz. | 478 // ...the forward stream is at 8 kHz. |
| 445 rev_proc_rate = kSampleRate8kHz; | 479 rev_proc_rate = kSampleRate8kHz; |
| 446 } else { | 480 } else { |
| 447 if (shared_state_.api_format_.reverse_input_stream().sample_rate_hz() == | 481 if (formats_.api_format.reverse_input_stream().sample_rate_hz() == |
| 448 kSampleRate32kHz) { | 482 kSampleRate32kHz) { |
| 449 // ...or the input is at 32 kHz, in which case we use the splitting | 483 // ...or the input is at 32 kHz, in which case we use the splitting |
| 450 // filter rather than the resampler. | 484 // filter rather than the resampler. |
| 451 rev_proc_rate = kSampleRate32kHz; | 485 rev_proc_rate = kSampleRate32kHz; |
| 452 } | 486 } |
| 453 } | 487 } |
| 454 | 488 |
| 455 // Always downmix the reverse stream to mono for analysis. This has been | 489 // Always downmix the reverse stream to mono for analysis. This has been |
| 456 // demonstrated to work well for AEC in most practical scenarios. | 490 // demonstrated to work well for AEC in most practical scenarios. |
| 457 rev_proc_format_ = StreamConfig(rev_proc_rate, 1); | 491 formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1); |
| 458 | 492 |
| 459 if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 493 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz || |
| 460 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 494 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) { |
| 461 split_rate_ = kSampleRate16kHz; | 495 capture_nonlocked_.split_rate = kSampleRate16kHz; |
| 462 } else { | 496 } else { |
| 463 split_rate_ = fwd_proc_format_.sample_rate_hz(); | 497 capture_nonlocked_.split_rate = |
| 498 capture_nonlocked_.fwd_proc_format.sample_rate_hz(); | |
| 464 } | 499 } |
| 465 | 500 |
| 466 return InitializeLocked(); | 501 return InitializeLocked(); |
| 467 } | 502 } |
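The comments above spell out the forward-path rate rule: pick the lowest native rate that is at least min(input rate, output rate), except that an enabled AECM caps the rate at 16 kHz. A small, self-contained sketch of that selection, taking kMaxAECMSampleRateHz to be 16000 per the AECM limitation mentioned in the surrounding code:

```cpp
#include <algorithm>
#include <cassert>

int ChooseFwdProcRate(int input_rate_hz, int output_rate_hz, bool aecm_enabled) {
  static const int kNativeSampleRatesHz[] = {8000, 16000, 32000, 48000};
  static const int kMaxAECMSampleRateHz = 16000;

  // Closest native rate >= min(input rate, output rate)...
  const int min_proc_rate = std::min(input_rate_hz, output_rate_hz);
  int fwd_proc_rate = kNativeSampleRatesHz[3];
  for (int rate : kNativeSampleRatesHz) {
    if (rate >= min_proc_rate) {
      fwd_proc_rate = rate;
      break;
    }
  }
  // ...with one exception: AECM only supports 8 and 16 kHz.
  if (aecm_enabled && min_proc_rate > kMaxAECMSampleRateHz) {
    fwd_proc_rate = kMaxAECMSampleRateHz;
  }
  return fwd_proc_rate;
}

int main() {
  assert(ChooseFwdProcRate(44100, 48000, false) == 48000);
  assert(ChooseFwdProcRate(48000, 16000, false) == 16000);
  assert(ChooseFwdProcRate(48000, 32000, true) == 16000);
  return 0;
}
```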
| 468 | 503 |
| 469 void AudioProcessingImpl::SetExtraOptions(const Config& config) { | 504 void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
| 470 CriticalSectionScoped crit_scoped(crit_); | 505 // Run in a single-threaded manner when setting the extra options. |
| 506 rtc::CritScope cs_render(&crit_render_); | |
| 507 rtc::CritScope cs_capture(&crit_capture_); | |
| 471 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 508 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 472 for (auto item : component_list_) { | 509 for (auto item : private_submodules_->component_list) { |
| 473 item->SetExtraOptions(config); | 510 item->SetExtraOptions(config); |
| 474 } | 511 } |
| 475 | 512 |
| 476 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { | 513 if (capture_.transient_suppressor_enabled != |
| 477 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; | 514 config.Get<ExperimentalNs>().enabled) { |
| 515 capture_.transient_suppressor_enabled = | |
| 516 config.Get<ExperimentalNs>().enabled; | |
| 478 InitializeTransient(); | 517 InitializeTransient(); |
| 479 } | 518 } |
| 480 } | 519 } |
| 481 | 520 |
| 482 | 521 |
| 483 int AudioProcessingImpl::proc_sample_rate_hz() const { | 522 int AudioProcessingImpl::proc_sample_rate_hz() const { |
| 523 // Only called from submodules beneath APM, hence locking is not needed. | |
| 484 // TODO(peah): Add threadchecker when possible. | 524 // TODO(peah): Add threadchecker when possible. |
| 485 return fwd_proc_format_.sample_rate_hz(); | 525 return capture_nonlocked_.fwd_proc_format.sample_rate_hz(); |
| 486 } | 526 } |
| 487 | 527 |
| 488 int AudioProcessingImpl::proc_split_sample_rate_hz() const { | 528 int AudioProcessingImpl::proc_split_sample_rate_hz() const { |
| 529 // Only called from submodules/tests beneath APM, hence locking is not needed. | |
| 489 // TODO(peah): Add threadchecker when possible. | 530 // TODO(peah): Add threadchecker when possible. |
| 490 return split_rate_; | 531 return capture_nonlocked_.split_rate; |
| 491 } | 532 } |
| 492 | 533 |
| 493 int AudioProcessingImpl::num_reverse_channels() const { | 534 int AudioProcessingImpl::num_reverse_channels() const { |
| 494 // TODO(peah): Add threadchecker when possible. | 535 // Only called from submodules/tests beneath APM, hence locking is not needed. |
| 495 return rev_proc_format_.num_channels(); | 536 return formats_.rev_proc_format.num_channels(); |
| 496 } | 537 } |
| 497 | 538 |
| 498 int AudioProcessingImpl::num_input_channels() const { | 539 int AudioProcessingImpl::num_input_channels() const { |
| 540 // Only called from submodules/tests beneath APM, hence locking is not needed. | |
| 499 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 541 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 500 return shared_state_.api_format_.input_stream().num_channels(); | 542 return formats_.api_format.input_stream().num_channels(); |
| 501 } | 543 } |
| 502 | 544 |
| 503 int AudioProcessingImpl::num_output_channels() const { | 545 int AudioProcessingImpl::num_output_channels() const { |
| 546 // Only called from submodules/tests beneath APM, hence locking is not needed. | |
| 504 // TODO(peah): Add appropriate thread checker when possible. | 547 // TODO(peah): Add appropriate thread checker when possible. |
| 505 return shared_state_.api_format_.output_stream().num_channels(); | 548 return formats_.api_format.output_stream().num_channels(); |
| 506 } | 549 } |
| 507 | 550 |
| 508 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { | 551 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
| 509 CriticalSectionScoped lock(crit_); | 552 rtc::CritScope cs(&crit_capture_); |
| 510 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 553 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 511 output_will_be_muted_ = muted; | 554 capture_.output_will_be_muted = muted; |
| 512 if (agc_manager_.get()) { | 555 if (private_submodules_->agc_manager.get()) { |
| 513 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 556 private_submodules_->agc_manager->SetCaptureMuted( |
| 557 capture_.output_will_be_muted); | |
| 514 } | 558 } |
| 515 } | 559 } |
| 516 | 560 |
| 517 | 561 |
| 518 int AudioProcessingImpl::ProcessStream(const float* const* src, | 562 int AudioProcessingImpl::ProcessStream(const float* const* src, |
| 519 size_t samples_per_channel, | 563 size_t samples_per_channel, |
| 520 int input_sample_rate_hz, | 564 int input_sample_rate_hz, |
| 521 ChannelLayout input_layout, | 565 ChannelLayout input_layout, |
| 522 int output_sample_rate_hz, | 566 int output_sample_rate_hz, |
| 523 ChannelLayout output_layout, | 567 ChannelLayout output_layout, |
| 524 float* const* dest) { | 568 float* const* dest) { |
| 525 CriticalSectionScoped crit_scoped(crit_); | |
| 526 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 569 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 527 StreamConfig input_stream = shared_state_.api_format_.input_stream(); | 570 StreamConfig input_stream; |
| 571 StreamConfig output_stream; | |
| 572 { | |
| 573 // Access the formats_.api_format.input_stream beneath the capture lock. | |
| 574 // The lock must be released as it is later required in the call | |
| 575 // to ProcessStream(,,,); | |
| | kwiberg-webrtc 2015/11/23 22:15:10: Are the locks reentrant or not? Above in AudioProc |
| | peah-webrtc 2015/11/24 21:42:23: Great find! I think they are probably not reentra |
| | kwiberg-webrtc 2015/11/25 10:17:14: (As we later found out, the locks are in fact reen |
| 576 rtc::CritScope cs(&crit_capture_); | |
| 577 input_stream = formats_.api_format.input_stream(); | |
| 578 output_stream = formats_.api_format.output_stream(); | |
| 579 } | |
| 580 | |
| 528 input_stream.set_sample_rate_hz(input_sample_rate_hz); | 581 input_stream.set_sample_rate_hz(input_sample_rate_hz); |
| 529 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); | 582 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); |
| 530 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); | 583 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); |
| 531 | |
| 532 StreamConfig output_stream = shared_state_.api_format_.output_stream(); | |
| 533 output_stream.set_sample_rate_hz(output_sample_rate_hz); | 584 output_stream.set_sample_rate_hz(output_sample_rate_hz); |
| 534 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); | 585 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); |
| 535 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); | 586 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); |
| 536 | 587 |
| 537 if (samples_per_channel != input_stream.num_frames()) { | 588 if (samples_per_channel != input_stream.num_frames()) { |
| 538 return kBadDataLengthError; | 589 return kBadDataLengthError; |
| 539 } | 590 } |
| 540 return ProcessStream(src, input_stream, output_stream, dest); | 591 return ProcessStream(src, input_stream, output_stream, dest); |
| 541 } | 592 } |
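The review thread in this function asks whether the locks are reentrant. With a non-reentrant lock, the shape used here — copy the stream configs out of formats_.api_format under the capture lock, release it, and only then call the other ProcessStream() overload, which takes the lock again — is what avoids a self-deadlock. A small, self-contained sketch of that copy-under-lock pattern with std::mutex (the CL itself uses rtc::CritScope):

```cpp
#include <mutex>

struct StreamConfig {
  int sample_rate_hz = 48000;
};

class Apm {
 public:
  int ProcessStream(int input_sample_rate_hz) {
    StreamConfig input_stream;
    {
      // Copy the shared format under the capture lock...
      std::lock_guard<std::mutex> cs(crit_capture_);
      input_stream = api_format_input_;
    }  // ...and release it before calling the overload that locks again.
    input_stream.sample_rate_hz = input_sample_rate_hz;
    return ProcessStream(input_stream);
  }

  int ProcessStream(const StreamConfig& input_config) {
    std::lock_guard<std::mutex> cs(crit_capture_);  // Would self-deadlock if
                                                    // the caller still held it.
    api_format_input_ = input_config;
    return 0;
  }

 private:
  std::mutex crit_capture_;
  StreamConfig api_format_input_;
};

int main() {
  Apm apm;
  return apm.ProcessStream(16000);
}
```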
| 542 | 593 |
| 543 int AudioProcessingImpl::ProcessStream(const float* const* src, | 594 int AudioProcessingImpl::ProcessStream(const float* const* src, |
| 544 const StreamConfig& input_config, | 595 const StreamConfig& input_config, |
| 545 const StreamConfig& output_config, | 596 const StreamConfig& output_config, |
| 546 float* const* dest) { | 597 float* const* dest) { |
| 547 CriticalSectionScoped crit_scoped(crit_); | |
| 548 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 598 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 599 { | |
| 600 // Acquire the capture lock in order to safely call the function | |
| 601 // that retrieves the render side data. This function accesses apm | |
| 602 // getters that need the capture lock held when being called. | |
| 603 rtc::CritScope cs_capture(&crit_capture_); | |
| 604 public_submodules_->echo_cancellation->ReadQueuedRenderData(); | |
| 605 public_submodules_->echo_control_mobile->ReadQueuedRenderData(); | |
| 606 public_submodules_->gain_control->ReadQueuedRenderData(); | |
| 607 } | |
| 549 if (!src || !dest) { | 608 if (!src || !dest) { |
| 550 return kNullPointerError; | 609 return kNullPointerError; |
| 551 } | 610 } |
| 552 | 611 |
| 553 echo_cancellation_->ReadQueuedRenderData(); | 612 ProcessingConfig processing_config = formats_.api_format; |
| 554 echo_control_mobile_->ReadQueuedRenderData(); | |
| 555 gain_control_->ReadQueuedRenderData(); | |
| 556 | |
| 557 ProcessingConfig processing_config = shared_state_.api_format_; | |
| 558 processing_config.input_stream() = input_config; | 613 processing_config.input_stream() = input_config; |
| 559 processing_config.output_stream() = output_config; | 614 processing_config.output_stream() = output_config; |
| 560 | 615 |
| 561 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 616 { |
| 617 // Do conditional reinitialization. | |
| 618 rtc::CritScope cs_render(&crit_render_); | |
| 619 RETURN_ON_ERR(MaybeInitialize(processing_config)); | |
| 620 } | |
| 621 rtc::CritScope cs_capture(&crit_capture_); | |
| 622 | |
| 562 assert(processing_config.input_stream().num_frames() == | 623 assert(processing_config.input_stream().num_frames() == |
| 563 shared_state_.api_format_.input_stream().num_frames()); | 624 formats_.api_format.input_stream().num_frames()); |
| 564 | 625 |
| 565 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 626 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 566 if (debug_file_->Open()) { | 627 if (debug_dump_.debug_file->Open()) { |
| 567 RETURN_ON_ERR(WriteConfigMessage(false)); | 628 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 568 | 629 |
| 569 event_msg_->set_type(audioproc::Event::STREAM); | 630 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 570 audioproc::Stream* msg = event_msg_->mutable_stream(); | 631 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 571 const size_t channel_size = | 632 const size_t channel_size = |
| 572 sizeof(float) * shared_state_.api_format_.input_stream().num_frames(); | 633 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| 573 for (int i = 0; i < shared_state_.api_format_.input_stream().num_channels(); | 634 for (int i = 0; i < formats_.api_format.input_stream().num_channels(); ++i) |
| 574 ++i) | |
| 575 msg->add_input_channel(src[i], channel_size); | 635 msg->add_input_channel(src[i], channel_size); |
| 576 } | 636 } |
| 577 #endif | 637 #endif |
| 578 | 638 |
| 579 capture_audio_->CopyFrom(src, shared_state_.api_format_.input_stream()); | 639 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
| 580 RETURN_ON_ERR(ProcessStreamLocked()); | 640 RETURN_ON_ERR(ProcessStreamLocked()); |
| 581 capture_audio_->CopyTo(shared_state_.api_format_.output_stream(), dest); | 641 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
| 582 | 642 |
| 583 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 643 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 584 if (debug_file_->Open()) { | 644 if (debug_dump_.debug_file->Open()) { |
| 585 audioproc::Stream* msg = event_msg_->mutable_stream(); | 645 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 586 const size_t channel_size = | 646 const size_t channel_size = |
| 587 sizeof(float) * shared_state_.api_format_.output_stream().num_frames(); | 647 sizeof(float) * formats_.api_format.output_stream().num_frames(); |
| 588 for (int i = 0; | 648 for (int i = 0; i < formats_.api_format.output_stream().num_channels(); ++i) |
| 589 i < shared_state_.api_format_.output_stream().num_channels(); ++i) | |
| 590 msg->add_output_channel(dest[i], channel_size); | 649 msg->add_output_channel(dest[i], channel_size); |
| 591 RETURN_ON_ERR(WriteMessageToDebugFile()); | 650 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 651 &crit_debug_, &debug_dump_.capture)); | |
| 592 } | 652 } |
| 593 #endif | 653 #endif |
| 594 | 654 |
| 595 return kNoError; | 655 return kNoError; |
| 596 } | 656 } |
| 597 | 657 |
| 598 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { | 658 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| 599 CriticalSectionScoped crit_scoped(crit_); | |
| 600 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 659 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 601 echo_cancellation_->ReadQueuedRenderData(); | 660 { |
| 602 echo_control_mobile_->ReadQueuedRenderData(); | 661 // Acquire the capture lock in order to safely call the function |
| 603 gain_control_->ReadQueuedRenderData(); | 662 // that retrieves the render side data. This function accesses apm |
| 663 // getters that need the capture lock held when being called. | |
| 664 // The lock needs to be released as | |
| 665 // public_submodules_->echo_control_mobile->is_enabled() aquires this lock | |
| 666 // as well. | |
| 667 rtc::CritScope cs_capture(&crit_capture_); | |
| 668 public_submodules_->echo_cancellation->ReadQueuedRenderData(); | |
| 665 // public_submodules_->echo_control_mobile->is_enabled() acquires this lock | |
| 670 public_submodules_->gain_control->ReadQueuedRenderData(); | |
| 671 } | |
| 604 | 672 |
| 605 if (!frame) { | 673 if (!frame) { |
| 606 return kNullPointerError; | 674 return kNullPointerError; |
| 607 } | 675 } |
| 608 // Must be a native rate. | 676 // Must be a native rate. |
| 609 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 677 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
| 610 frame->sample_rate_hz_ != kSampleRate16kHz && | 678 frame->sample_rate_hz_ != kSampleRate16kHz && |
| 611 frame->sample_rate_hz_ != kSampleRate32kHz && | 679 frame->sample_rate_hz_ != kSampleRate32kHz && |
| 612 frame->sample_rate_hz_ != kSampleRate48kHz) { | 680 frame->sample_rate_hz_ != kSampleRate48kHz) { |
| 613 return kBadSampleRateError; | 681 return kBadSampleRateError; |
| 614 } | 682 } |
| 615 | 683 |
| 616 if (echo_control_mobile_->is_enabled() && | 684 if (public_submodules_->echo_control_mobile->is_enabled() && |
| 617 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { | 685 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { |
| 618 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; | 686 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; |
| 619 return kUnsupportedComponentError; | 687 return kUnsupportedComponentError; |
| 620 } | 688 } |
| 621 | 689 |
| 622 // TODO(ajm): The input and output rates and channels are currently | 690 ProcessingConfig processing_config; |
| 623 // constrained to be identical in the int16 interface. | 691 { |
| 624 ProcessingConfig processing_config = shared_state_.api_format_; | 692 // Acquire lock for the access of api_format. |
| 693 // The lock is released immediately due to the conditional | |
| 694 // reinitialization. | |
| 695 rtc::CritScope cs_capture(&crit_capture_); | |
| 696 // TODO(ajm): The input and output rates and channels are currently | |
| 697 // constrained to be identical in the int16 interface. | |
| 698 processing_config = formats_.api_format; | |
| 699 } | |
| 625 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 700 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| 626 processing_config.input_stream().set_num_channels(frame->num_channels_); | 701 processing_config.input_stream().set_num_channels(frame->num_channels_); |
| 627 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 702 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| 628 processing_config.output_stream().set_num_channels(frame->num_channels_); | 703 processing_config.output_stream().set_num_channels(frame->num_channels_); |
| 629 | 704 |
| 630 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 705 { |
| 706 // Do conditional reinitialization. | |
| 707 rtc::CritScope cs_render(&crit_render_); | |
| 708 RETURN_ON_ERR(MaybeInitialize(processing_config)); | |
| 709 } | |
| 710 rtc::CritScope cs_capture(&crit_capture_); | |
| 631 if (frame->samples_per_channel_ != | 711 if (frame->samples_per_channel_ != |
| 632 shared_state_.api_format_.input_stream().num_frames()) { | 712 formats_.api_format.input_stream().num_frames()) { |
| 633 return kBadDataLengthError; | 713 return kBadDataLengthError; |
| 634 } | 714 } |
| 635 | 715 |
| 636 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 716 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 637 if (debug_file_->Open()) { | 717 if (debug_dump_.debug_file->Open()) { |
| 638 event_msg_->set_type(audioproc::Event::STREAM); | 718 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 639 audioproc::Stream* msg = event_msg_->mutable_stream(); | 719 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 640 const size_t data_size = | 720 const size_t data_size = |
| 641 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 721 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 642 msg->set_input_data(frame->data_, data_size); | 722 msg->set_input_data(frame->data_, data_size); |
| 643 } | 723 } |
| 644 #endif | 724 #endif |
| 645 | 725 |
| 646 capture_audio_->DeinterleaveFrom(frame); | 726 capture_.capture_audio->DeinterleaveFrom(frame); |
| 647 RETURN_ON_ERR(ProcessStreamLocked()); | 727 RETURN_ON_ERR(ProcessStreamLocked()); |
| 648 capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed())); | 728 capture_.capture_audio->InterleaveTo(frame, |
| 729 output_copy_needed(is_data_processed())); | |
| 649 | 730 |
| 650 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 731 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 651 if (debug_file_->Open()) { | 732 if (debug_dump_.debug_file->Open()) { |
| 652 audioproc::Stream* msg = event_msg_->mutable_stream(); | 733 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 653 const size_t data_size = | 734 const size_t data_size = |
| 654 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 735 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 655 msg->set_output_data(frame->data_, data_size); | 736 msg->set_output_data(frame->data_, data_size); |
| 656 RETURN_ON_ERR(WriteMessageToDebugFile()); | 737 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 738 &crit_debug_, &debug_dump_.capture)); | |
| 657 } | 739 } |
| 658 #endif | 740 #endif |
| 659 | 741 |
| 660 return kNoError; | 742 return kNoError; |
| 661 } | 743 } |
| 662 | 744 |
| 663 int AudioProcessingImpl::ProcessStreamLocked() { | 745 int AudioProcessingImpl::ProcessStreamLocked() { |
| 664 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 746 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 665 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 747 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 666 if (debug_file_->Open()) { | 748 if (debug_dump_.debug_file->Open()) { |
| 667 audioproc::Stream* msg = event_msg_->mutable_stream(); | 749 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 668 msg->set_delay(stream_delay_ms_); | 750 msg->set_delay(capture_nonlocked_.stream_delay_ms); |
| 669 msg->set_drift(echo_cancellation_->stream_drift_samples()); | 751 msg->set_drift( |
| 752 public_submodules_->echo_cancellation->stream_drift_samples()); | |
| 670 msg->set_level(gain_control()->stream_analog_level()); | 753 msg->set_level(gain_control()->stream_analog_level()); |
| 671 msg->set_keypress(key_pressed_); | 754 msg->set_keypress(capture_.key_pressed); |
| 672 } | 755 } |
| 673 #endif | 756 #endif |
| 674 | 757 |
| 675 MaybeUpdateHistograms(); | 758 MaybeUpdateHistograms(); |
| 676 | 759 |
| 677 AudioBuffer* ca = capture_audio_.get(); // For brevity. | 760 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. |
| 678 | 761 |
| 679 if (use_new_agc_ && gain_control_->is_enabled()) { | 762 if (constants_.use_new_agc && |
| 680 agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(), | 763 public_submodules_->gain_control->is_enabled()) { |
| 681 fwd_proc_format_.num_frames()); | 764 private_submodules_->agc_manager->AnalyzePreProcess( |
| 765 ca->channels()[0], ca->num_channels(), | |
| 766 capture_nonlocked_.fwd_proc_format.num_frames()); | |
| 682 } | 767 } |
| 683 | 768 |
| 684 bool data_processed = is_data_processed(); | 769 bool data_processed = is_data_processed(); |
| 685 if (analysis_needed(data_processed)) { | 770 if (analysis_needed(data_processed)) { |
| 686 ca->SplitIntoFrequencyBands(); | 771 ca->SplitIntoFrequencyBands(); |
| 687 } | 772 } |
| 688 | 773 |
| 689 if (intelligibility_enabled_) { | 774 if (constants_.intelligibility_enabled) { |
| 690 intelligibility_enhancer_->AnalyzeCaptureAudio( | 775 public_submodules_->intelligibility_enhancer->AnalyzeCaptureAudio( |
| 691 ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels()); | 776 ca->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, |
| 777 ca->num_channels()); | |
| 692 } | 778 } |
| 693 | 779 |
| 694 if (beamformer_enabled_) { | 780 if (constants_.beamformer_enabled) { |
| 695 beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f()); | 781 private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(), |
| 782 ca->split_data_f()); | |
| 696 ca->set_num_channels(1); | 783 ca->set_num_channels(1); |
| 697 } | 784 } |
| 698 | 785 |
| 699 RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca)); | 786 RETURN_ON_ERR(public_submodules_->high_pass_filter->ProcessCaptureAudio(ca)); |
| 700 RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca)); | 787 RETURN_ON_ERR(public_submodules_->gain_control->AnalyzeCaptureAudio(ca)); |
| 701 RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca)); | 788 RETURN_ON_ERR(public_submodules_->noise_suppression->AnalyzeCaptureAudio(ca)); |
| 702 RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca)); | 789 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessCaptureAudio(ca)); |
| 703 | 790 |
| 704 if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) { | 791 if (public_submodules_->echo_control_mobile->is_enabled() && |
| 792 public_submodules_->noise_suppression->is_enabled()) { | |
| 705 ca->CopyLowPassToReference(); | 793 ca->CopyLowPassToReference(); |
| 706 } | 794 } |
| 707 RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca)); | 795 RETURN_ON_ERR(public_submodules_->noise_suppression->ProcessCaptureAudio(ca)); |
| 708 RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca)); | 796 RETURN_ON_ERR( |
| 709 RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca)); | 797 public_submodules_->echo_control_mobile->ProcessCaptureAudio(ca)); |
| 798 RETURN_ON_ERR(public_submodules_->voice_detection->ProcessCaptureAudio(ca)); | |
| 710 | 799 |
| 711 if (use_new_agc_ && gain_control_->is_enabled() && | 800 if (constants_.use_new_agc && |
| 712 (!beamformer_enabled_ || beamformer_->is_target_present())) { | 801 public_submodules_->gain_control->is_enabled() && |
| 713 agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz], | 802 (!constants_.beamformer_enabled || |
| 714 ca->num_frames_per_band(), split_rate_); | 803 private_submodules_->beamformer->is_target_present())) { |
| 804 private_submodules_->agc_manager->Process( | |
| 805 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), | |
| 806 capture_nonlocked_.split_rate); | |
| 715 } | 807 } |
| 716 RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca)); | 808 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca)); |
| 717 | 809 |
| 718 if (synthesis_needed(data_processed)) { | 810 if (synthesis_needed(data_processed)) { |
| 719 ca->MergeFrequencyBands(); | 811 ca->MergeFrequencyBands(); |
| 720 } | 812 } |
| 721 | 813 |
| 722 // TODO(aluebs): Investigate if the transient suppression placement should be | 814 // TODO(aluebs): Investigate if the transient suppression placement should be |
| 723 // before or after the AGC. | 815 // before or after the AGC. |
| 724 if (transient_suppressor_enabled_) { | 816 if (capture_.transient_suppressor_enabled) { |
| 725 float voice_probability = | 817 float voice_probability = |
| 726 agc_manager_.get() ? agc_manager_->voice_probability() : 1.f; | 818 private_submodules_->agc_manager.get() |
| 819 ? private_submodules_->agc_manager->voice_probability() | |
| 820 : 1.f; | |
| 727 | 821 |
| 728 transient_suppressor_->Suppress( | 822 public_submodules_->transient_suppressor->Suppress( |
| 729 ca->channels_f()[0], ca->num_frames(), ca->num_channels(), | 823 ca->channels_f()[0], ca->num_frames(), ca->num_channels(), |
| 730 ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(), | 824 ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(), |
| 731 ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability, | 825 ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability, |
| 732 key_pressed_); | 826 capture_.key_pressed); |
| 733 } | 827 } |
| 734 | 828 |
| 735 // The level estimator operates on the recombined data. | 829 // The level estimator operates on the recombined data. |
| 736 RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); | 830 RETURN_ON_ERR(public_submodules_->level_estimator->ProcessStream(ca)); |
| 737 | 831 |
| 738 was_stream_delay_set_ = false; | 832 capture_.was_stream_delay_set = false; |
| 739 return kNoError; | 833 return kNoError; |
| 740 } | 834 } |
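ProcessStreamLocked() drives the capture chain as a sequence of RETURN_ON_ERR(...) calls, so the first submodule that reports an error short-circuits the rest of the pipeline. The macro itself is presumably defined earlier in this file and is not shown in the visible diff; a sketch of roughly what such a macro expands to (the exact definition in the CL may differ):

```cpp
#include <iostream>

// Early-return-on-error chaining: evaluate the expression once and propagate
// any non-zero error code to the caller immediately.
#define RETURN_ON_ERR(expr)              \
  do {                                   \
    int return_on_err_code = (expr);     \
    if (return_on_err_code != 0) {       \
      return return_on_err_code;         \
    }                                    \
  } while (0)

int StepA() { return 0; }
int StepB() { return -3; }  // Fails; StepC() below is never reached.
int StepC() { return 0; }

int ProcessChain() {
  RETURN_ON_ERR(StepA());
  RETURN_ON_ERR(StepB());
  RETURN_ON_ERR(StepC());
  return 0;
}

int main() {
  std::cout << ProcessChain() << "\n";  // Prints -3.
}
```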
| 741 | 835 |
| 742 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, | 836 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
| 743 size_t samples_per_channel, | 837 size_t samples_per_channel, |
| 744 int rev_sample_rate_hz, | 838 int rev_sample_rate_hz, |
| 745 ChannelLayout layout) { | 839 ChannelLayout layout) { |
| 746 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 840 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 841 rtc::CritScope cs(&crit_render_); | |
| 747 const StreamConfig reverse_config = { | 842 const StreamConfig reverse_config = { |
| 748 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), | 843 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), |
| 749 }; | 844 }; |
| 750 if (samples_per_channel != reverse_config.num_frames()) { | 845 if (samples_per_channel != reverse_config.num_frames()) { |
| 751 return kBadDataLengthError; | 846 return kBadDataLengthError; |
| 752 } | 847 } |
| 753 return AnalyzeReverseStream(data, reverse_config, reverse_config); | 848 return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config); |
| 754 } | 849 } |
| 755 | 850 |
| 756 int AudioProcessingImpl::ProcessReverseStream( | 851 int AudioProcessingImpl::ProcessReverseStream( |
| 757 const float* const* src, | 852 const float* const* src, |
| 758 const StreamConfig& reverse_input_config, | 853 const StreamConfig& reverse_input_config, |
| 759 const StreamConfig& reverse_output_config, | 854 const StreamConfig& reverse_output_config, |
| 760 float* const* dest) { | 855 float* const* dest) { |
| 761 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 856 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 762 RETURN_ON_ERR( | 857 rtc::CritScope cs(&crit_render_); |
| 763 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); | 858 RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, |
| 859 reverse_output_config)); | |
| 764 if (is_rev_processed()) { | 860 if (is_rev_processed()) { |
| 765 render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), | 861 render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(), |
| 766 dest); | 862 dest); |
| 767 } else if (rev_conversion_needed()) { | 863 } else if (rev_conversion_needed()) { |
| 768 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, | 864 render_.render_converter->Convert(src, reverse_input_config.num_samples(), |
| 769 reverse_output_config.num_samples()); | 865 dest, |
| 866 reverse_output_config.num_samples()); | |
| 770 } else { | 867 } else { |
| 771 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), | 868 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), |
| 772 reverse_input_config.num_channels(), dest); | 869 reverse_input_config.num_channels(), dest); |
| 773 } | 870 } |
| 774 | 871 |
| 775 return kNoError; | 872 return kNoError; |
| 776 } | 873 } |
| 777 | 874 |
| 778 int AudioProcessingImpl::AnalyzeReverseStream( | 875 int AudioProcessingImpl::AnalyzeReverseStreamLocked( |
| 779 const float* const* src, | 876 const float* const* src, |
| 780 const StreamConfig& reverse_input_config, | 877 const StreamConfig& reverse_input_config, |
| 781 const StreamConfig& reverse_output_config) { | 878 const StreamConfig& reverse_output_config) { |
| 782 CriticalSectionScoped crit_scoped(crit_); | |
| 783 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 879 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 784 if (src == NULL) { | 880 if (src == NULL) { |
| 785 return kNullPointerError; | 881 return kNullPointerError; |
| 786 } | 882 } |
| 787 | 883 |
| 788 if (reverse_input_config.num_channels() <= 0) { | 884 if (reverse_input_config.num_channels() <= 0) { |
| 789 return kBadNumberChannelsError; | 885 return kBadNumberChannelsError; |
| 790 } | 886 } |
| 791 | 887 |
| 792 ProcessingConfig processing_config = shared_state_.api_format_; | 888 ProcessingConfig processing_config = formats_.api_format; |
| 793 processing_config.reverse_input_stream() = reverse_input_config; | 889 processing_config.reverse_input_stream() = reverse_input_config; |
| 794 processing_config.reverse_output_stream() = reverse_output_config; | 890 processing_config.reverse_output_stream() = reverse_output_config; |
| 795 | 891 |
| 796 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 892 RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| 797 assert(reverse_input_config.num_frames() == | 893 assert(reverse_input_config.num_frames() == |
| 798 shared_state_.api_format_.reverse_input_stream().num_frames()); | 894 formats_.api_format.reverse_input_stream().num_frames()); |
| 799 | 895 |
| 800 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 896 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 801 if (debug_file_->Open()) { | 897 if (debug_dump_.debug_file->Open()) { |
| 802 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 898 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
| 803 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 899 audioproc::ReverseStream* msg = |
| 900 debug_dump_.render.event_msg->mutable_reverse_stream(); | |
| 804 const size_t channel_size = | 901 const size_t channel_size = |
| 805 sizeof(float) * | 902 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| 806 shared_state_.api_format_.reverse_input_stream().num_frames(); | |
| 807 for (int i = 0; | 903 for (int i = 0; |
| 808 i < shared_state_.api_format_.reverse_input_stream().num_channels(); | 904 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| 809 ++i) | |
| 810 msg->add_channel(src[i], channel_size); | 905 msg->add_channel(src[i], channel_size); |
| 811 RETURN_ON_ERR(WriteMessageToDebugFile()); | 906 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 907 &crit_debug_, &debug_dump_.render)); | |
| 812 } | 908 } |
| 813 #endif | 909 #endif |
| 814 | 910 |
| 815 render_audio_->CopyFrom(src, | 911 render_.render_audio->CopyFrom(src, |
| 816 shared_state_.api_format_.reverse_input_stream()); | 912 formats_.api_format.reverse_input_stream()); |
| 817 return ProcessReverseStreamLocked(); | 913 return ProcessReverseStreamLocked(); |
| 818 } | 914 } |
| 819 | 915 |
| 820 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 916 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| 821 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 917 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 822 RETURN_ON_ERR(AnalyzeReverseStream(frame)); | 918 RETURN_ON_ERR(AnalyzeReverseStream(frame)); |
| 919 rtc::CritScope cs(&crit_render_); | |
| 823 if (is_rev_processed()) { | 920 if (is_rev_processed()) { |
| 824 render_audio_->InterleaveTo(frame, true); | 921 render_.render_audio->InterleaveTo(frame, true); |
| 825 } | 922 } |
| 826 | 923 |
| 827 return kNoError; | 924 return kNoError; |
| 828 } | 925 } |
| 829 | 926 |
| 830 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { | 927 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| 831 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 928 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 832 CriticalSectionScoped crit_scoped(crit_); | 929 rtc::CritScope cs(&crit_render_); |
| 833 if (frame == NULL) { | 930 if (frame == NULL) { |
| 834 return kNullPointerError; | 931 return kNullPointerError; |
| 835 } | 932 } |
| 836 // Must be a native rate. | 933 // Must be a native rate. |
| 837 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 934 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
| 838 frame->sample_rate_hz_ != kSampleRate16kHz && | 935 frame->sample_rate_hz_ != kSampleRate16kHz && |
| 839 frame->sample_rate_hz_ != kSampleRate32kHz && | 936 frame->sample_rate_hz_ != kSampleRate32kHz && |
| 840 frame->sample_rate_hz_ != kSampleRate48kHz) { | 937 frame->sample_rate_hz_ != kSampleRate48kHz) { |
| 841 return kBadSampleRateError; | 938 return kBadSampleRateError; |
| 842 } | 939 } |
| 843 // This interface does not tolerate different forward and reverse rates. | 940 // This interface does not tolerate different forward and reverse rates. |
| 844 if (frame->sample_rate_hz_ != | 941 if (frame->sample_rate_hz_ != |
| 845 shared_state_.api_format_.input_stream().sample_rate_hz()) { | 942 formats_.api_format.input_stream().sample_rate_hz()) { |
| 846 return kBadSampleRateError; | 943 return kBadSampleRateError; |
| 847 } | 944 } |
| 848 | 945 |
| 849 if (frame->num_channels_ <= 0) { | 946 if (frame->num_channels_ <= 0) { |
| 850 return kBadNumberChannelsError; | 947 return kBadNumberChannelsError; |
| 851 } | 948 } |
| 852 | 949 |
| 853 ProcessingConfig processing_config = shared_state_.api_format_; | 950 ProcessingConfig processing_config = formats_.api_format; |
| 854 processing_config.reverse_input_stream().set_sample_rate_hz( | 951 processing_config.reverse_input_stream().set_sample_rate_hz( |
| 855 frame->sample_rate_hz_); | 952 frame->sample_rate_hz_); |
| 856 processing_config.reverse_input_stream().set_num_channels( | 953 processing_config.reverse_input_stream().set_num_channels( |
| 857 frame->num_channels_); | 954 frame->num_channels_); |
| 858 processing_config.reverse_output_stream().set_sample_rate_hz( | 955 processing_config.reverse_output_stream().set_sample_rate_hz( |
| 859 frame->sample_rate_hz_); | 956 frame->sample_rate_hz_); |
| 860 processing_config.reverse_output_stream().set_num_channels( | 957 processing_config.reverse_output_stream().set_num_channels( |
| 861 frame->num_channels_); | 958 frame->num_channels_); |
| 862 | 959 |
| 863 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 960 RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| 864 if (frame->samples_per_channel_ != | 961 if (frame->samples_per_channel_ != |
| 865 shared_state_.api_format_.reverse_input_stream().num_frames()) { | 962 formats_.api_format.reverse_input_stream().num_frames()) { |
| 866 return kBadDataLengthError; | 963 return kBadDataLengthError; |
| 867 } | 964 } |
| 868 | 965 |
| 869 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 966 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 870 if (debug_file_->Open()) { | 967 if (debug_dump_.debug_file->Open()) { |
| 871 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 968 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
| 872 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 969 audioproc::ReverseStream* msg = |
| 970 debug_dump_.render.event_msg->mutable_reverse_stream(); | |
| 873 const size_t data_size = | 971 const size_t data_size = |
| 874 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 972 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 875 msg->set_data(frame->data_, data_size); | 973 msg->set_data(frame->data_, data_size); |
| 876 RETURN_ON_ERR(WriteMessageToDebugFile()); | 974 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 975 &crit_debug_, &debug_dump_.render)); | |
| 877 } | 976 } |
| 878 #endif | 977 #endif |
| 879 render_audio_->DeinterleaveFrom(frame); | 978 render_.render_audio->DeinterleaveFrom(frame); |
| 880 return ProcessReverseStreamLocked(); | 979 return ProcessReverseStreamLocked(); |
| 881 } | 980 } |
| 882 | 981 |
| 883 int AudioProcessingImpl::ProcessReverseStreamLocked() { | 982 int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| 884 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 983 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 885 AudioBuffer* ra = render_audio_.get(); // For brevity. | 984 AudioBuffer* ra = render_.render_audio.get(); // For brevity. |
| 886 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { | 985 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz) { |
| 887 ra->SplitIntoFrequencyBands(); | 986 ra->SplitIntoFrequencyBands(); |
| 888 } | 987 } |
| 889 | 988 |
| 890 if (intelligibility_enabled_) { | 989 if (constants_.intelligibility_enabled) { |
| 891 intelligibility_enhancer_->ProcessRenderAudio( | 990 // Currently run in single-threaded mode when the intelligibility |
| 892 ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); | 991 // enhancer is activated. |
| 992 // TODO(peah): Fix to be properly multi-threaded. | |
| 993 rtc::CritScope cs(&crit_capture_); | |
| 994 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( | |
| 995 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, | |
| 996 ra->num_channels()); | |
| 893 } | 997 } |
| 894 | 998 |
| 895 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); | 999 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); |
| 896 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); | 1000 RETURN_ON_ERR( |
| 897 if (!use_new_agc_) { | 1001 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); |
| 898 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); | 1002 if (!constants_.use_new_agc) { |
| 1003 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); | |
| 899 } | 1004 } |
| 900 | 1005 |
| 901 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz && | 1006 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz && |
| 902 is_rev_processed()) { | 1007 is_rev_processed()) { |
| 903 ra->MergeFrequencyBands(); | 1008 ra->MergeFrequencyBands(); |
| 904 } | 1009 } |
| 905 | 1010 |
| 906 return kNoError; | 1011 return kNoError; |
| 907 } | 1012 } |
| 908 | 1013 |
| 909 int AudioProcessingImpl::set_stream_delay_ms(int delay) { | 1014 int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
| 910 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1015 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1016 rtc::CritScope cs(&crit_capture_); | |
| 911 Error retval = kNoError; | 1017 Error retval = kNoError; |
| 912 was_stream_delay_set_ = true; | 1018 capture_.was_stream_delay_set = true; |
| 913 delay += delay_offset_ms_; | 1019 delay += capture_.delay_offset_ms; |
| 914 | 1020 |
| 915 if (delay < 0) { | 1021 if (delay < 0) { |
| 916 delay = 0; | 1022 delay = 0; |
| 917 retval = kBadStreamParameterWarning; | 1023 retval = kBadStreamParameterWarning; |
| 918 } | 1024 } |
| 919 | 1025 |
| 920 // TODO(ajm): the max is rather arbitrarily chosen; investigate. | 1026 // TODO(ajm): the max is rather arbitrarily chosen; investigate. |
| 921 if (delay > 500) { | 1027 if (delay > 500) { |
| 922 delay = 500; | 1028 delay = 500; |
| 923 retval = kBadStreamParameterWarning; | 1029 retval = kBadStreamParameterWarning; |
| 924 } | 1030 } |
| 925 | 1031 |
| 926 stream_delay_ms_ = delay; | 1032 capture_nonlocked_.stream_delay_ms = delay; |
| 927 return retval; | 1033 return retval; |
| 928 } | 1034 } |
| 929 | 1035 |
| 930 int AudioProcessingImpl::stream_delay_ms() const { | 1036 int AudioProcessingImpl::stream_delay_ms() const { |
| 931 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1037 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 932 return stream_delay_ms_; | 1038 return capture_nonlocked_.stream_delay_ms; |
| 933 } | 1039 } |
| 934 | 1040 |
| 935 bool AudioProcessingImpl::was_stream_delay_set() const { | 1041 bool AudioProcessingImpl::was_stream_delay_set() const { |
| 936 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1042 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 937 return was_stream_delay_set_; | 1043 return capture_.was_stream_delay_set; |
| 938 } | 1044 } |
| 939 | 1045 |
| 940 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { | 1046 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { |
| 1047 rtc::CritScope cs(&crit_capture_); | |
| 941 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1048 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 942 key_pressed_ = key_pressed; | 1049 capture_.key_pressed = key_pressed; |
| 943 } | 1050 } |
| 944 | 1051 |
| 945 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 1052 void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
| 946 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1053 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 947 CriticalSectionScoped crit_scoped(crit_); | 1054 rtc::CritScope cs(&crit_capture_); |
| 948 delay_offset_ms_ = offset; | 1055 capture_.delay_offset_ms = offset; |
| 949 } | 1056 } |
| 950 | 1057 |
| 951 int AudioProcessingImpl::delay_offset_ms() const { | 1058 int AudioProcessingImpl::delay_offset_ms() const { |
| 952 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1059 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 953 return delay_offset_ms_; | 1060 rtc::CritScope cs(&crit_capture_); |
| 1061 return capture_.delay_offset_ms; | |
| 954 } | 1062 } |
| 955 | 1063 |
| 956 int AudioProcessingImpl::StartDebugRecording( | 1064 int AudioProcessingImpl::StartDebugRecording( |
| 957 const char filename[AudioProcessing::kMaxFilenameSize]) { | 1065 const char filename[AudioProcessing::kMaxFilenameSize]) { |
| 958 CriticalSectionScoped crit_scoped(crit_); | 1066 // Run in a single-threaded manner. |
| 1067 rtc::CritScope cs_render(&crit_render_); | |
| 1068 rtc::CritScope cs_capture(&crit_capture_); | |
| 959 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1069 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 960 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1070 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
| 961 | 1071 |
| 962 if (filename == NULL) { | 1072 if (filename == NULL) { |
| 963 return kNullPointerError; | 1073 return kNullPointerError; |
| 964 } | 1074 } |
| 965 | 1075 |
| 966 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1076 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 967 // Stop any ongoing recording. | 1077 // Stop any ongoing recording. |
| 968 if (debug_file_->Open()) { | 1078 if (debug_dump_.debug_file->Open()) { |
| 969 if (debug_file_->CloseFile() == -1) { | 1079 if (debug_dump_.debug_file->CloseFile() == -1) { |
| 970 return kFileError; | 1080 return kFileError; |
| 971 } | 1081 } |
| 972 } | 1082 } |
| 973 | 1083 |
| 974 if (debug_file_->OpenFile(filename, false) == -1) { | 1084 if (debug_dump_.debug_file->OpenFile(filename, false) == -1) { |
| 975 debug_file_->CloseFile(); | 1085 debug_dump_.debug_file->CloseFile(); |
| 976 return kFileError; | 1086 return kFileError; |
| 977 } | 1087 } |
| 978 | 1088 |
| 979 RETURN_ON_ERR(WriteConfigMessage(true)); | 1089 RETURN_ON_ERR(WriteConfigMessage(true)); |
| 980 RETURN_ON_ERR(WriteInitMessage()); | 1090 RETURN_ON_ERR(WriteInitMessage()); |
| 981 return kNoError; | 1091 return kNoError; |
| 982 #else | 1092 #else |
| 983 return kUnsupportedFunctionError; | 1093 return kUnsupportedFunctionError; |
| 984 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1094 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 985 } | 1095 } |
| 986 | 1096 |
| 987 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { | 1097 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
| 988 CriticalSectionScoped crit_scoped(crit_); | 1098 // Run in a single-threaded manner. |
| 1099 rtc::CritScope cs_render(&crit_render_); | |
| 1100 rtc::CritScope cs_capture(&crit_capture_); | |
| 989 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1101 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 990 | 1102 |
| 991 if (handle == NULL) { | 1103 if (handle == NULL) { |
| 992 return kNullPointerError; | 1104 return kNullPointerError; |
| 993 } | 1105 } |
| 994 | 1106 |
| 995 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1107 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 996 // Stop any ongoing recording. | 1108 // Stop any ongoing recording. |
| 997 if (debug_file_->Open()) { | 1109 if (debug_dump_.debug_file->Open()) { |
| 998 if (debug_file_->CloseFile() == -1) { | 1110 if (debug_dump_.debug_file->CloseFile() == -1) { |
| 999 return kFileError; | 1111 return kFileError; |
| 1000 } | 1112 } |
| 1001 } | 1113 } |
| 1002 | 1114 |
| 1003 if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) { | 1115 if (debug_dump_.debug_file->OpenFromFileHandle(handle, true, false) == -1) { |
| 1004 return kFileError; | 1116 return kFileError; |
| 1005 } | 1117 } |
| 1006 | 1118 |
| 1007 RETURN_ON_ERR(WriteConfigMessage(true)); | 1119 RETURN_ON_ERR(WriteConfigMessage(true)); |
| 1008 RETURN_ON_ERR(WriteInitMessage()); | 1120 RETURN_ON_ERR(WriteInitMessage()); |
| 1009 return kNoError; | 1121 return kNoError; |
| 1010 #else | 1122 #else |
| 1011 return kUnsupportedFunctionError; | 1123 return kUnsupportedFunctionError; |
| 1012 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1124 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1013 } | 1125 } |
| 1014 | 1126 |
| 1015 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( | 1127 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
| 1016 rtc::PlatformFile handle) { | 1128 rtc::PlatformFile handle) { |
| 1129 // Run in a single-threaded manner. | |
| 1130 rtc::CritScope cs_render(&crit_render_); | |
| 1131 rtc::CritScope cs_capture(&crit_capture_); | |
| 1017 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1132 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1018 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1133 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
| 1019 return StartDebugRecording(stream); | 1134 return StartDebugRecording(stream); |
| 1020 } | 1135 } |
| 1021 | 1136 |
| 1022 int AudioProcessingImpl::StopDebugRecording() { | 1137 int AudioProcessingImpl::StopDebugRecording() { |
| 1023 CriticalSectionScoped crit_scoped(crit_); | 1138 // Run in a single-threaded manner. |
| 1139 rtc::CritScope cs_render(&crit_render_); | |
| 1140 rtc::CritScope cs_capture(&crit_capture_); | |
| 1024 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1141 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1025 | 1142 |
| 1026 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1143 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1027 // We just return if recording hasn't started. | 1144 // We just return if recording hasn't started. |
| 1028 if (debug_file_->Open()) { | 1145 if (debug_dump_.debug_file->Open()) { |
| 1029 if (debug_file_->CloseFile() == -1) { | 1146 if (debug_dump_.debug_file->CloseFile() == -1) { |
| 1030 return kFileError; | 1147 return kFileError; |
| 1031 } | 1148 } |
| 1032 } | 1149 } |
| 1033 return kNoError; | 1150 return kNoError; |
| 1034 #else | 1151 #else |
| 1035 return kUnsupportedFunctionError; | 1152 return kUnsupportedFunctionError; |
| 1036 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1153 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1037 } | 1154 } |
| 1038 | 1155 |
| 1039 EchoCancellation* AudioProcessingImpl::echo_cancellation() const { | 1156 EchoCancellation* AudioProcessingImpl::echo_cancellation() const { |
| 1040 return echo_cancellation_; | 1157 // Adding a lock here has no effect as it allows any access to the submodule |
| 1158 // from the returned pointer. | |
| 1159 return public_submodules_->echo_cancellation; | |
| 1041 } | 1160 } |
| 1042 | 1161 |
| 1043 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { | 1162 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { |
| 1044 return echo_control_mobile_; | 1163 // Adding a lock here has no effect as it allows any access to the submodule |
| 1164 // from the returned pointer. | |
| 1165 return public_submodules_->echo_control_mobile; | |
| 1045 } | 1166 } |
| 1046 | 1167 |
| 1047 GainControl* AudioProcessingImpl::gain_control() const { | 1168 GainControl* AudioProcessingImpl::gain_control() const { |
| 1048 if (use_new_agc_) { | 1169 // Adding a lock here has no effect as it allows any access to the submodule |
| 1049 return gain_control_for_new_agc_.get(); | 1170 // from the returned pointer. |
| 1171 if (constants_.use_new_agc) { | |
| 1172 return public_submodules_->gain_control_for_new_agc.get(); | |
| 1050 } | 1173 } |
| 1051 return gain_control_; | 1174 return public_submodules_->gain_control; |
| 1052 } | 1175 } |
| 1053 | 1176 |
| 1054 HighPassFilter* AudioProcessingImpl::high_pass_filter() const { | 1177 HighPassFilter* AudioProcessingImpl::high_pass_filter() const { |
| 1055 return high_pass_filter_; | 1178 // Adding a lock here has no effect as it allows any access to the submodule |
| 1179 // from the returned pointer. | |
| 1180 return public_submodules_->high_pass_filter; | |
| 1056 } | 1181 } |
| 1057 | 1182 |
| 1058 LevelEstimator* AudioProcessingImpl::level_estimator() const { | 1183 LevelEstimator* AudioProcessingImpl::level_estimator() const { |
| 1059 return level_estimator_; | 1184 // Adding a lock here has no effect as it allows any access to the submodule |
| 1185 // from the returned pointer. | |
| 1186 return public_submodules_->level_estimator; | |
| 1060 } | 1187 } |
| 1061 | 1188 |
| 1062 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { | 1189 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { |
| 1063 return noise_suppression_; | 1190 // Adding a lock here has no effect as it allows any access to the submodule |
| 1191 // from the returned pointer. | |
| 1192 return public_submodules_->noise_suppression; | |
| 1064 } | 1193 } |
| 1065 | 1194 |
| 1066 VoiceDetection* AudioProcessingImpl::voice_detection() const { | 1195 VoiceDetection* AudioProcessingImpl::voice_detection() const { |
| 1067 return voice_detection_; | 1196 // Adding a lock here has no effect as it allows any access to the submodule |
| 1197 // from the returned pointer. | |
| 1198 return public_submodules_->voice_detection; | |
| 1068 } | 1199 } |
| 1069 | 1200 |
| 1070 bool AudioProcessingImpl::is_data_processed() const { | 1201 bool AudioProcessingImpl::is_data_processed() const { |
| 1071 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1202 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1072 if (beamformer_enabled_) { | 1203 if (constants_.beamformer_enabled) { |
| 1073 return true; | 1204 return true; |
| 1074 } | 1205 } |
| 1075 | 1206 |
| 1076 int enabled_count = 0; | 1207 int enabled_count = 0; |
| 1077 for (auto item : component_list_) { | 1208 for (auto item : private_submodules_->component_list) { |
| 1078 if (item->is_component_enabled()) { | 1209 if (item->is_component_enabled()) { |
| 1079 enabled_count++; | 1210 enabled_count++; |
| 1080 } | 1211 } |
| 1081 } | 1212 } |
| 1082 | 1213 |
| 1083 // Data is unchanged if no components are enabled, or if only level_estimator_ | 1214 // Data is unchanged if no components are enabled, or if only |
| 1084 // or voice_detection_ is enabled. | 1215 // public_submodules_->level_estimator |
| 1216 // or public_submodules_->voice_detection is enabled. | |
| 1085 if (enabled_count == 0) { | 1217 if (enabled_count == 0) { |
| 1086 return false; | 1218 return false; |
| 1087 } else if (enabled_count == 1) { | 1219 } else if (enabled_count == 1) { |
| 1088 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) { | 1220 if (public_submodules_->level_estimator->is_enabled() || |
| 1221 public_submodules_->voice_detection->is_enabled()) { | |
| 1089 return false; | 1222 return false; |
| 1090 } | 1223 } |
| 1091 } else if (enabled_count == 2) { | 1224 } else if (enabled_count == 2) { |
| 1092 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) { | 1225 if (public_submodules_->level_estimator->is_enabled() && |
| 1226 public_submodules_->voice_detection->is_enabled()) { | |
| 1093 return false; | 1227 return false; |
| 1094 } | 1228 } |
| 1095 } | 1229 } |
| 1096 return true; | 1230 return true; |
| 1097 } | 1231 } |
| 1098 | 1232 |
| 1099 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { | 1233 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { |
| 1100 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1234 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1101 // Check if we've upmixed or downmixed the audio. | 1235 // Check if we've upmixed or downmixed the audio. |
| 1102 return ((shared_state_.api_format_.output_stream().num_channels() != | 1236 return ((formats_.api_format.output_stream().num_channels() != |
| 1103 shared_state_.api_format_.input_stream().num_channels()) || | 1237 formats_.api_format.input_stream().num_channels()) || |
| 1104 is_data_processed || transient_suppressor_enabled_); | 1238 is_data_processed || capture_.transient_suppressor_enabled); |
| 1105 } | 1239 } |
| 1106 | 1240 |
| 1107 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { | 1241 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { |
| 1108 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1242 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1109 return (is_data_processed && | 1243 return (is_data_processed && |
| 1110 (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 1244 (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| 1111 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); | 1245 kSampleRate32kHz || |
| 1246 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == | |
| 1247 kSampleRate48kHz)); | |
| 1112 } | 1248 } |
| 1113 | 1249 |
| 1114 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { | 1250 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { |
| 1115 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1251 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1116 if (!is_data_processed && !voice_detection_->is_enabled() && | 1252 if (!is_data_processed && |
| 1117 !transient_suppressor_enabled_) { | 1253 !public_submodules_->voice_detection->is_enabled() && |
| 1118 // Only level_estimator_ is enabled. | 1254 !capture_.transient_suppressor_enabled) { |
| 1255 // Only public_submodules_->level_estimator is enabled. | |
| 1119 return false; | 1256 return false; |
| 1120 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 1257 } else if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| 1121 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 1258 kSampleRate32kHz || |
| 1122 // Something besides level_estimator_ is enabled, and we have super-wb. | 1259 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| 1260 kSampleRate48kHz) { | |
| 1261 // Something besides public_submodules_->level_estimator is enabled, and we | |
| 1262 // have super-wb. | |
| 1123 return true; | 1263 return true; |
| 1124 } | 1264 } |
| 1125 return false; | 1265 return false; |
| 1126 } | 1266 } |
| 1127 | 1267 |
| 1128 bool AudioProcessingImpl::is_rev_processed() const { | 1268 bool AudioProcessingImpl::is_rev_processed() const { |
| 1129 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 1269 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 1130 return intelligibility_enabled_ && intelligibility_enhancer_->active(); | 1270 return constants_.intelligibility_enabled && |
| 1271 public_submodules_->intelligibility_enhancer->active(); | |
| 1131 } | 1272 } |
| 1132 | 1273 |
| 1133 bool AudioProcessingImpl::rev_conversion_needed() const { | 1274 bool AudioProcessingImpl::rev_conversion_needed() const { |
| 1134 // Called from several threads, thread check not possible. | 1275 // Called from several threads, thread check not possible. |
| 1135 return (shared_state_.api_format_.reverse_input_stream() != | 1276 return (formats_.api_format.reverse_input_stream() != |
| 1136 shared_state_.api_format_.reverse_output_stream()); | 1277 formats_.api_format.reverse_output_stream()); |
| 1137 } | 1278 } |
| 1138 | 1279 |
| 1139 void AudioProcessingImpl::InitializeExperimentalAgc() { | 1280 void AudioProcessingImpl::InitializeExperimentalAgc() { |
| 1140 // Called from several threads, thread check not possible. | 1281 // Called from several threads, thread check not possible. |
| 1141 if (use_new_agc_) { | 1282 if (constants_.use_new_agc) { |
| 1142 if (!agc_manager_.get()) { | 1283 if (!private_submodules_->agc_manager.get()) { |
| 1143 agc_manager_.reset(new AgcManagerDirect(gain_control_, | 1284 private_submodules_->agc_manager.reset(new AgcManagerDirect( |
| 1144 gain_control_for_new_agc_.get(), | 1285 public_submodules_->gain_control, |
| 1145 agc_startup_min_volume_)); | 1286 public_submodules_->gain_control_for_new_agc.get(), |
| 1287 constants_.agc_startup_min_volume)); | |
| 1146 } | 1288 } |
| 1147 agc_manager_->Initialize(); | 1289 private_submodules_->agc_manager->Initialize(); |
| 1148 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 1290 private_submodules_->agc_manager->SetCaptureMuted( |
| 1291 capture_.output_will_be_muted); | |
| 1149 } | 1292 } |
| 1150 } | 1293 } |
| 1151 | 1294 |
| 1152 void AudioProcessingImpl::InitializeTransient() { | 1295 void AudioProcessingImpl::InitializeTransient() { |
| 1153 // Called from several threads, thread check not possible. | 1296 // Called from several threads, thread check not possible. |
| 1154 if (transient_suppressor_enabled_) { | 1297 if (capture_.transient_suppressor_enabled) { |
| 1155 if (!transient_suppressor_.get()) { | 1298 if (!public_submodules_->transient_suppressor.get()) { |
| 1156 transient_suppressor_.reset(new TransientSuppressor()); | 1299 public_submodules_->transient_suppressor.reset(new TransientSuppressor()); |
| 1157 } | 1300 } |
| 1158 transient_suppressor_->Initialize( | 1301 public_submodules_->transient_suppressor->Initialize( |
| 1159 fwd_proc_format_.sample_rate_hz(), split_rate_, | 1302 capture_nonlocked_.fwd_proc_format.sample_rate_hz(), |
| 1160 shared_state_.api_format_.output_stream().num_channels()); | 1303 capture_nonlocked_.split_rate, |
| 1304 formats_.api_format.output_stream().num_channels()); | |
| 1161 } | 1305 } |
| 1162 } | 1306 } |
| 1163 | 1307 |
| 1164 void AudioProcessingImpl::InitializeBeamformer() { | 1308 void AudioProcessingImpl::InitializeBeamformer() { |
| 1165 // Called from several threads, thread check not possible. | 1309 // Called from several threads, thread check not possible. |
| 1166 if (beamformer_enabled_) { | 1310 if (constants_.beamformer_enabled) { |
| 1167 if (!beamformer_) { | 1311 if (!private_submodules_->beamformer) { |
| 1168 beamformer_.reset( | 1312 private_submodules_->beamformer.reset(new NonlinearBeamformer( |
| 1169 new NonlinearBeamformer(array_geometry_, target_direction_)); | 1313 constants_.array_geometry, constants_.target_direction)); |
| 1170 } | 1314 } |
| 1171 beamformer_->Initialize(kChunkSizeMs, split_rate_); | 1315 private_submodules_->beamformer->Initialize(kChunkSizeMs, |
| 1316 capture_nonlocked_.split_rate); | |
| 1172 } | 1317 } |
| 1173 } | 1318 } |
| 1174 | 1319 |
| 1175 void AudioProcessingImpl::InitializeIntelligibility() { | 1320 void AudioProcessingImpl::InitializeIntelligibility() { |
| 1176 // Called from several threads, thread check not possible. | 1321 // Called from several threads, thread check not possible. |
| 1177 if (intelligibility_enabled_) { | 1322 if (constants_.intelligibility_enabled) { |
| 1178 IntelligibilityEnhancer::Config config; | 1323 IntelligibilityEnhancer::Config config; |
| 1179 config.sample_rate_hz = split_rate_; | 1324 config.sample_rate_hz = capture_nonlocked_.split_rate; |
| 1180 config.num_capture_channels = capture_audio_->num_channels(); | 1325 config.num_capture_channels = capture_.capture_audio->num_channels(); |
| 1181 config.num_render_channels = render_audio_->num_channels(); | 1326 config.num_render_channels = render_.render_audio->num_channels(); |
| 1182 intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config)); | 1327 public_submodules_->intelligibility_enhancer.reset( |
| 1328 new IntelligibilityEnhancer(config)); | |
| 1183 } | 1329 } |
| 1184 } | 1330 } |
| 1185 | 1331 |
| 1186 void AudioProcessingImpl::MaybeUpdateHistograms() { | 1332 void AudioProcessingImpl::MaybeUpdateHistograms() { |
| 1187 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1333 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1188 static const int kMinDiffDelayMs = 60; | 1334 static const int kMinDiffDelayMs = 60; |
| 1189 | 1335 |
| 1190 if (echo_cancellation()->is_enabled()) { | 1336 if (echo_cancellation()->is_enabled()) { |
| 1191 // Activate delay_jumps_ counters if we know echo_cancellation is running. | 1337 // Activate delay_jumps_ counters if we know echo_cancellation is running. |
| 1192 // If a stream has echo we know that the echo_cancellation is in process. | 1338 // If a stream has echo we know that the echo_cancellation is in process. |
| 1193 if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { | 1339 if (capture_.stream_delay_jumps == -1 && |
| 1194 stream_delay_jumps_ = 0; | 1340 echo_cancellation()->stream_has_echo()) { |
| 1341 capture_.stream_delay_jumps = 0; | |
| 1195 } | 1342 } |
| 1196 if (aec_system_delay_jumps_ == -1 && | 1343 if (capture_.aec_system_delay_jumps == -1 && |
| 1197 echo_cancellation()->stream_has_echo()) { | 1344 echo_cancellation()->stream_has_echo()) { |
| 1198 aec_system_delay_jumps_ = 0; | 1345 capture_.aec_system_delay_jumps = 0; |
| 1199 } | 1346 } |
| 1200 | 1347 |
| 1201 // Detect a jump in platform reported system delay and log the difference. | 1348 // Detect a jump in platform reported system delay and log the difference. |
| 1202 const int diff_stream_delay_ms = stream_delay_ms_ - last_stream_delay_ms_; | 1349 const int diff_stream_delay_ms = |
| 1203 if (diff_stream_delay_ms > kMinDiffDelayMs && last_stream_delay_ms_ != 0) { | 1350 capture_nonlocked_.stream_delay_ms - capture_.last_stream_delay_ms; |
| 1351 if (diff_stream_delay_ms > kMinDiffDelayMs && | |
| 1352 capture_.last_stream_delay_ms != 0) { | |
| 1204 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump", | 1353 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump", |
| 1205 diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100); | 1354 diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100); |
| 1206 if (stream_delay_jumps_ == -1) { | 1355 if (capture_.stream_delay_jumps == -1) { |
| 1207 stream_delay_jumps_ = 0; // Activate counter if needed. | 1356 capture_.stream_delay_jumps = 0; // Activate counter if needed. |
| 1208 } | 1357 } |
| 1209 stream_delay_jumps_++; | 1358 capture_.stream_delay_jumps++; |
| 1210 } | 1359 } |
| 1211 last_stream_delay_ms_ = stream_delay_ms_; | 1360 capture_.last_stream_delay_ms = capture_nonlocked_.stream_delay_ms; |
| 1212 | 1361 |
| 1213 // Detect a jump in AEC system delay and log the difference. | 1362 // Detect a jump in AEC system delay and log the difference. |
| 1214 const int frames_per_ms = rtc::CheckedDivExact(split_rate_, 1000); | 1363 const int frames_per_ms = |
| 1364 rtc::CheckedDivExact(capture_nonlocked_.split_rate, 1000); | |
| 1215 const int aec_system_delay_ms = | 1365 const int aec_system_delay_ms = |
| 1216 WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms; | 1366 WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms; |
| 1217 const int diff_aec_system_delay_ms = | 1367 const int diff_aec_system_delay_ms = |
| 1218 aec_system_delay_ms - last_aec_system_delay_ms_; | 1368 aec_system_delay_ms - capture_.last_aec_system_delay_ms; |
| 1219 if (diff_aec_system_delay_ms > kMinDiffDelayMs && | 1369 if (diff_aec_system_delay_ms > kMinDiffDelayMs && |
| 1220 last_aec_system_delay_ms_ != 0) { | 1370 capture_.last_aec_system_delay_ms != 0) { |
| 1221 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump", | 1371 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump", |
| 1222 diff_aec_system_delay_ms, kMinDiffDelayMs, 1000, | 1372 diff_aec_system_delay_ms, kMinDiffDelayMs, 1000, |
| 1223 100); | 1373 100); |
| 1224 if (aec_system_delay_jumps_ == -1) { | 1374 if (capture_.aec_system_delay_jumps == -1) { |
| 1225 aec_system_delay_jumps_ = 0; // Activate counter if needed. | 1375 capture_.aec_system_delay_jumps = 0; // Activate counter if needed. |
| 1226 } | 1376 } |
| 1227 aec_system_delay_jumps_++; | 1377 capture_.aec_system_delay_jumps++; |
| 1228 } | 1378 } |
| 1229 last_aec_system_delay_ms_ = aec_system_delay_ms; | 1379 capture_.last_aec_system_delay_ms = aec_system_delay_ms; |
| 1230 } | 1380 } |
| 1231 } | 1381 } |
| 1232 | 1382 |
| 1233 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { | 1383 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { |
| 1384 // Run in a single-threaded manner. | |
| 1385 rtc::CritScope cs_render(&crit_render_); | |
| 1386 rtc::CritScope cs_capture(&crit_capture_); | |
| 1234 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1387 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1235 CriticalSectionScoped crit_scoped(crit_); | 1388 |
| 1236 if (stream_delay_jumps_ > -1) { | 1389 if (capture_.stream_delay_jumps > -1) { |
| 1237 RTC_HISTOGRAM_ENUMERATION( | 1390 RTC_HISTOGRAM_ENUMERATION( |
| 1238 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", | 1391 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", |
| 1239 stream_delay_jumps_, 51); | 1392 capture_.stream_delay_jumps, 51); |
| 1240 } | 1393 } |
| 1241 stream_delay_jumps_ = -1; | 1394 capture_.stream_delay_jumps = -1; |
| 1242 last_stream_delay_ms_ = 0; | 1395 capture_.last_stream_delay_ms = 0; |
| 1243 | 1396 |
| 1244 if (aec_system_delay_jumps_ > -1) { | 1397 if (capture_.aec_system_delay_jumps > -1) { |
| 1245 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 1398 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", |
| 1246 aec_system_delay_jumps_, 51); | 1399 capture_.aec_system_delay_jumps, 51); |
| 1247 } | 1400 } |
| 1248 aec_system_delay_jumps_ = -1; | 1401 capture_.aec_system_delay_jumps = -1; |
| 1249 last_aec_system_delay_ms_ = 0; | 1402 capture_.last_aec_system_delay_ms = 0; |
| 1250 } | 1403 } |
| 1251 | 1404 |
| 1252 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1405 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1253 int AudioProcessingImpl::WriteMessageToDebugFile() { | 1406 int AudioProcessingImpl::WriteMessageToDebugFile( |
| 1254 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1407 FileWrapper* debug_file, |
| 1255 int32_t size = event_msg_->ByteSize(); | 1408 rtc::CriticalSection* crit_debug, |
| 1409 ApmDebugDumpThreadState* debug_state) { | |
| 1410 // Thread checker not possible due to function being static. | |
| 1411 int32_t size = debug_state->event_msg->ByteSize(); | |
| 1256 if (size <= 0) { | 1412 if (size <= 0) { |
| 1257 return kUnspecifiedError; | 1413 return kUnspecifiedError; |
| 1258 } | 1414 } |
| 1259 #if defined(WEBRTC_ARCH_BIG_ENDIAN) | 1415 #if defined(WEBRTC_ARCH_BIG_ENDIAN) |
| 1260 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be | 1416 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be |
| 1261 // pretty safe in assuming little-endian. | 1417 // pretty safe in assuming little-endian. |
| 1262 #endif | 1418 #endif |
| 1263 | 1419 |
| 1264 if (!event_msg_->SerializeToString(&event_str_)) { | 1420 if (!debug_state->event_msg->SerializeToString(&debug_state->event_str)) { |
| 1265 return kUnspecifiedError; | 1421 return kUnspecifiedError; |
| 1266 } | 1422 } |
| 1267 | 1423 |
| 1268 // Write message preceded by its size. | 1424 { |
| 1269 if (!debug_file_->Write(&size, sizeof(int32_t))) { | 1425 // Ensure atomic writes of the message. |
| 1270 return kFileError; | 1426 rtc::CritScope cs_capture(crit_debug); |
| 1271 } | 1427 // Write message preceded by its size. |
| 1272 if (!debug_file_->Write(event_str_.data(), event_str_.length())) { | 1428 if (!debug_file->Write(&size, sizeof(int32_t))) { |
| 1273 return kFileError; | 1429 return kFileError; |
| 1430 } | |
| 1431 if (!debug_file->Write(debug_state->event_str.data(), | |
| 1432 debug_state->event_str.length())) { | |
| 1433 return kFileError; | |
| 1434 } | |
| 1274 } | 1435 } |
| 1275 | 1436 |
| 1276 event_msg_->Clear(); | 1437 debug_state->event_msg->Clear(); |
| 1277 | 1438 |
| 1278 return kNoError; | 1439 return kNoError; |
| 1279 } | 1440 } |
| 1280 | 1441 |
| 1281 int AudioProcessingImpl::WriteInitMessage() { | 1442 int AudioProcessingImpl::WriteInitMessage() { |
| 1282 // Called from both render and capture threads, not threadchecker possible. | 1443 // Called from both render and capture threads, not threadchecker possible. |
| 1283 event_msg_->set_type(audioproc::Event::INIT); | 1444 debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT); |
| 1284 audioproc::Init* msg = event_msg_->mutable_init(); | 1445 audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init(); |
| 1285 msg->set_sample_rate( | 1446 msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz()); |
| 1286 shared_state_.api_format_.input_stream().sample_rate_hz()); | |
| 1287 msg->set_num_input_channels( | 1447 msg->set_num_input_channels( |
| 1288 shared_state_.api_format_.input_stream().num_channels()); | 1448 formats_.api_format.input_stream().num_channels()); |
| 1289 msg->set_num_output_channels( | 1449 msg->set_num_output_channels( |
| 1290 shared_state_.api_format_.output_stream().num_channels()); | 1450 formats_.api_format.output_stream().num_channels()); |
| 1291 msg->set_num_reverse_channels( | 1451 msg->set_num_reverse_channels( |
| 1292 shared_state_.api_format_.reverse_input_stream().num_channels()); | 1452 formats_.api_format.reverse_input_stream().num_channels()); |
| 1293 msg->set_reverse_sample_rate( | 1453 msg->set_reverse_sample_rate( |
| 1294 shared_state_.api_format_.reverse_input_stream().sample_rate_hz()); | 1454 formats_.api_format.reverse_input_stream().sample_rate_hz()); |
| 1295 msg->set_output_sample_rate( | 1455 msg->set_output_sample_rate( |
| 1296 shared_state_.api_format_.output_stream().sample_rate_hz()); | 1456 formats_.api_format.output_stream().sample_rate_hz()); |
| 1297 // TODO(ekmeyerson): Add reverse output fields to event_msg_. | 1457 // TODO(ekmeyerson): Add reverse output fields to |
| 1458 // debug_dump_.capture.event_msg. | |
| 1298 | 1459 |
| 1299 RETURN_ON_ERR(WriteMessageToDebugFile()); | 1460 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1461 &crit_debug_, &debug_dump_.capture)); | |
| 1300 return kNoError; | 1462 return kNoError; |
| 1301 } | 1463 } |
| 1302 | 1464 |
| 1303 int AudioProcessingImpl::WriteConfigMessage(bool forced) { | 1465 int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
| 1304 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1466 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1305 audioproc::Config config; | 1467 audioproc::Config config; |
| 1306 | 1468 |
| 1307 config.set_aec_enabled(echo_cancellation_->is_enabled()); | 1469 config.set_aec_enabled(public_submodules_->echo_cancellation->is_enabled()); |
| 1308 config.set_aec_delay_agnostic_enabled( | 1470 config.set_aec_delay_agnostic_enabled( |
| 1309 echo_cancellation_->is_delay_agnostic_enabled()); | 1471 public_submodules_->echo_cancellation->is_delay_agnostic_enabled()); |
| 1310 config.set_aec_drift_compensation_enabled( | 1472 config.set_aec_drift_compensation_enabled( |
| 1311 echo_cancellation_->is_drift_compensation_enabled()); | 1473 public_submodules_->echo_cancellation->is_drift_compensation_enabled()); |
| 1312 config.set_aec_extended_filter_enabled( | 1474 config.set_aec_extended_filter_enabled( |
| 1313 echo_cancellation_->is_extended_filter_enabled()); | 1475 public_submodules_->echo_cancellation->is_extended_filter_enabled()); |
| 1314 config.set_aec_suppression_level( | 1476 config.set_aec_suppression_level(static_cast<int>( |
| 1315 static_cast<int>(echo_cancellation_->suppression_level())); | 1477 public_submodules_->echo_cancellation->suppression_level())); |
| 1316 | 1478 |
| 1317 config.set_aecm_enabled(echo_control_mobile_->is_enabled()); | 1479 config.set_aecm_enabled( |
| 1480 public_submodules_->echo_control_mobile->is_enabled()); | |
| 1318 config.set_aecm_comfort_noise_enabled( | 1481 config.set_aecm_comfort_noise_enabled( |
| 1319 echo_control_mobile_->is_comfort_noise_enabled()); | 1482 public_submodules_->echo_control_mobile->is_comfort_noise_enabled()); |
| 1320 config.set_aecm_routing_mode( | 1483 config.set_aecm_routing_mode(static_cast<int>( |
| 1321 static_cast<int>(echo_control_mobile_->routing_mode())); | 1484 public_submodules_->echo_control_mobile->routing_mode())); |
| 1322 | 1485 |
| 1323 config.set_agc_enabled(gain_control_->is_enabled()); | 1486 config.set_agc_enabled(public_submodules_->gain_control->is_enabled()); |
| 1324 config.set_agc_mode(static_cast<int>(gain_control_->mode())); | 1487 config.set_agc_mode( |
| 1325 config.set_agc_limiter_enabled(gain_control_->is_limiter_enabled()); | 1488 static_cast<int>(public_submodules_->gain_control->mode())); |
| 1326 config.set_noise_robust_agc_enabled(use_new_agc_); | 1489 config.set_agc_limiter_enabled( |
| 1490 public_submodules_->gain_control->is_limiter_enabled()); | |
| 1491 config.set_noise_robust_agc_enabled(constants_.use_new_agc); | |
| 1327 | 1492 |
| 1328 config.set_hpf_enabled(high_pass_filter_->is_enabled()); | 1493 config.set_hpf_enabled(public_submodules_->high_pass_filter->is_enabled()); |
| 1329 | 1494 |
| 1330 config.set_ns_enabled(noise_suppression_->is_enabled()); | 1495 config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled()); |
| 1331 config.set_ns_level(static_cast<int>(noise_suppression_->level())); | 1496 config.set_ns_level( |
| 1497 static_cast<int>(public_submodules_->noise_suppression->level())); | |
| 1332 | 1498 |
| 1333 config.set_transient_suppression_enabled(transient_suppressor_enabled_); | 1499 config.set_transient_suppression_enabled( |
| 1500 capture_.transient_suppressor_enabled); | |
| 1334 | 1501 |
| 1335 std::string serialized_config = config.SerializeAsString(); | 1502 std::string serialized_config = config.SerializeAsString(); |
| 1336 if (!forced && last_serialized_config_ == serialized_config) { | 1503 if (!forced && |
| 1504 debug_dump_.capture.last_serialized_config == serialized_config) { | |
| 1337 return kNoError; | 1505 return kNoError; |
| 1338 } | 1506 } |
| 1339 | 1507 |
| 1340 last_serialized_config_ = serialized_config; | 1508 debug_dump_.capture.last_serialized_config = serialized_config; |
| 1341 | 1509 |
| 1342 event_msg_->set_type(audioproc::Event::CONFIG); | 1510 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
| 1343 event_msg_->mutable_config()->CopyFrom(config); | 1511 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| 1344 | 1512 |
| 1345 RETURN_ON_ERR(WriteMessageToDebugFile()); | 1513 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1514 &crit_debug_, &debug_dump_.capture)); | |
| 1346 return kNoError; | 1515 return kNoError; |
| 1347 } | 1516 } |
| 1348 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1517 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1349 | 1518 |
| 1350 } // namespace webrtc | 1519 } // namespace webrtc |
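
The main change visible in this part of the CL is the move from the single CriticalSectionScoped crit_ to per-path locks (crit_render_, crit_capture_) plus crit_debug_ for the dump file, with calls such as StartDebugRecording(), StopDebugRecording() and UpdateHistogramsOnCallEnd() taking both locks, render first and then capture, so they effectively run single-threaded. Below is a minimal standalone sketch of that pattern using std::mutex instead of rtc::CriticalSection; the class TwoLockProcessor and its members are hypothetical illustrations, not part of the CL.

```
#include <iostream>
#include <mutex>

// Minimal sketch (not WebRTC code): one lock per processing path, and
// both locks -- always render first, then capture, mirroring the CL --
// for operations that must run "in a single-threaded manner".
class TwoLockProcessor {
 public:
  void ProcessRenderStream() {
    std::lock_guard<std::mutex> render_lock(render_mutex_);
    ++render_frames_;  // Touches render-side state only.
  }

  void ProcessCaptureStream() {
    std::lock_guard<std::mutex> capture_lock(capture_mutex_);
    ++capture_frames_;  // Touches capture-side state only.
  }

  // Analogue of StartDebugRecording()/UpdateHistogramsOnCallEnd():
  // acquire both locks in the same order everywhere, so render and
  // capture threads are excluded while shared state is modified and
  // no lock-ordering deadlock can occur.
  void ResetCounters() {
    std::lock_guard<std::mutex> render_lock(render_mutex_);
    std::lock_guard<std::mutex> capture_lock(capture_mutex_);
    render_frames_ = 0;
    capture_frames_ = 0;
  }

 private:
  std::mutex render_mutex_;
  std::mutex capture_mutex_;
  int render_frames_ = 0;
  int capture_frames_ = 0;
};

int main() {
  TwoLockProcessor apm;
  apm.ProcessRenderStream();
  apm.ProcessCaptureStream();
  apm.ResetCounters();
  std::cout << "two-lock sketch ran\n";
  return 0;
}
```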
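set_stream_delay_ms() applies the configured delay offset, clamps the result to the [0, 500] ms range, and returns kBadStreamParameterWarning when it had to clamp while still storing the clamped value. The following self-contained rendition covers just that clamping logic; the free function ClampStreamDelayMs and the local enum values are illustrative only and do not match APM's real error codes.

```
#include <cassert>

// Local to this sketch; the numeric values are not APM's real codes.
enum Error { kNoError = 0, kBadStreamParameterWarning = -1 };

// Mirrors the clamping in set_stream_delay_ms(): apply the delay offset,
// then limit the value to [0, 500] ms and report a warning (not a hard
// error) if clamping was needed; the clamped value is still stored.
Error ClampStreamDelayMs(int delay, int delay_offset_ms, int* clamped_delay) {
  Error retval = kNoError;
  delay += delay_offset_ms;
  if (delay < 0) {
    delay = 0;
    retval = kBadStreamParameterWarning;
  }
  // Upper bound the diff's TODO calls "rather arbitrarily chosen".
  if (delay > 500) {
    delay = 500;
    retval = kBadStreamParameterWarning;
  }
  *clamped_delay = delay;
  return retval;
}

int main() {
  int delay_ms = 0;
  assert(ClampStreamDelayMs(120, 30, &delay_ms) == kNoError && delay_ms == 150);
  assert(ClampStreamDelayMs(600, 0, &delay_ms) == kBadStreamParameterWarning &&
         delay_ms == 500);
  assert(ClampStreamDelayMs(-20, 10, &delay_ms) == kBadStreamParameterWarning &&
         delay_ms == 0);
  return 0;
}
```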
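WriteMessageToDebugFile() now takes the file, the dedicated crit_debug_ lock and the per-stream dump state explicitly, and writes each serialized audioproc::Event as a 32-bit byte count followed by the payload, holding the lock so render- and capture-side writers cannot interleave records. A hedged sketch of reading such a size-prefixed stream back is shown below; ReadSizePrefixedRecords is hypothetical, it assumes the writer's native (little-endian, per the TODO in the diff) length prefix, and decoding the protobuf payload is out of scope.

```
#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical reader for the dump layout that WriteMessageToDebugFile()
// produces: each record is a 32-bit byte count (native byte order; the
// diff's TODO assumes little-endian) followed by that many bytes of a
// serialized audioproc::Event. The raw payloads are returned as strings.
std::vector<std::string> ReadSizePrefixedRecords(const std::string& path) {
  std::vector<std::string> records;
  std::ifstream in(path, std::ios::binary);
  int32_t size = 0;
  while (in.read(reinterpret_cast<char*>(&size), sizeof(size))) {
    if (size <= 0) {
      break;  // The writer rejects non-positive sizes, so treat as corrupt.
    }
    std::string payload(static_cast<size_t>(size), '\0');
    if (!in.read(&payload[0], size)) {
      break;  // Truncated file.
    }
    records.push_back(std::move(payload));
  }
  return records;
}

int main(int argc, char** argv) {
  if (argc < 2) {
    std::cout << "usage: reader <dump-file>\n";
    return 0;
  }
  std::cout << ReadSizePrefixedRecords(argv[1]).size() << " records\n";
  return 0;
}
```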
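MaybeUpdateHistograms() keeps the jump counters at -1 until they are activated, records a jump only when the delay grows by more than kMinDiffDelayMs (60 ms) relative to the previous non-zero reading, and UpdateHistogramsOnCallEnd() reports the counts and resets them. A compact sketch of that counting scheme follows; DelayJumpCounter is an illustrative name, and the activation path driven by the AEC reporting echo is omitted.

```
#include <cassert>

// Illustrative counter following MaybeUpdateHistograms(): -1 means the
// counter was never activated; a jump is recorded only when the delay
// grows by more than 60 ms relative to the previous non-zero reading.
// (The real code also activates the counters as soon as the AEC reports
// echo; that path is omitted here.)
struct DelayJumpCounter {
  static constexpr int kMinDiffDelayMs = 60;

  int jumps = -1;         // -1 == counter not activated yet.
  int last_delay_ms = 0;

  void Update(int delay_ms) {
    const int diff_ms = delay_ms - last_delay_ms;
    if (diff_ms > kMinDiffDelayMs && last_delay_ms != 0) {
      if (jumps == -1) {
        jumps = 0;  // Activate counter if needed.
      }
      ++jumps;
    }
    last_delay_ms = delay_ms;
  }

  // Analogue of UpdateHistogramsOnCallEnd(): report the count and reset.
  int Finish() {
    const int reported = jumps;
    jumps = -1;
    last_delay_ms = 0;
    return reported;  // -1 means the counter never became active.
  }
};

int main() {
  DelayJumpCounter counter;
  counter.Update(100);  // First non-zero reading: baseline only.
  counter.Update(200);  // +100 ms > 60 ms threshold: one jump.
  counter.Update(220);  // +20 ms: below threshold.
  assert(counter.Finish() == 1);
  return 0;
}
```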