| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 19 matching lines...) |
| 30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" | 30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" |
| 31 #include "webrtc/modules/audio_processing/gain_control_impl.h" | 31 #include "webrtc/modules/audio_processing/gain_control_impl.h" |
| 32 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" | 32 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" |
| 33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc
er.h" | 33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc
er.h" |
| 34 #include "webrtc/modules/audio_processing/level_estimator_impl.h" | 34 #include "webrtc/modules/audio_processing/level_estimator_impl.h" |
| 35 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" | 35 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" |
| 36 #include "webrtc/modules/audio_processing/processing_component.h" | 36 #include "webrtc/modules/audio_processing/processing_component.h" |
| 37 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" | 37 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" |
| 38 #include "webrtc/modules/audio_processing/voice_detection_impl.h" | 38 #include "webrtc/modules/audio_processing/voice_detection_impl.h" |
| 39 #include "webrtc/modules/include/module_common_types.h" | 39 #include "webrtc/modules/include/module_common_types.h" |
| 40 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | |
| 41 #include "webrtc/system_wrappers/include/file_wrapper.h" | 40 #include "webrtc/system_wrappers/include/file_wrapper.h" |
| 42 #include "webrtc/system_wrappers/include/logging.h" | 41 #include "webrtc/system_wrappers/include/logging.h" |
| 43 #include "webrtc/system_wrappers/include/metrics.h" | 42 #include "webrtc/system_wrappers/include/metrics.h" |
| 44 | 43 |
| 45 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 44 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 46 // Files generated at build-time by the protobuf compiler. | 45 // Files generated at build-time by the protobuf compiler. |
| 47 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD | 46 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD |
| 48 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h" | 47 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h" |
| 49 #else | 48 #else |
| 50 #include "webrtc/audio_processing/debug.pb.h" | 49 #include "webrtc/audio_processing/debug.pb.h" |
| (...skipping 17 matching lines...) |
| 68 case AudioProcessing::kStereo: | 67 case AudioProcessing::kStereo: |
| 69 return false; | 68 return false; |
| 70 case AudioProcessing::kMonoAndKeyboard: | 69 case AudioProcessing::kMonoAndKeyboard: |
| 71 case AudioProcessing::kStereoAndKeyboard: | 70 case AudioProcessing::kStereoAndKeyboard: |
| 72 return true; | 71 return true; |
| 73 } | 72 } |
| 74 | 73 |
| 75 assert(false); | 74 assert(false); |
| 76 return false; | 75 return false; |
| 77 } | 76 } |
| 77 } // namespace |
| 78 | 78 |
| 79 } // namespace | 79 struct ApmPublicSubmodules { |
| 80 ApmPublicSubmodules() |
| 81 : echo_cancellation(nullptr), |
| 82 echo_control_mobile(nullptr), |
| 83 gain_control(nullptr), |
| 84 high_pass_filter(nullptr), |
| 85 level_estimator(nullptr), |
| 86 noise_suppression(nullptr), |
| 87 voice_detection(nullptr) {} |
| 88 // Accessed externally of APM without any lock acquired. |
| 89 EchoCancellationImpl* echo_cancellation; |
| 90 EchoControlMobileImpl* echo_control_mobile; |
| 91 GainControlImpl* gain_control; |
| 92 HighPassFilterImpl* high_pass_filter; |
| 93 LevelEstimatorImpl* level_estimator; |
| 94 NoiseSuppressionImpl* noise_suppression; |
| 95 VoiceDetectionImpl* voice_detection; |
| 96 rtc::scoped_ptr<GainControlForNewAgc> gain_control_for_new_agc; |
| 97 |
| 98 // Accessed internally from both render and capture. |
| 99 rtc::scoped_ptr<TransientSuppressor> transient_suppressor; |
| 100 rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer; |
| 101 }; |
| 102 |
| 103 struct ApmPrivateSubmodules { |
| 104 explicit ApmPrivateSubmodules(Beamformer<float>* beamformer) |
| 105 : beamformer(beamformer) {} |
| 106 // Accessed internally from capture or during initialization |
| 107 std::list<ProcessingComponent*> component_list; |
| 108 rtc::scoped_ptr<Beamformer<float>> beamformer; |
| 109 rtc::scoped_ptr<AgcManagerDirect> agc_manager; |
| 110 }; |
| 80 | 111 |
| 81 // Throughout webrtc, it's assumed that success is represented by zero. | 112 // Throughout webrtc, it's assumed that success is represented by zero. |
| 82 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 113 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
| 83 | 114 |
| 84 // This class has two main functionalities: | 115 // This class has two main functionalities: |
| 85 // | 116 // |
| 86 // 1) It is returned instead of the real GainControl after the new AGC has been | 117 // 1) It is returned instead of the real GainControl after the new AGC has been |
| 87 // enabled in order to prevent an outside user from overriding compression | 118 // enabled in order to prevent an outside user from overriding compression |
| 88 // settings. It doesn't do anything in its implementation, except for | 119 // settings. It doesn't do anything in its implementation, except for |
| 89 // delegating the const methods and Enable calls to the real GainControl, so | 120 // delegating the const methods and Enable calls to the real GainControl, so |
| (...skipping 75 matching lines...) |
| 165 | 196 |
| 166 AudioProcessing* AudioProcessing::Create(const Config& config) { | 197 AudioProcessing* AudioProcessing::Create(const Config& config) { |
| 167 return Create(config, nullptr); | 198 return Create(config, nullptr); |
| 168 } | 199 } |
| 169 | 200 |
| 170 AudioProcessing* AudioProcessing::Create(const Config& config, | 201 AudioProcessing* AudioProcessing::Create(const Config& config, |
| 171 Beamformer<float>* beamformer) { | 202 Beamformer<float>* beamformer) { |
| 172 AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer); | 203 AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer); |
| 173 if (apm->Initialize() != kNoError) { | 204 if (apm->Initialize() != kNoError) { |
| 174 delete apm; | 205 delete apm; |
| 175 apm = NULL; | 206 apm = nullptr; |
| 176 } | 207 } |
| 177 | 208 |
| 178 return apm; | 209 return apm; |
| 179 } | 210 } |
| 180 | 211 |
| 181 AudioProcessingImpl::AudioProcessingImpl(const Config& config) | 212 AudioProcessingImpl::AudioProcessingImpl(const Config& config) |
| 182 : AudioProcessingImpl(config, nullptr) {} | 213 : AudioProcessingImpl(config, nullptr) {} |
| 183 | 214 |
| 184 AudioProcessingImpl::AudioProcessingImpl(const Config& config, | 215 AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
| 185 Beamformer<float>* beamformer) | 216 Beamformer<float>* beamformer) |
| 186 : echo_cancellation_(NULL), | 217 : public_submodules_(new ApmPublicSubmodules()), |
| 187 echo_control_mobile_(NULL), | 218 private_submodules_(new ApmPrivateSubmodules(beamformer)), |
| 188 gain_control_(NULL), | 219 constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
| 189 high_pass_filter_(NULL), | 220 config.Get<Beamforming>().array_geometry, |
| 190 level_estimator_(NULL), | 221 config.Get<Beamforming>().target_direction, |
| 191 noise_suppression_(NULL), | 222 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| 192 voice_detection_(NULL), | 223 false, |
| 193 crit_(CriticalSectionWrapper::CreateCriticalSection()), | 224 #else |
| 194 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 225 config.Get<ExperimentalAgc>().enabled, |
| 195 debug_file_(FileWrapper::Create()), | |
| 196 event_msg_(new audioproc::Event()), | |
| 197 #endif | 226 #endif |
| 198 fwd_proc_format_(kSampleRate16kHz), | 227 config.Get<Intelligibility>().enabled, |
| 199 rev_proc_format_(kSampleRate16kHz, 1), | 228 config.Get<Beamforming>().enabled), |
| 200 split_rate_(kSampleRate16kHz), | 229 |
| 201 stream_delay_ms_(0), | |
| 202 delay_offset_ms_(0), | |
| 203 was_stream_delay_set_(false), | |
| 204 last_stream_delay_ms_(0), | |
| 205 last_aec_system_delay_ms_(0), | |
| 206 stream_delay_jumps_(-1), | |
| 207 aec_system_delay_jumps_(-1), | |
| 208 output_will_be_muted_(false), | |
| 209 key_pressed_(false), | |
| 210 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 230 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| 211 use_new_agc_(false), | 231 capture_(false) |
| 212 #else | 232 #else |
| 213 use_new_agc_(config.Get<ExperimentalAgc>().enabled), | 233 capture_(config.Get<ExperimentalNs>().enabled) |
| 214 #endif | 234 #endif |
| 215 agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), | 235 { |
| 216 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | |
| 217 transient_suppressor_enabled_(false), | |
| 218 #else | |
| 219 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), | |
| 220 #endif | |
| 221 beamformer_enabled_(config.Get<Beamforming>().enabled), | |
| 222 beamformer_(beamformer), | |
| 223 array_geometry_(config.Get<Beamforming>().array_geometry), | |
| 224 target_direction_(config.Get<Beamforming>().target_direction), | |
| 225 intelligibility_enabled_(config.Get<Intelligibility>().enabled) { | |
| 226 render_thread_checker_.DetachFromThread(); | 236 render_thread_checker_.DetachFromThread(); |
| 227 capture_thread_checker_.DetachFromThread(); | 237 capture_thread_checker_.DetachFromThread(); |
| 228 | 238 |
| 229 echo_cancellation_ = | 239 { |
| 230 new EchoCancellationImpl(this, crit_, &render_thread_checker_); | 240 rtc::CritScope cs_render(&crit_render_); |
| 231 component_list_.push_back(echo_cancellation_); | 241 rtc::CritScope cs_capture(&crit_capture_); |
| 232 | 242 |
| 233 echo_control_mobile_ = | 243 public_submodules_->echo_cancellation = new EchoCancellationImpl( |
| 234 new EchoControlMobileImpl(this, crit_, &render_thread_checker_); | 244 this, &crit_render_, &crit_capture_, &render_thread_checker_); |
| 235 component_list_.push_back(echo_control_mobile_); | 245 public_submodules_->echo_control_mobile = new EchoControlMobileImpl( |
| 246 this, &crit_render_, &crit_capture_, &render_thread_checker_); |
| 247 public_submodules_->gain_control = |
| 248 new GainControlImpl(this, &crit_capture_, &crit_capture_, |
| 249 &render_thread_checker_, &capture_thread_checker_); |
| 250 public_submodules_->high_pass_filter = |
| 251 new HighPassFilterImpl(this, &crit_capture_); |
| 252 public_submodules_->level_estimator = new LevelEstimatorImpl(this); |
| 253 public_submodules_->noise_suppression = |
| 254 new NoiseSuppressionImpl(this, &crit_capture_); |
| 255 public_submodules_->voice_detection = |
| 256 new VoiceDetectionImpl(this, &crit_capture_); |
| 257 public_submodules_->gain_control_for_new_agc.reset( |
| 258 new GainControlForNewAgc(public_submodules_->gain_control)); |
| 236 | 259 |
| 237 gain_control_ = new GainControlImpl(this, crit_, &render_thread_checker_, | 260 private_submodules_->component_list.push_back( |
| 238 &capture_thread_checker_); | 261 public_submodules_->echo_cancellation); |
| 239 component_list_.push_back(gain_control_); | 262 private_submodules_->component_list.push_back( |
| 240 | 263 public_submodules_->echo_control_mobile); |
| 241 high_pass_filter_ = new HighPassFilterImpl(this, crit_); | 264 private_submodules_->component_list.push_back( |
| 242 component_list_.push_back(high_pass_filter_); | 265 public_submodules_->gain_control); |
| 243 | 266 private_submodules_->component_list.push_back( |
| 244 level_estimator_ = new LevelEstimatorImpl(this, crit_); | 267 public_submodules_->high_pass_filter); |
| 245 component_list_.push_back(level_estimator_); | 268 private_submodules_->component_list.push_back( |
| 246 | 269 public_submodules_->level_estimator); |
| 247 noise_suppression_ = new NoiseSuppressionImpl(this, crit_); | 270 private_submodules_->component_list.push_back( |
| 248 component_list_.push_back(noise_suppression_); | 271 public_submodules_->noise_suppression); |
| 249 | 272 private_submodules_->component_list.push_back( |
| 250 voice_detection_ = new VoiceDetectionImpl(this, crit_); | 273 public_submodules_->voice_detection); |
| 251 component_list_.push_back(voice_detection_); | 274 } |
| 252 | |
| 253 gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_)); | |
| 254 | 275 |
| 255 SetExtraOptions(config); | 276 SetExtraOptions(config); |
| 256 } | 277 } |
| 257 | 278 |
| 258 AudioProcessingImpl::~AudioProcessingImpl() { | 279 AudioProcessingImpl::~AudioProcessingImpl() { |
| 259 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 280 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 260 { | 281 |
| 261 CriticalSectionScoped crit_scoped(crit_); | 282 // Depends on gain_control_ and |
| 262 // Depends on gain_control_ and gain_control_for_new_agc_. | 283 // public_submodules_->gain_control_for_new_agc. |
| 263 agc_manager_.reset(); | 284 private_submodules_->agc_manager.reset(); |
| 264 // Depends on gain_control_. | 285 // Depends on gain_control_. |
| 265 gain_control_for_new_agc_.reset(); | 286 public_submodules_->gain_control_for_new_agc.reset(); |
| 266 while (!component_list_.empty()) { | 287 while (!private_submodules_->component_list.empty()) { |
| 267 ProcessingComponent* component = component_list_.front(); | 288 ProcessingComponent* component = |
| 268 component->Destroy(); | 289 private_submodules_->component_list.front(); |
| 269 delete component; | 290 component->Destroy(); |
| 270 component_list_.pop_front(); | 291 delete component; |
| 271 } | 292 private_submodules_->component_list.pop_front(); |
| 293 } |
| 272 | 294 |
| 273 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 295 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 274 if (debug_file_->Open()) { | 296 if (debug_dump_.debug_file->Open()) { |
| 275 debug_file_->CloseFile(); | 297 debug_dump_.debug_file->CloseFile(); |
| 276 } | 298 } |
| 277 #endif | 299 #endif |
| 278 } | |
| 279 delete crit_; | |
| 280 crit_ = NULL; | |
| 281 } | 300 } |
| 282 | 301 |
| 283 int AudioProcessingImpl::Initialize() { | 302 int AudioProcessingImpl::Initialize() { |
| 303 // Run in a single-threaded manner during initialization. |
| 304 rtc::CritScope cs_render(&crit_render_); |
| 305 rtc::CritScope cs_capture(&crit_capture_); |
| 284 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 306 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 285 CriticalSectionScoped crit_scoped(crit_); | |
| 286 return InitializeLocked(); | 307 return InitializeLocked(); |
| 287 } | 308 } |
| 288 | 309 |
| 289 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, | 310 int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
| 290 int output_sample_rate_hz, | 311 int output_sample_rate_hz, |
| 291 int reverse_sample_rate_hz, | 312 int reverse_sample_rate_hz, |
| 292 ChannelLayout input_layout, | 313 ChannelLayout input_layout, |
| 293 ChannelLayout output_layout, | 314 ChannelLayout output_layout, |
| 294 ChannelLayout reverse_layout) { | 315 ChannelLayout reverse_layout) { |
| 295 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 316 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 296 const ProcessingConfig processing_config = { | 317 const ProcessingConfig processing_config = { |
| 297 {{input_sample_rate_hz, | 318 {{input_sample_rate_hz, |
| 298 ChannelsFromLayout(input_layout), | 319 ChannelsFromLayout(input_layout), |
| 299 LayoutHasKeyboard(input_layout)}, | 320 LayoutHasKeyboard(input_layout)}, |
| 300 {output_sample_rate_hz, | 321 {output_sample_rate_hz, |
| 301 ChannelsFromLayout(output_layout), | 322 ChannelsFromLayout(output_layout), |
| 302 LayoutHasKeyboard(output_layout)}, | 323 LayoutHasKeyboard(output_layout)}, |
| 303 {reverse_sample_rate_hz, | 324 {reverse_sample_rate_hz, |
| 304 ChannelsFromLayout(reverse_layout), | 325 ChannelsFromLayout(reverse_layout), |
| 305 LayoutHasKeyboard(reverse_layout)}, | 326 LayoutHasKeyboard(reverse_layout)}, |
| 306 {reverse_sample_rate_hz, | 327 {reverse_sample_rate_hz, |
| 307 ChannelsFromLayout(reverse_layout), | 328 ChannelsFromLayout(reverse_layout), |
| 308 LayoutHasKeyboard(reverse_layout)}}}; | 329 LayoutHasKeyboard(reverse_layout)}}}; |
| 309 | 330 |
| 310 return Initialize(processing_config); | 331 return Initialize(processing_config); |
| 311 } | 332 } |
| 312 | 333 |
| 313 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { | 334 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
| 335 // Run in a single-threaded manner during initialization. |
| 336 rtc::CritScope cs_render(&crit_render_); |
| 337 rtc::CritScope cs_capture(&crit_capture_); |
| 314 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 338 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 315 CriticalSectionScoped crit_scoped(crit_); | |
| 316 return InitializeLocked(processing_config); | 339 return InitializeLocked(processing_config); |
| 317 } | 340 } |
| 318 | 341 |
| 319 // Calls InitializeLocked() if any of the audio parameters have changed from | 342 // Calls InitializeLocked() if any of the audio parameters have changed from |
| 320 // their current values. | 343 // their current values (needs to be called while holding the crit_render_lock). |
| 321 int AudioProcessingImpl::MaybeInitializeLocked( | 344 int AudioProcessingImpl::MaybeInitialize( |
| 322 const ProcessingConfig& processing_config) { | 345 const ProcessingConfig& processing_config) { |
| 323 RTC_DCHECK(render_thread_checker_.CalledOnValidThread() || | 346 RTC_DCHECK(render_thread_checker_.CalledOnValidThread() || |
| 324 capture_thread_checker_.CalledOnValidThread()); | 347 capture_thread_checker_.CalledOnValidThread()); |
| 325 if (processing_config == shared_state_.api_format_) { | 348 // Called from both threads. Thread check is therefore not possible. |
| 349 if (processing_config == formats_.api_format) { |
| 326 return kNoError; | 350 return kNoError; |
| 327 } | 351 } |
| 352 |
| 353 rtc::CritScope cs_capture(&crit_capture_); |
| 328 return InitializeLocked(processing_config); | 354 return InitializeLocked(processing_config); |
| 329 } | 355 } |
| 330 | 356 |
| 331 int AudioProcessingImpl::InitializeLocked() { | 357 int AudioProcessingImpl::InitializeLocked() { |
| 332 const int fwd_audio_buffer_channels = | 358 const int fwd_audio_buffer_channels = |
| 333 beamformer_enabled_ | 359 constants_.beamformer_enabled |
| 334 ? shared_state_.api_format_.input_stream().num_channels() | 360 ? formats_.api_format.input_stream().num_channels() |
| 335 : shared_state_.api_format_.output_stream().num_channels(); | 361 : formats_.api_format.output_stream().num_channels(); |
| 336 const int rev_audio_buffer_out_num_frames = | 362 const int rev_audio_buffer_out_num_frames = |
| 337 shared_state_.api_format_.reverse_output_stream().num_frames() == 0 | 363 formats_.api_format.reverse_output_stream().num_frames() == 0 |
| 338 ? rev_proc_format_.num_frames() | 364 ? formats_.rev_proc_format.num_frames() |
| 339 : shared_state_.api_format_.reverse_output_stream().num_frames(); | 365 : formats_.api_format.reverse_output_stream().num_frames(); |
| 340 if (shared_state_.api_format_.reverse_input_stream().num_channels() > 0) { | 366 if (formats_.api_format.reverse_input_stream().num_channels() > 0) { |
| 341 render_audio_.reset(new AudioBuffer( | 367 render_.render_audio.reset(new AudioBuffer( |
| 342 shared_state_.api_format_.reverse_input_stream().num_frames(), | 368 formats_.api_format.reverse_input_stream().num_frames(), |
| 343 shared_state_.api_format_.reverse_input_stream().num_channels(), | 369 formats_.api_format.reverse_input_stream().num_channels(), |
| 344 rev_proc_format_.num_frames(), rev_proc_format_.num_channels(), | 370 formats_.rev_proc_format.num_frames(), |
| 371 formats_.rev_proc_format.num_channels(), |
| 345 rev_audio_buffer_out_num_frames)); | 372 rev_audio_buffer_out_num_frames)); |
| 346 if (rev_conversion_needed()) { | 373 if (rev_conversion_needed()) { |
| 347 render_converter_ = AudioConverter::Create( | 374 render_.render_converter = AudioConverter::Create( |
| 348 shared_state_.api_format_.reverse_input_stream().num_channels(), | 375 formats_.api_format.reverse_input_stream().num_channels(), |
| 349 shared_state_.api_format_.reverse_input_stream().num_frames(), | 376 formats_.api_format.reverse_input_stream().num_frames(), |
| 350 shared_state_.api_format_.reverse_output_stream().num_channels(), | 377 formats_.api_format.reverse_output_stream().num_channels(), |
| 351 shared_state_.api_format_.reverse_output_stream().num_frames()); | 378 formats_.api_format.reverse_output_stream().num_frames()); |
| 352 } else { | 379 } else { |
| 353 render_converter_.reset(nullptr); | 380 render_.render_converter.reset(nullptr); |
| 354 } | 381 } |
| 355 } else { | 382 } else { |
| 356 render_audio_.reset(nullptr); | 383 render_.render_audio.reset(nullptr); |
| 357 render_converter_.reset(nullptr); | 384 render_.render_converter.reset(nullptr); |
| 358 } | 385 } |
| 359 capture_audio_.reset( | 386 capture_.capture_audio.reset( |
| 360 new AudioBuffer(shared_state_.api_format_.input_stream().num_frames(), | 387 new AudioBuffer(formats_.api_format.input_stream().num_frames(), |
| 361 shared_state_.api_format_.input_stream().num_channels(), | 388 formats_.api_format.input_stream().num_channels(), |
| 362 fwd_proc_format_.num_frames(), fwd_audio_buffer_channels, | 389 capture_nonlocked_.fwd_proc_format.num_frames(), |
| 363 shared_state_.api_format_.output_stream().num_frames())); | 390 fwd_audio_buffer_channels, |
| 391 formats_.api_format.output_stream().num_frames())); |
| 364 | 392 |
| 365 // Initialize all components. | 393 // Initialize all components. |
| 366 for (auto item : component_list_) { | 394 for (auto item : private_submodules_->component_list) { |
| 367 int err = item->Initialize(); | 395 int err = item->Initialize(); |
| 368 if (err != kNoError) { | 396 if (err != kNoError) { |
| 369 return err; | 397 return err; |
| 370 } | 398 } |
| 371 } | 399 } |
| 372 | 400 |
| 373 InitializeExperimentalAgc(); | 401 InitializeExperimentalAgc(); |
| 374 | 402 |
| 375 InitializeTransient(); | 403 InitializeTransient(); |
| 376 | 404 |
| 377 InitializeBeamformer(); | 405 InitializeBeamformer(); |
| 378 | 406 |
| 379 InitializeIntelligibility(); | 407 InitializeIntelligibility(); |
| 380 | 408 |
| 381 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 409 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 382 if (debug_file_->Open()) { | 410 if (debug_dump_.debug_file->Open()) { |
| 383 int err = WriteInitMessage(); | 411 int err = WriteInitMessage(); |
| 384 if (err != kNoError) { | 412 if (err != kNoError) { |
| 385 return err; | 413 return err; |
| 386 } | 414 } |
| 387 } | 415 } |
| 388 #endif | 416 #endif |
| 389 | 417 |
| 390 return kNoError; | 418 return kNoError; |
| 391 } | 419 } |
| 392 | 420 |
| 393 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 421 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| 394 RTC_DCHECK(render_thread_checker_.CalledOnValidThread() || | |
| 395 capture_thread_checker_.CalledOnValidThread()); | |
| 396 for (const auto& stream : config.streams) { | 422 for (const auto& stream : config.streams) { |
| 397 if (stream.num_channels() < 0) { | 423 if (stream.num_channels() < 0) { |
| 398 return kBadNumberChannelsError; | 424 return kBadNumberChannelsError; |
| 399 } | 425 } |
| 400 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 426 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
| 401 return kBadSampleRateError; | 427 return kBadSampleRateError; |
| 402 } | 428 } |
| 403 } | 429 } |
| 404 | 430 |
| 405 const int num_in_channels = config.input_stream().num_channels(); | 431 const int num_in_channels = config.input_stream().num_channels(); |
| 406 const int num_out_channels = config.output_stream().num_channels(); | 432 const int num_out_channels = config.output_stream().num_channels(); |
| 407 | 433 |
| 408 // Need at least one input channel. | 434 // Need at least one input channel. |
| 409 // Need either one output channel or as many outputs as there are inputs. | 435 // Need either one output channel or as many outputs as there are inputs. |
| 410 if (num_in_channels == 0 || | 436 if (num_in_channels == 0 || |
| 411 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { | 437 !(num_out_channels == 1 || num_out_channels == num_in_channels)) { |
| 412 return kBadNumberChannelsError; | 438 return kBadNumberChannelsError; |
| 413 } | 439 } |
| 414 | 440 |
| 415 if (beamformer_enabled_ && | 441 if (constants_.beamformer_enabled && (static_cast<size_t>(num_in_channels) != |
| 416 (static_cast<size_t>(num_in_channels) != array_geometry_.size() || | 442 constants_.array_geometry.size() || |
| 417 num_out_channels > 1)) { | 443 num_out_channels > 1)) { |
| 418 return kBadNumberChannelsError; | 444 return kBadNumberChannelsError; |
| 419 } | 445 } |
| 420 | 446 |
| 421 shared_state_.api_format_ = config; | 447 formats_.api_format = config; |
| 422 | 448 |
| 423 // We process at the closest native rate >= min(input rate, output rate)... | 449 // We process at the closest native rate >= min(input rate, output rate)... |
| 424 const int min_proc_rate = | 450 const int min_proc_rate = |
| 425 std::min(shared_state_.api_format_.input_stream().sample_rate_hz(), | 451 std::min(formats_.api_format.input_stream().sample_rate_hz(), |
| 426 shared_state_.api_format_.output_stream().sample_rate_hz()); | 452 formats_.api_format.output_stream().sample_rate_hz()); |
| 427 int fwd_proc_rate; | 453 int fwd_proc_rate; |
| 428 for (size_t i = 0; i < kNumNativeSampleRates; ++i) { | 454 for (size_t i = 0; i < kNumNativeSampleRates; ++i) { |
| 429 fwd_proc_rate = kNativeSampleRatesHz[i]; | 455 fwd_proc_rate = kNativeSampleRatesHz[i]; |
| 430 if (fwd_proc_rate >= min_proc_rate) { | 456 if (fwd_proc_rate >= min_proc_rate) { |
| 431 break; | 457 break; |
| 432 } | 458 } |
| 433 } | 459 } |
| 434 // ...with one exception. | 460 // ...with one exception. |
| 435 if (echo_control_mobile_->is_enabled() && | 461 if (public_submodules_->echo_control_mobile->is_enabled() && |
| 436 min_proc_rate > kMaxAECMSampleRateHz) { | 462 min_proc_rate > kMaxAECMSampleRateHz) { |
| 437 fwd_proc_rate = kMaxAECMSampleRateHz; | 463 fwd_proc_rate = kMaxAECMSampleRateHz; |
| 438 } | 464 } |
| 439 | 465 |
| 440 fwd_proc_format_ = StreamConfig(fwd_proc_rate); | 466 capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate); |
| 441 | 467 |
| 442 // We normally process the reverse stream at 16 kHz. Unless... | 468 // We normally process the reverse stream at 16 kHz. Unless... |
| 443 int rev_proc_rate = kSampleRate16kHz; | 469 int rev_proc_rate = kSampleRate16kHz; |
| 444 if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) { | 470 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { |
| 445 // ...the forward stream is at 8 kHz. | 471 // ...the forward stream is at 8 kHz. |
| 446 rev_proc_rate = kSampleRate8kHz; | 472 rev_proc_rate = kSampleRate8kHz; |
| 447 } else { | 473 } else { |
| 448 if (shared_state_.api_format_.reverse_input_stream().sample_rate_hz() == | 474 if (formats_.api_format.reverse_input_stream().sample_rate_hz() == |
| 449 kSampleRate32kHz) { | 475 kSampleRate32kHz) { |
| 450 // ...or the input is at 32 kHz, in which case we use the splitting | 476 // ...or the input is at 32 kHz, in which case we use the splitting |
| 451 // filter rather than the resampler. | 477 // filter rather than the resampler. |
| 452 rev_proc_rate = kSampleRate32kHz; | 478 rev_proc_rate = kSampleRate32kHz; |
| 453 } | 479 } |
| 454 } | 480 } |
| 455 | 481 |
| 456 // Always downmix the reverse stream to mono for analysis. This has been | 482 // Always downmix the reverse stream to mono for analysis. This has been |
| 457 // demonstrated to work well for AEC in most practical scenarios. | 483 // demonstrated to work well for AEC in most practical scenarios. |
| 458 rev_proc_format_ = StreamConfig(rev_proc_rate, 1); | 484 formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1); |
| 459 | 485 |
| 460 if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 486 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz || |
| 461 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 487 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) { |
| 462 split_rate_ = kSampleRate16kHz; | 488 capture_nonlocked_.split_rate = kSampleRate16kHz; |
| 463 } else { | 489 } else { |
| 464 split_rate_ = fwd_proc_format_.sample_rate_hz(); | 490 capture_nonlocked_.split_rate = |
| 491 capture_nonlocked_.fwd_proc_format.sample_rate_hz(); |
| 465 } | 492 } |
| 466 | 493 |
| 467 return InitializeLocked(); | 494 return InitializeLocked(); |
| 468 } | 495 } |
| 469 | 496 |
| 470 void AudioProcessingImpl::SetExtraOptions(const Config& config) { | 497 void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
| 471 CriticalSectionScoped crit_scoped(crit_); | 498 // Run in a single-threaded manner when setting the extra options. |
| 499 rtc::CritScope cs_render(&crit_render_); |
| 500 rtc::CritScope cs_capture(&crit_capture_); |
| 472 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 501 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 473 for (auto item : component_list_) { | 502 for (auto item : private_submodules_->component_list) { |
| 474 item->SetExtraOptions(config); | 503 item->SetExtraOptions(config); |
| 475 } | 504 } |
| 476 | 505 |
| 477 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { | 506 if (capture_.transient_suppressor_enabled != |
| 478 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; | 507 config.Get<ExperimentalNs>().enabled) { |
| 508 capture_.transient_suppressor_enabled = |
| 509 config.Get<ExperimentalNs>().enabled; |
| 479 InitializeTransient(); | 510 InitializeTransient(); |
| 480 } | 511 } |
| 481 } | 512 } |
| 482 | 513 |
| 483 | 514 |
| 484 int AudioProcessingImpl::proc_sample_rate_hz() const { | 515 int AudioProcessingImpl::proc_sample_rate_hz() const { |
| 485 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 516 // Only called from submodules beneath APM, hence locking is not needed. |
| 486 render_thread_checker_.CalledOnValidThread() || | 517 return capture_nonlocked_.fwd_proc_format.sample_rate_hz(); |
| 487 capture_thread_checker_.CalledOnValidThread()); | |
| 488 return fwd_proc_format_.sample_rate_hz(); | |
| 489 } | 518 } |
| 490 | 519 |
| 491 int AudioProcessingImpl::proc_split_sample_rate_hz() const { | 520 int AudioProcessingImpl::proc_split_sample_rate_hz() const { |
| 492 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 521 // Only called from submodules/tests beneath APM, hence locking is not needed. |
| 493 render_thread_checker_.CalledOnValidThread() || | 522 return capture_nonlocked_.split_rate; |
| 494 capture_thread_checker_.CalledOnValidThread()); | |
| 495 return split_rate_; | |
| 496 } | 523 } |
| 497 | 524 |
| 498 int AudioProcessingImpl::num_reverse_channels() const { | 525 int AudioProcessingImpl::num_reverse_channels() const { |
| 499 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 526 // Only called from submodules/tests beneath APM, hence locking is not needed. |
| 500 render_thread_checker_.CalledOnValidThread() || | 527 return formats_.rev_proc_format.num_channels(); |
| 501 capture_thread_checker_.CalledOnValidThread()); | |
| 502 return rev_proc_format_.num_channels(); | |
| 503 } | 528 } |
| 504 | 529 |
| 505 int AudioProcessingImpl::num_input_channels() const { | 530 int AudioProcessingImpl::num_input_channels() const { |
| 531 // Only called from submodules/tests beneath APM, hence locking is not needed. |
| 506 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 532 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 507 return shared_state_.api_format_.input_stream().num_channels(); | 533 return formats_.api_format.input_stream().num_channels(); |
| 508 } | 534 } |
| 509 | 535 |
| 510 int AudioProcessingImpl::num_output_channels() const { | 536 int AudioProcessingImpl::num_output_channels() const { |
| 511 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 537 // Only called from submodules/tests beneath APM, hence locking is not needed. |
| 512 render_thread_checker_.CalledOnValidThread() || | 538 return formats_.api_format.output_stream().num_channels(); |
| 513 capture_thread_checker_.CalledOnValidThread()); | |
| 514 return shared_state_.api_format_.output_stream().num_channels(); | |
| 515 } | 539 } |
| 516 | 540 |
| 517 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { | 541 void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
| 518 CriticalSectionScoped lock(crit_); | 542 rtc::CritScope cs(&crit_capture_); |
| 519 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | 543 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| 520 output_will_be_muted_ = muted; | 544 capture_.output_will_be_muted = muted; |
| 521 if (agc_manager_.get()) { | 545 if (private_submodules_->agc_manager.get()) { |
| 522 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 546 private_submodules_->agc_manager->SetCaptureMuted( |
| 547 capture_.output_will_be_muted); |
| 523 } | 548 } |
| 524 } | 549 } |
| 525 | 550 |
| 526 | 551 |
| 527 int AudioProcessingImpl::ProcessStream(const float* const* src, | 552 int AudioProcessingImpl::ProcessStream(const float* const* src, |
| 528 size_t samples_per_channel, | 553 size_t samples_per_channel, |
| 529 int input_sample_rate_hz, | 554 int input_sample_rate_hz, |
| 530 ChannelLayout input_layout, | 555 ChannelLayout input_layout, |
| 531 int output_sample_rate_hz, | 556 int output_sample_rate_hz, |
| 532 ChannelLayout output_layout, | 557 ChannelLayout output_layout, |
| 533 float* const* dest) { | 558 float* const* dest) { |
| 534 CriticalSectionScoped crit_scoped(crit_); | |
| 535 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 559 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 536 StreamConfig input_stream = shared_state_.api_format_.input_stream(); | 560 StreamConfig input_stream; |
| 561 StreamConfig output_stream; |
| 562 { |
| 563 // Access the formats_.api_format.input_stream beneath the capture lock. |
| 564 // The lock must be released as it is later required in the call |
| 565 // to ProcessStream(,,,); |
| 566 rtc::CritScope cs(&crit_capture_); |
| 567 input_stream = formats_.api_format.input_stream(); |
| 568 output_stream = formats_.api_format.output_stream(); |
| 569 } |
| 570 |
| 537 input_stream.set_sample_rate_hz(input_sample_rate_hz); | 571 input_stream.set_sample_rate_hz(input_sample_rate_hz); |
| 538 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); | 572 input_stream.set_num_channels(ChannelsFromLayout(input_layout)); |
| 539 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); | 573 input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); |
| 540 | |
| 541 StreamConfig output_stream = shared_state_.api_format_.output_stream(); | |
| 542 output_stream.set_sample_rate_hz(output_sample_rate_hz); | 574 output_stream.set_sample_rate_hz(output_sample_rate_hz); |
| 543 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); | 575 output_stream.set_num_channels(ChannelsFromLayout(output_layout)); |
| 544 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); | 576 output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); |
| 545 | 577 |
| 546 if (samples_per_channel != input_stream.num_frames()) { | 578 if (samples_per_channel != input_stream.num_frames()) { |
| 547 return kBadDataLengthError; | 579 return kBadDataLengthError; |
| 548 } | 580 } |
| 549 return ProcessStream(src, input_stream, output_stream, dest); | 581 return ProcessStream(src, input_stream, output_stream, dest); |
| 550 } | 582 } |
| 551 | 583 |
| 552 int AudioProcessingImpl::ProcessStream(const float* const* src, | 584 int AudioProcessingImpl::ProcessStream(const float* const* src, |
| 553 const StreamConfig& input_config, | 585 const StreamConfig& input_config, |
| 554 const StreamConfig& output_config, | 586 const StreamConfig& output_config, |
| 555 float* const* dest) { | 587 float* const* dest) { |
| 556 CriticalSectionScoped crit_scoped(crit_); | |
| 557 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 588 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 589 { |
| 590 // Acquire the capture lock in order to safely call the function |
| 591 // that retrieves the render side data. This function accesses apm |
| 592 // getters that need the capture lock held when being called. |
| 593 rtc::CritScope cs_capture(&crit_capture_); |
| 594 public_submodules_->echo_cancellation->ReadQueuedRenderData(); |
| 595 public_submodules_->echo_control_mobile->ReadQueuedRenderData(); |
| 596 public_submodules_->gain_control->ReadQueuedRenderData(); |
| 597 } |
| 558 if (!src || !dest) { | 598 if (!src || !dest) { |
| 559 return kNullPointerError; | 599 return kNullPointerError; |
| 560 } | 600 } |
| 561 | 601 |
| 562 echo_cancellation_->ReadQueuedRenderData(); | 602 ProcessingConfig processing_config = formats_.api_format; |
| 563 echo_control_mobile_->ReadQueuedRenderData(); | |
| 564 gain_control_->ReadQueuedRenderData(); | |
| 565 | |
| 566 ProcessingConfig processing_config = shared_state_.api_format_; | |
| 567 processing_config.input_stream() = input_config; | 603 processing_config.input_stream() = input_config; |
| 568 processing_config.output_stream() = output_config; | 604 processing_config.output_stream() = output_config; |
| 569 | 605 |
| 570 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 606 { |
| 607 // Do conditional reinitialization. |
| 608 rtc::CritScope cs_render(&crit_render_); |
| 609 RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| 610 } |
| 611 rtc::CritScope cs_capture(&crit_capture_); |
| 612 |
| 571 assert(processing_config.input_stream().num_frames() == | 613 assert(processing_config.input_stream().num_frames() == |
| 572 shared_state_.api_format_.input_stream().num_frames()); | 614 formats_.api_format.input_stream().num_frames()); |
| 573 | 615 |
| 574 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 616 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 575 if (debug_file_->Open()) { | 617 if (debug_dump_.debug_file->Open()) { |
| 576 RETURN_ON_ERR(WriteConfigMessage(false)); | 618 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 577 | 619 |
| 578 event_msg_->set_type(audioproc::Event::STREAM); | 620 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 579 audioproc::Stream* msg = event_msg_->mutable_stream(); | 621 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 580 const size_t channel_size = | 622 const size_t channel_size = |
| 581 sizeof(float) * shared_state_.api_format_.input_stream().num_frames(); | 623 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| 582 for (int i = 0; i < shared_state_.api_format_.input_stream().num_channels(); | 624 for (int i = 0; i < formats_.api_format.input_stream().num_channels(); ++i) |
| 583 ++i) | |
| 584 msg->add_input_channel(src[i], channel_size); | 625 msg->add_input_channel(src[i], channel_size); |
| 585 } | 626 } |
| 586 #endif | 627 #endif |
| 587 | 628 |
| 588 capture_audio_->CopyFrom(src, shared_state_.api_format_.input_stream()); | 629 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
| 589 RETURN_ON_ERR(ProcessStreamLocked()); | 630 RETURN_ON_ERR(ProcessStreamLocked()); |
| 590 capture_audio_->CopyTo(shared_state_.api_format_.output_stream(), dest); | 631 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
| 591 | 632 |
| 592 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 633 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 593 if (debug_file_->Open()) { | 634 if (debug_dump_.debug_file->Open()) { |
| 594 audioproc::Stream* msg = event_msg_->mutable_stream(); | 635 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 595 const size_t channel_size = | 636 const size_t channel_size = |
| 596 sizeof(float) * shared_state_.api_format_.output_stream().num_frames(); | 637 sizeof(float) * formats_.api_format.output_stream().num_frames(); |
| 597 for (int i = 0; | 638 for (int i = 0; i < formats_.api_format.output_stream().num_channels(); ++i) |
| 598 i < shared_state_.api_format_.output_stream().num_channels(); ++i) | |
| 599 msg->add_output_channel(dest[i], channel_size); | 639 msg->add_output_channel(dest[i], channel_size); |
| 600 RETURN_ON_ERR(WriteMessageToDebugFile()); | 640 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 641 &crit_debug_, &debug_dump_.capture)); |
| 601 } | 642 } |
| 602 #endif | 643 #endif |
| 603 | 644 |
| 604 return kNoError; | 645 return kNoError; |
| 605 } | 646 } |
| 606 | 647 |
| 607 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { | 648 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| 608 CriticalSectionScoped crit_scoped(crit_); | |
| 609 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 649 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 610 echo_cancellation_->ReadQueuedRenderData(); | 650 { |
| 611 echo_control_mobile_->ReadQueuedRenderData(); | 651 // Acquire the capture lock in order to safely call the function |
| 612 gain_control_->ReadQueuedRenderData(); | 652 // that retrieves the render side data. This function accesses apm |
| 653 // getters that need the capture lock held when being called. |
| 654 // The lock needs to be released as |
| 655 // public_submodules_->echo_control_mobile->is_enabled() aquires this lock |
| 656 // as well. |
| 657 rtc::CritScope cs_capture(&crit_capture_); |
| 658 public_submodules_->echo_cancellation->ReadQueuedRenderData(); |
| 659 public_submodules_->echo_control_mobile->ReadQueuedRenderData(); |
| 660 public_submodules_->gain_control->ReadQueuedRenderData(); |
| 661 } |
| 613 | 662 |
| 614 if (!frame) { | 663 if (!frame) { |
| 615 return kNullPointerError; | 664 return kNullPointerError; |
| 616 } | 665 } |
| 617 // Must be a native rate. | 666 // Must be a native rate. |
| 618 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 667 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
| 619 frame->sample_rate_hz_ != kSampleRate16kHz && | 668 frame->sample_rate_hz_ != kSampleRate16kHz && |
| 620 frame->sample_rate_hz_ != kSampleRate32kHz && | 669 frame->sample_rate_hz_ != kSampleRate32kHz && |
| 621 frame->sample_rate_hz_ != kSampleRate48kHz) { | 670 frame->sample_rate_hz_ != kSampleRate48kHz) { |
| 622 return kBadSampleRateError; | 671 return kBadSampleRateError; |
| 623 } | 672 } |
| 624 | 673 |
| 625 if (echo_control_mobile_->is_enabled() && | 674 if (public_submodules_->echo_control_mobile->is_enabled() && |
| 626 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { | 675 frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { |
| 627 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; | 676 LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; |
| 628 return kUnsupportedComponentError; | 677 return kUnsupportedComponentError; |
| 629 } | 678 } |
| 630 | 679 |
| 631 // TODO(ajm): The input and output rates and channels are currently | 680 ProcessingConfig processing_config; |
| 632 // constrained to be identical in the int16 interface. | 681 { |
| 633 ProcessingConfig processing_config = shared_state_.api_format_; | 682 // Aquire lock for the access of api_format. |
| 683 // The lock is released immediately due to the conditional |
| 684 // reinitialization. |
| 685 rtc::CritScope cs_capture(&crit_capture_); |
| 686 // TODO(ajm): The input and output rates and channels are currently |
| 687 // constrained to be identical in the int16 interface. |
| 688 processing_config = formats_.api_format; |
| 689 } |
| 634 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 690 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| 635 processing_config.input_stream().set_num_channels(frame->num_channels_); | 691 processing_config.input_stream().set_num_channels(frame->num_channels_); |
| 636 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 692 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| 637 processing_config.output_stream().set_num_channels(frame->num_channels_); | 693 processing_config.output_stream().set_num_channels(frame->num_channels_); |
| 638 | 694 |
| 639 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 695 { |
| 696 // Do conditional reinitialization. |
| 697 rtc::CritScope cs_render(&crit_render_); |
| 698 RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| 699 } |
| 700 rtc::CritScope cs_capture(&crit_capture_); |
| 640 if (frame->samples_per_channel_ != | 701 if (frame->samples_per_channel_ != |
| 641 shared_state_.api_format_.input_stream().num_frames()) { | 702 formats_.api_format.input_stream().num_frames()) { |
| 642 return kBadDataLengthError; | 703 return kBadDataLengthError; |
| 643 } | 704 } |
| 644 | 705 |
| 645 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 706 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 646 if (debug_file_->Open()) { | 707 if (debug_dump_.debug_file->Open()) { |
| 647 event_msg_->set_type(audioproc::Event::STREAM); | 708 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 648 audioproc::Stream* msg = event_msg_->mutable_stream(); | 709 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 649 const size_t data_size = | 710 const size_t data_size = |
| 650 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 711 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 651 msg->set_input_data(frame->data_, data_size); | 712 msg->set_input_data(frame->data_, data_size); |
| 652 } | 713 } |
| 653 #endif | 714 #endif |
| 654 | 715 |
| 655 capture_audio_->DeinterleaveFrom(frame); | 716 capture_.capture_audio->DeinterleaveFrom(frame); |
| 656 RETURN_ON_ERR(ProcessStreamLocked()); | 717 RETURN_ON_ERR(ProcessStreamLocked()); |
| 657 capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed())); | 718 capture_.capture_audio->InterleaveTo(frame, |
| 719 output_copy_needed(is_data_processed())); |
| 658 | 720 |
| 659 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 721 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 660 if (debug_file_->Open()) { | 722 if (debug_dump_.debug_file->Open()) { |
| 661 audioproc::Stream* msg = event_msg_->mutable_stream(); | 723 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 662 const size_t data_size = | 724 const size_t data_size = |
| 663 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 725 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 664 msg->set_output_data(frame->data_, data_size); | 726 msg->set_output_data(frame->data_, data_size); |
| 665 RETURN_ON_ERR(WriteMessageToDebugFile()); | 727 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 728 &crit_debug_, &debug_dump_.capture)); |
| 666 } | 729 } |
| 667 #endif | 730 #endif |
| 668 | 731 |
| 669 return kNoError; | 732 return kNoError; |
| 670 } | 733 } |
| 671 | 734 |
| 672 int AudioProcessingImpl::ProcessStreamLocked() { | 735 int AudioProcessingImpl::ProcessStreamLocked() { |
| 673 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 736 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 674 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 737 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 675 if (debug_file_->Open()) { | 738 if (debug_dump_.debug_file->Open()) { |
| 676 audioproc::Stream* msg = event_msg_->mutable_stream(); | 739 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 677 msg->set_delay(stream_delay_ms_); | 740 msg->set_delay(capture_nonlocked_.stream_delay_ms); |
| 678 msg->set_drift(echo_cancellation_->stream_drift_samples()); | 741 msg->set_drift( |
| 742 public_submodules_->echo_cancellation->stream_drift_samples()); |
| 679 msg->set_level(gain_control()->stream_analog_level()); | 743 msg->set_level(gain_control()->stream_analog_level()); |
| 680 msg->set_keypress(key_pressed_); | 744 msg->set_keypress(capture_.key_pressed); |
| 681 } | 745 } |
| 682 #endif | 746 #endif |
| 683 | 747 |
| 684 MaybeUpdateHistograms(); | 748 MaybeUpdateHistograms(); |
| 685 | 749 |
| 686 AudioBuffer* ca = capture_audio_.get(); // For brevity. | 750 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. |
| 687 | 751 |
| 688 if (use_new_agc_ && gain_control_->is_enabled()) { | 752 if (constants_.use_new_agc && |
| 689 agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(), | 753 public_submodules_->gain_control->is_enabled()) { |
| 690 fwd_proc_format_.num_frames()); | 754 private_submodules_->agc_manager->AnalyzePreProcess( |
| 755 ca->channels()[0], ca->num_channels(), |
| 756 capture_nonlocked_.fwd_proc_format.num_frames()); |
| 691 } | 757 } |
| 692 | 758 |
| 693 bool data_processed = is_data_processed(); | 759 bool data_processed = is_data_processed(); |
| 694 if (analysis_needed(data_processed)) { | 760 if (analysis_needed(data_processed)) { |
| 695 ca->SplitIntoFrequencyBands(); | 761 ca->SplitIntoFrequencyBands(); |
| 696 } | 762 } |
| 697 | 763 |
| 698 if (intelligibility_enabled_) { | 764 if (constants_.intelligibility_enabled) { |
| 699 intelligibility_enhancer_->AnalyzeCaptureAudio( | 765 public_submodules_->intelligibility_enhancer->AnalyzeCaptureAudio( |
| 700 ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels()); | 766 ca->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, |
| 767 ca->num_channels()); |
| 701 } | 768 } |
| 702 | 769 |
| 703 if (beamformer_enabled_) { | 770 if (constants_.beamformer_enabled) { |
| 704 beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f()); | 771 private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(), |
| 772 ca->split_data_f()); |
| 705 ca->set_num_channels(1); | 773 ca->set_num_channels(1); |
| 706 } | 774 } |
| 707 | 775 |
| 708 RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca)); | 776 RETURN_ON_ERR(public_submodules_->high_pass_filter->ProcessCaptureAudio(ca)); |
| 709 RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca)); | 777 RETURN_ON_ERR(public_submodules_->gain_control->AnalyzeCaptureAudio(ca)); |
| 710 RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca)); | 778 RETURN_ON_ERR(public_submodules_->noise_suppression->AnalyzeCaptureAudio(ca)); |
| 711 RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca)); | 779 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessCaptureAudio(ca)); |
| 712 | 780 |
| 713 if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) { | 781 if (public_submodules_->echo_control_mobile->is_enabled() && |
| 782 public_submodules_->noise_suppression->is_enabled()) { |
| 714 ca->CopyLowPassToReference(); | 783 ca->CopyLowPassToReference(); |
| 715 } | 784 } |
| 716 RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca)); | 785 RETURN_ON_ERR(public_submodules_->noise_suppression->ProcessCaptureAudio(ca)); |
| 717 RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca)); | 786 RETURN_ON_ERR( |
| 718 RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca)); | 787 public_submodules_->echo_control_mobile->ProcessCaptureAudio(ca)); |
| 788 RETURN_ON_ERR(public_submodules_->voice_detection->ProcessCaptureAudio(ca)); |
| 719 | 789 |
| 720 if (use_new_agc_ && gain_control_->is_enabled() && | 790 if (constants_.use_new_agc && |
| 721 (!beamformer_enabled_ || beamformer_->is_target_present())) { | 791 public_submodules_->gain_control->is_enabled() && |
| 722 agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz], | 792 (!constants_.beamformer_enabled || |
| 723 ca->num_frames_per_band(), split_rate_); | 793 private_submodules_->beamformer->is_target_present())) { |
| 794 private_submodules_->agc_manager->Process( |
| 795 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), |
| 796 capture_nonlocked_.split_rate); |
| 724 } | 797 } |
| 725 RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca)); | 798 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca)); |
| 726 | 799 |
| 727 if (synthesis_needed(data_processed)) { | 800 if (synthesis_needed(data_processed)) { |
| 728 ca->MergeFrequencyBands(); | 801 ca->MergeFrequencyBands(); |
| 729 } | 802 } |
| 730 | 803 |
| 731 // TODO(aluebs): Investigate if the transient suppression placement should be | 804 // TODO(aluebs): Investigate if the transient suppression placement should be |
| 732 // before or after the AGC. | 805 // before or after the AGC. |
| 733 if (transient_suppressor_enabled_) { | 806 if (capture_.transient_suppressor_enabled) { |
| 734 float voice_probability = | 807 float voice_probability = |
| 735 agc_manager_.get() ? agc_manager_->voice_probability() : 1.f; | 808 private_submodules_->agc_manager.get() |
| 809 ? private_submodules_->agc_manager->voice_probability() |
| 810 : 1.f; |
| 736 | 811 |
| 737 transient_suppressor_->Suppress( | 812 public_submodules_->transient_suppressor->Suppress( |
| 738 ca->channels_f()[0], ca->num_frames(), ca->num_channels(), | 813 ca->channels_f()[0], ca->num_frames(), ca->num_channels(), |
| 739 ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(), | 814 ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(), |
| 740 ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability, | 815 ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability, |
| 741 key_pressed_); | 816 capture_.key_pressed); |
| 742 } | 817 } |
| 743 | 818 |
| 744 // The level estimator operates on the recombined data. | 819 // The level estimator operates on the recombined data. |
| 745 RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); | 820 RETURN_ON_ERR(public_submodules_->level_estimator->ProcessStream(ca)); |
| 746 | 821 |
| 747 was_stream_delay_set_ = false; | 822 capture_.was_stream_delay_set = false; |
| 748 return kNoError; | 823 return kNoError; |
| 749 } | 824 } |
| 750 | 825 |
| 751 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, | 826 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
| 752 size_t samples_per_channel, | 827 size_t samples_per_channel, |
| 753 int rev_sample_rate_hz, | 828 int rev_sample_rate_hz, |
| 754 ChannelLayout layout) { | 829 ChannelLayout layout) { |
| 755 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 830 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 831 rtc::CritScope cs(&crit_render_); |
| 756 const StreamConfig reverse_config = { | 832 const StreamConfig reverse_config = { |
| 757 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), | 833 rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), |
| 758 }; | 834 }; |
| 759 if (samples_per_channel != reverse_config.num_frames()) { | 835 if (samples_per_channel != reverse_config.num_frames()) { |
| 760 return kBadDataLengthError; | 836 return kBadDataLengthError; |
| 761 } | 837 } |
| 762 return AnalyzeReverseStream(data, reverse_config, reverse_config); | 838 return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config); |
| 763 } | 839 } |
| 764 | 840 |
| 765 int AudioProcessingImpl::ProcessReverseStream( | 841 int AudioProcessingImpl::ProcessReverseStream( |
| 766 const float* const* src, | 842 const float* const* src, |
| 767 const StreamConfig& reverse_input_config, | 843 const StreamConfig& reverse_input_config, |
| 768 const StreamConfig& reverse_output_config, | 844 const StreamConfig& reverse_output_config, |
| 769 float* const* dest) { | 845 float* const* dest) { |
| 770 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 846 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 771 RETURN_ON_ERR( | 847 rtc::CritScope cs(&crit_render_); |
| 772 AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); | 848 RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, |
| 849 reverse_output_config)); |
| 773 if (is_rev_processed()) { | 850 if (is_rev_processed()) { |
| 774 render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), | 851 render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(), |
| 775 dest); | 852 dest); |
| 776 } else if (rev_conversion_needed()) { | 853 } else if (rev_conversion_needed()) { |
| 777 render_converter_->Convert(src, reverse_input_config.num_samples(), dest, | 854 render_.render_converter->Convert(src, reverse_input_config.num_samples(), |
| 778 reverse_output_config.num_samples()); | 855 dest, |
| 856 reverse_output_config.num_samples()); |
| 779 } else { | 857 } else { |
| 780 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), | 858 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), |
| 781 reverse_input_config.num_channels(), dest); | 859 reverse_input_config.num_channels(), dest); |
| 782 } | 860 } |
| 783 | 861 |
| 784 return kNoError; | 862 return kNoError; |
| 785 } | 863 } |
| 786 | 864 |
| 787 int AudioProcessingImpl::AnalyzeReverseStream( | 865 int AudioProcessingImpl::AnalyzeReverseStreamLocked( |
| 788 const float* const* src, | 866 const float* const* src, |
| 789 const StreamConfig& reverse_input_config, | 867 const StreamConfig& reverse_input_config, |
| 790 const StreamConfig& reverse_output_config) { | 868 const StreamConfig& reverse_output_config) { |
| 791 CriticalSectionScoped crit_scoped(crit_); | |
| 792 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 869 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 793 if (src == NULL) { | 870 if (src == nullptr) { |
| 794 return kNullPointerError; | 871 return kNullPointerError; |
| 795 } | 872 } |
| 796 | 873 |
| 797 if (reverse_input_config.num_channels() <= 0) { | 874 if (reverse_input_config.num_channels() <= 0) { |
| 798 return kBadNumberChannelsError; | 875 return kBadNumberChannelsError; |
| 799 } | 876 } |
| 800 | 877 |
| 801 ProcessingConfig processing_config = shared_state_.api_format_; | 878 ProcessingConfig processing_config = formats_.api_format; |
| 802 processing_config.reverse_input_stream() = reverse_input_config; | 879 processing_config.reverse_input_stream() = reverse_input_config; |
| 803 processing_config.reverse_output_stream() = reverse_output_config; | 880 processing_config.reverse_output_stream() = reverse_output_config; |
| 804 | 881 |
| 805 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 882 RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| 806 assert(reverse_input_config.num_frames() == | 883 assert(reverse_input_config.num_frames() == |
| 807 shared_state_.api_format_.reverse_input_stream().num_frames()); | 884 formats_.api_format.reverse_input_stream().num_frames()); |
| 808 | 885 |
| 809 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 886 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 810 if (debug_file_->Open()) { | 887 if (debug_dump_.debug_file->Open()) { |
| 811 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 888 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
| 812 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 889 audioproc::ReverseStream* msg = |
| 890 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 813 const size_t channel_size = | 891 const size_t channel_size = |
| 814 sizeof(float) * | 892 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| 815 shared_state_.api_format_.reverse_input_stream().num_frames(); | |
| 816 for (int i = 0; | 893 for (int i = 0; |
| 817 i < shared_state_.api_format_.reverse_input_stream().num_channels(); | 894 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| 818 ++i) | |
| 819 msg->add_channel(src[i], channel_size); | 895 msg->add_channel(src[i], channel_size); |
| 820 RETURN_ON_ERR(WriteMessageToDebugFile()); | 896 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 897 &crit_debug_, &debug_dump_.render)); |
| 821 } | 898 } |
| 822 #endif | 899 #endif |
| 823 | 900 |
| 824 render_audio_->CopyFrom(src, | 901 render_.render_audio->CopyFrom(src, |
| 825 shared_state_.api_format_.reverse_input_stream()); | 902 formats_.api_format.reverse_input_stream()); |
| 826 return ProcessReverseStreamLocked(); | 903 return ProcessReverseStreamLocked(); |
| 827 } | 904 } |
| 828 | 905 |
| 829 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 906 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| 830 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 907 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 831 RETURN_ON_ERR(AnalyzeReverseStream(frame)); | 908 RETURN_ON_ERR(AnalyzeReverseStream(frame)); |
| 909 rtc::CritScope cs(&crit_render_); |
| 832 if (is_rev_processed()) { | 910 if (is_rev_processed()) { |
| 833 render_audio_->InterleaveTo(frame, true); | 911 render_.render_audio->InterleaveTo(frame, true); |
| 834 } | 912 } |
| 835 | 913 |
| 836 return kNoError; | 914 return kNoError; |
| 837 } | 915 } |
| 838 | 916 |
| 839 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { | 917 int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| 840 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 918 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 841 CriticalSectionScoped crit_scoped(crit_); | 919 rtc::CritScope cs(&crit_render_); |
| 842 if (frame == NULL) { | 920 if (frame == nullptr) { |
| 843 return kNullPointerError; | 921 return kNullPointerError; |
| 844 } | 922 } |
| 845 // Must be a native rate. | 923 // Must be a native rate. |
| 846 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 924 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
| 847 frame->sample_rate_hz_ != kSampleRate16kHz && | 925 frame->sample_rate_hz_ != kSampleRate16kHz && |
| 848 frame->sample_rate_hz_ != kSampleRate32kHz && | 926 frame->sample_rate_hz_ != kSampleRate32kHz && |
| 849 frame->sample_rate_hz_ != kSampleRate48kHz) { | 927 frame->sample_rate_hz_ != kSampleRate48kHz) { |
| 850 return kBadSampleRateError; | 928 return kBadSampleRateError; |
| 851 } | 929 } |
| 852 // This interface does not tolerate different forward and reverse rates. | 930 // This interface does not tolerate different forward and reverse rates. |
| 853 if (frame->sample_rate_hz_ != | 931 if (frame->sample_rate_hz_ != |
| 854 shared_state_.api_format_.input_stream().sample_rate_hz()) { | 932 formats_.api_format.input_stream().sample_rate_hz()) { |
| 855 return kBadSampleRateError; | 933 return kBadSampleRateError; |
| 856 } | 934 } |
| 857 | 935 |
| 858 if (frame->num_channels_ <= 0) { | 936 if (frame->num_channels_ <= 0) { |
| 859 return kBadNumberChannelsError; | 937 return kBadNumberChannelsError; |
| 860 } | 938 } |
| 861 | 939 |
| 862 ProcessingConfig processing_config = shared_state_.api_format_; | 940 ProcessingConfig processing_config = formats_.api_format; |
| 863 processing_config.reverse_input_stream().set_sample_rate_hz( | 941 processing_config.reverse_input_stream().set_sample_rate_hz( |
| 864 frame->sample_rate_hz_); | 942 frame->sample_rate_hz_); |
| 865 processing_config.reverse_input_stream().set_num_channels( | 943 processing_config.reverse_input_stream().set_num_channels( |
| 866 frame->num_channels_); | 944 frame->num_channels_); |
| 867 processing_config.reverse_output_stream().set_sample_rate_hz( | 945 processing_config.reverse_output_stream().set_sample_rate_hz( |
| 868 frame->sample_rate_hz_); | 946 frame->sample_rate_hz_); |
| 869 processing_config.reverse_output_stream().set_num_channels( | 947 processing_config.reverse_output_stream().set_num_channels( |
| 870 frame->num_channels_); | 948 frame->num_channels_); |
| 871 | 949 |
| 872 RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); | 950 RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| 873 if (frame->samples_per_channel_ != | 951 if (frame->samples_per_channel_ != |
| 874 shared_state_.api_format_.reverse_input_stream().num_frames()) { | 952 formats_.api_format.reverse_input_stream().num_frames()) { |
| 875 return kBadDataLengthError; | 953 return kBadDataLengthError; |
| 876 } | 954 } |
| 877 | 955 |
| 878 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 956 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 879 if (debug_file_->Open()) { | 957 if (debug_dump_.debug_file->Open()) { |
| 880 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 958 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
| 881 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 959 audioproc::ReverseStream* msg = |
| 960 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 882 const size_t data_size = | 961 const size_t data_size = |
| 883 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 962 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 884 msg->set_data(frame->data_, data_size); | 963 msg->set_data(frame->data_, data_size); |
| 885 RETURN_ON_ERR(WriteMessageToDebugFile()); | 964 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 965 &crit_debug_, &debug_dump_.render)); |
| 886 } | 966 } |
| 887 #endif | 967 #endif |
| 888 render_audio_->DeinterleaveFrom(frame); | 968 render_.render_audio->DeinterleaveFrom(frame); |
| 889 return ProcessReverseStreamLocked(); | 969 return ProcessReverseStreamLocked(); |
| 890 } | 970 } |
| 891 | 971 |
| 892 int AudioProcessingImpl::ProcessReverseStreamLocked() { | 972 int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| 893 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 973 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 894 AudioBuffer* ra = render_audio_.get(); // For brevity. | 974 AudioBuffer* ra = render_.render_audio.get(); // For brevity. |
| 895 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { | 975 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz) { |
| 896 ra->SplitIntoFrequencyBands(); | 976 ra->SplitIntoFrequencyBands(); |
| 897 } | 977 } |
| 898 | 978 |
| 899 if (intelligibility_enabled_) { | 979 if (constants_.intelligibility_enabled) { |
| 900 intelligibility_enhancer_->ProcessRenderAudio( | 980 // Currently run in single-threaded mode when the intelligibility |
| 901 ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); | 981 // enhancer is activated. |
| 982 // TODO(peah): Fix to be properly multi-threaded. |
| 983 rtc::CritScope cs(&crit_capture_); |
| 984 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( |
| 985 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, |
| 986 ra->num_channels()); |
| 902 } | 987 } |
| 903 | 988 |
| 904 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); | 989 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); |
| 905 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); | 990 RETURN_ON_ERR( |
| 906 if (!use_new_agc_) { | 991 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); |
| 907 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); | 992 if (!constants_.use_new_agc) { |
| 993 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); |
| 908 } | 994 } |
| 909 | 995 |
| 910 if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz && | 996 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz && |
| 911 is_rev_processed()) { | 997 is_rev_processed()) { |
| 912 ra->MergeFrequencyBands(); | 998 ra->MergeFrequencyBands(); |
| 913 } | 999 } |
| 914 | 1000 |
| 915 return kNoError; | 1001 return kNoError; |
| 916 } | 1002 } |
| 917 | 1003 |
| 918 int AudioProcessingImpl::set_stream_delay_ms(int delay) { | 1004 int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
| 919 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1005 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1006 rtc::CritScope cs(&crit_capture_); |
| 920 Error retval = kNoError; | 1007 Error retval = kNoError; |
| 921 was_stream_delay_set_ = true; | 1008 capture_.was_stream_delay_set = true; |
| 922 delay += delay_offset_ms_; | 1009 delay += capture_.delay_offset_ms; |
| 923 | 1010 |
| 924 if (delay < 0) { | 1011 if (delay < 0) { |
| 925 delay = 0; | 1012 delay = 0; |
| 926 retval = kBadStreamParameterWarning; | 1013 retval = kBadStreamParameterWarning; |
| 927 } | 1014 } |
| 928 | 1015 |
| 929 // TODO(ajm): the max is rather arbitrarily chosen; investigate. | 1016 // TODO(ajm): the max is rather arbitrarily chosen; investigate. |
| 930 if (delay > 500) { | 1017 if (delay > 500) { |
| 931 delay = 500; | 1018 delay = 500; |
| 932 retval = kBadStreamParameterWarning; | 1019 retval = kBadStreamParameterWarning; |
| 933 } | 1020 } |
| 934 | 1021 |
| 935 stream_delay_ms_ = delay; | 1022 capture_nonlocked_.stream_delay_ms = delay; |
| 936 return retval; | 1023 return retval; |
| 937 } | 1024 } |
| 938 | 1025 |
| 939 int AudioProcessingImpl::stream_delay_ms() const { | 1026 int AudioProcessingImpl::stream_delay_ms() const { |
| 940 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1027 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 941 return stream_delay_ms_; | 1028 return capture_nonlocked_.stream_delay_ms; |
| 942 } | 1029 } |
| 943 | 1030 |
| 944 bool AudioProcessingImpl::was_stream_delay_set() const { | 1031 bool AudioProcessingImpl::was_stream_delay_set() const { |
| 945 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1032 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 946 return was_stream_delay_set_; | 1033 return capture_.was_stream_delay_set; |
| 947 } | 1034 } |
| 948 | 1035 |
| 949 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { | 1036 void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { |
| 1037 rtc::CritScope cs(&crit_capture_); |
| 950 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1038 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 951 key_pressed_ = key_pressed; | 1039 capture_.key_pressed = key_pressed; |
| 952 } | 1040 } |
| 953 | 1041 |
| 954 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 1042 void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
| 955 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1043 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 956 CriticalSectionScoped crit_scoped(crit_); | 1044 rtc::CritScope cs(&crit_capture_); |
| 957 delay_offset_ms_ = offset; | 1045 capture_.delay_offset_ms = offset; |
| 958 } | 1046 } |
| 959 | 1047 |
| 960 int AudioProcessingImpl::delay_offset_ms() const { | 1048 int AudioProcessingImpl::delay_offset_ms() const { |
| 961 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1049 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 962 return delay_offset_ms_; | 1050 rtc::CritScope cs(&crit_capture_); |
| 1051 return capture_.delay_offset_ms; |
| 963 } | 1052 } |
| 964 | 1053 |
| 965 int AudioProcessingImpl::StartDebugRecording( | 1054 int AudioProcessingImpl::StartDebugRecording( |
| 966 const char filename[AudioProcessing::kMaxFilenameSize]) { | 1055 const char filename[AudioProcessing::kMaxFilenameSize]) { |
| 967 CriticalSectionScoped crit_scoped(crit_); | 1056 // Run in a single-threaded manner. |
| 1057 rtc::CritScope cs_render(&crit_render_); |
| 1058 rtc::CritScope cs_capture(&crit_capture_); |
| 968 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1059 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 969 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1060 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
| 970 | 1061 |
| 971 if (filename == NULL) { | 1062 if (filename == nullptr) { |
| 972 return kNullPointerError; | 1063 return kNullPointerError; |
| 973 } | 1064 } |
| 974 | 1065 |
| 975 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1066 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 976 // Stop any ongoing recording. | 1067 // Stop any ongoing recording. |
| 977 if (debug_file_->Open()) { | 1068 if (debug_dump_.debug_file->Open()) { |
| 978 if (debug_file_->CloseFile() == -1) { | 1069 if (debug_dump_.debug_file->CloseFile() == -1) { |
| 979 return kFileError; | 1070 return kFileError; |
| 980 } | 1071 } |
| 981 } | 1072 } |
| 982 | 1073 |
| 983 if (debug_file_->OpenFile(filename, false) == -1) { | 1074 if (debug_dump_.debug_file->OpenFile(filename, false) == -1) { |
| 984 debug_file_->CloseFile(); | 1075 debug_dump_.debug_file->CloseFile(); |
| 985 return kFileError; | 1076 return kFileError; |
| 986 } | 1077 } |
| 987 | 1078 |
| 988 RETURN_ON_ERR(WriteConfigMessage(true)); | 1079 RETURN_ON_ERR(WriteConfigMessage(true)); |
| 989 RETURN_ON_ERR(WriteInitMessage()); | 1080 RETURN_ON_ERR(WriteInitMessage()); |
| 990 return kNoError; | 1081 return kNoError; |
| 991 #else | 1082 #else |
| 992 return kUnsupportedFunctionError; | 1083 return kUnsupportedFunctionError; |
| 993 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1084 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 994 } | 1085 } |
| 995 | 1086 |
| 996 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { | 1087 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
| 997 CriticalSectionScoped crit_scoped(crit_); | 1088 // Run in a single-threaded manner. |
| 1089 rtc::CritScope cs_render(&crit_render_); |
| 1090 rtc::CritScope cs_capture(&crit_capture_); |
| 998 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1091 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 999 | 1092 |
| 1000 if (handle == NULL) { | 1093 if (handle == nullptr) { |
| 1001 return kNullPointerError; | 1094 return kNullPointerError; |
| 1002 } | 1095 } |
| 1003 | 1096 |
| 1004 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1097 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1005 // Stop any ongoing recording. | 1098 // Stop any ongoing recording. |
| 1006 if (debug_file_->Open()) { | 1099 if (debug_dump_.debug_file->Open()) { |
| 1007 if (debug_file_->CloseFile() == -1) { | 1100 if (debug_dump_.debug_file->CloseFile() == -1) { |
| 1008 return kFileError; | 1101 return kFileError; |
| 1009 } | 1102 } |
| 1010 } | 1103 } |
| 1011 | 1104 |
| 1012 if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) { | 1105 if (debug_dump_.debug_file->OpenFromFileHandle(handle, true, false) == -1) { |
| 1013 return kFileError; | 1106 return kFileError; |
| 1014 } | 1107 } |
| 1015 | 1108 |
| 1016 RETURN_ON_ERR(WriteConfigMessage(true)); | 1109 RETURN_ON_ERR(WriteConfigMessage(true)); |
| 1017 RETURN_ON_ERR(WriteInitMessage()); | 1110 RETURN_ON_ERR(WriteInitMessage()); |
| 1018 return kNoError; | 1111 return kNoError; |
| 1019 #else | 1112 #else |
| 1020 return kUnsupportedFunctionError; | 1113 return kUnsupportedFunctionError; |
| 1021 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1114 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1022 } | 1115 } |
| 1023 | 1116 |
| 1024 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( | 1117 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
| 1025 rtc::PlatformFile handle) { | 1118 rtc::PlatformFile handle) { |
| 1119 // Run in a single-threaded manner. |
| 1120 rtc::CritScope cs_render(&crit_render_); |
| 1121 rtc::CritScope cs_capture(&crit_capture_); |
| 1026 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1122 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1027 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1123 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
| 1028 return StartDebugRecording(stream); | 1124 return StartDebugRecording(stream); |
| 1029 } | 1125 } |
| 1030 | 1126 |
| 1031 int AudioProcessingImpl::StopDebugRecording() { | 1127 int AudioProcessingImpl::StopDebugRecording() { |
| 1032 CriticalSectionScoped crit_scoped(crit_); | 1128 // Run in a single-threaded manner. |
| 1129 rtc::CritScope cs_render(&crit_render_); |
| 1130 rtc::CritScope cs_capture(&crit_capture_); |
| 1033 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1131 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1034 | 1132 |
| 1035 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1133 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1036 // We just return if recording hasn't started. | 1134 // We just return if recording hasn't started. |
| 1037 if (debug_file_->Open()) { | 1135 if (debug_dump_.debug_file->Open()) { |
| 1038 if (debug_file_->CloseFile() == -1) { | 1136 if (debug_dump_.debug_file->CloseFile() == -1) { |
| 1039 return kFileError; | 1137 return kFileError; |
| 1040 } | 1138 } |
| 1041 } | 1139 } |
| 1042 return kNoError; | 1140 return kNoError; |
| 1043 #else | 1141 #else |
| 1044 return kUnsupportedFunctionError; | 1142 return kUnsupportedFunctionError; |
| 1045 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1143 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1046 } | 1144 } |
| 1047 | 1145 |
| 1048 EchoCancellation* AudioProcessingImpl::echo_cancellation() const { | 1146 EchoCancellation* AudioProcessingImpl::echo_cancellation() const { |
| 1049 return echo_cancellation_; | 1147 // Adding a lock here has no effect as it allows any access to the submodule |
| 1148 // from the returned pointer. |
| 1149 return public_submodules_->echo_cancellation; |
| 1050 } | 1150 } |
| 1051 | 1151 |
| 1052 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { | 1152 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { |
| 1053 return echo_control_mobile_; | 1153 // Adding a lock here has no effect as it allows any access to the submodule |
| 1154 // from the returned pointer. |
| 1155 return public_submodules_->echo_control_mobile; |
| 1054 } | 1156 } |
| 1055 | 1157 |
| 1056 GainControl* AudioProcessingImpl::gain_control() const { | 1158 GainControl* AudioProcessingImpl::gain_control() const { |
| 1057 if (use_new_agc_) { | 1159 // Adding a lock here has no effect as it allows any access to the submodule |
| 1058 return gain_control_for_new_agc_.get(); | 1160 // from the returned pointer. |
| 1161 if (constants_.use_new_agc) { |
| 1162 return public_submodules_->gain_control_for_new_agc.get(); |
| 1059 } | 1163 } |
| 1060 return gain_control_; | 1164 return public_submodules_->gain_control; |
| 1061 } | 1165 } |
| 1062 | 1166 |
| 1063 HighPassFilter* AudioProcessingImpl::high_pass_filter() const { | 1167 HighPassFilter* AudioProcessingImpl::high_pass_filter() const { |
| 1064 return high_pass_filter_; | 1168 // Adding a lock here has no effect as it allows any access to the submodule |
| 1169 // from the returned pointer. |
| 1170 return public_submodules_->high_pass_filter; |
| 1065 } | 1171 } |
| 1066 | 1172 |
| 1067 LevelEstimator* AudioProcessingImpl::level_estimator() const { | 1173 LevelEstimator* AudioProcessingImpl::level_estimator() const { |
| 1068 return level_estimator_; | 1174 // Adding a lock here has no effect as it allows any access to the submodule |
| 1175 // from the returned pointer. |
| 1176 return public_submodules_->level_estimator; |
| 1069 } | 1177 } |
| 1070 | 1178 |
| 1071 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { | 1179 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { |
| 1072 return noise_suppression_; | 1180 // Adding a lock here has no effect as it allows any access to the submodule |
| 1181 // from the returned pointer. |
| 1182 return public_submodules_->noise_suppression; |
| 1073 } | 1183 } |
| 1074 | 1184 |
| 1075 VoiceDetection* AudioProcessingImpl::voice_detection() const { | 1185 VoiceDetection* AudioProcessingImpl::voice_detection() const { |
| 1076 return voice_detection_; | 1186 // Adding a lock here has no effect as it allows any access to the submodule |
| 1187 // from the returned pointer. |
| 1188 return public_submodules_->voice_detection; |
| 1077 } | 1189 } |
| 1078 | 1190 |
| 1079 bool AudioProcessingImpl::is_data_processed() const { | 1191 bool AudioProcessingImpl::is_data_processed() const { |
| 1080 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1192 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1081 if (beamformer_enabled_) { | 1193 if (constants_.beamformer_enabled) { |
| 1082 return true; | 1194 return true; |
| 1083 } | 1195 } |
| 1084 | 1196 |
| 1085 int enabled_count = 0; | 1197 int enabled_count = 0; |
| 1086 for (auto item : component_list_) { | 1198 for (auto item : private_submodules_->component_list) { |
| 1087 if (item->is_component_enabled()) { | 1199 if (item->is_component_enabled()) { |
| 1088 enabled_count++; | 1200 enabled_count++; |
| 1089 } | 1201 } |
| 1090 } | 1202 } |
| 1091 | 1203 |
| 1092 // Data is unchanged if no components are enabled, or if only level_estimator_ | 1204 // Data is unchanged if no components are enabled, or if only |
| 1093 // or voice_detection_ is enabled. | 1205 // public_submodules_->level_estimator |
| 1206 // or public_submodules_->voice_detection is enabled. |
| 1094 if (enabled_count == 0) { | 1207 if (enabled_count == 0) { |
| 1095 return false; | 1208 return false; |
| 1096 } else if (enabled_count == 1) { | 1209 } else if (enabled_count == 1) { |
| 1097 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) { | 1210 if (public_submodules_->level_estimator->is_enabled() || |
| 1211 public_submodules_->voice_detection->is_enabled()) { |
| 1098 return false; | 1212 return false; |
| 1099 } | 1213 } |
| 1100 } else if (enabled_count == 2) { | 1214 } else if (enabled_count == 2) { |
| 1101 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) { | 1215 if (public_submodules_->level_estimator->is_enabled() && |
| 1216 public_submodules_->voice_detection->is_enabled()) { |
| 1102 return false; | 1217 return false; |
| 1103 } | 1218 } |
| 1104 } | 1219 } |
| 1105 return true; | 1220 return true; |
| 1106 } | 1221 } |
| 1107 | 1222 |
| 1108 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { | 1223 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { |
| 1109 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1224 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1110 // Check if we've upmixed or downmixed the audio. | 1225 // Check if we've upmixed or downmixed the audio. |
| 1111 return ((shared_state_.api_format_.output_stream().num_channels() != | 1226 return ((formats_.api_format.output_stream().num_channels() != |
| 1112 shared_state_.api_format_.input_stream().num_channels()) || | 1227 formats_.api_format.input_stream().num_channels()) || |
| 1113 is_data_processed || transient_suppressor_enabled_); | 1228 is_data_processed || capture_.transient_suppressor_enabled); |
| 1114 } | 1229 } |
| 1115 | 1230 |
| 1116 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { | 1231 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { |
| 1117 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1232 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1118 return (is_data_processed && | 1233 return (is_data_processed && |
| 1119 (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 1234 (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| 1120 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); | 1235 kSampleRate32kHz || |
| 1236 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| 1237 kSampleRate48kHz)); |
| 1121 } | 1238 } |
| 1122 | 1239 |
| 1123 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { | 1240 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { |
| 1124 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1241 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1125 if (!is_data_processed && !voice_detection_->is_enabled() && | 1242 if (!is_data_processed && |
| 1126 !transient_suppressor_enabled_) { | 1243 !public_submodules_->voice_detection->is_enabled() && |
| 1127 // Only level_estimator_ is enabled. | 1244 !capture_.transient_suppressor_enabled) { |
| 1245 // Only public_submodules_->level_estimator is enabled. |
| 1128 return false; | 1246 return false; |
| 1129 } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || | 1247 } else if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| 1130 fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { | 1248 kSampleRate32kHz || |
| 1131 // Something besides level_estimator_ is enabled, and we have super-wb. | 1249 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| 1250 kSampleRate48kHz) { |
| 1251 // Something besides public_submodules_->level_estimator is enabled, and we |
| 1252 // have super-wb. |
| 1132 return true; | 1253 return true; |
| 1133 } | 1254 } |
| 1134 return false; | 1255 return false; |
| 1135 } | 1256 } |
| 1136 | 1257 |
| 1137 bool AudioProcessingImpl::is_rev_processed() const { | 1258 bool AudioProcessingImpl::is_rev_processed() const { |
| 1138 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); | 1259 RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| 1139 return intelligibility_enabled_ && intelligibility_enhancer_->active(); | 1260 return constants_.intelligibility_enabled && |
| 1261 public_submodules_->intelligibility_enhancer->active(); |
| 1140 } | 1262 } |
| 1141 | 1263 |
| 1142 bool AudioProcessingImpl::rev_conversion_needed() const { | 1264 bool AudioProcessingImpl::rev_conversion_needed() const { |
| 1143 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 1265 return (formats_.api_format.reverse_input_stream() != |
| 1144 render_thread_checker_.CalledOnValidThread() || | 1266 formats_.api_format.reverse_output_stream()); |
| 1145 capture_thread_checker_.CalledOnValidThread()); | |
| 1146 return (shared_state_.api_format_.reverse_input_stream() != | |
| 1147 shared_state_.api_format_.reverse_output_stream()); | |
| 1148 } | 1267 } |
| 1149 | 1268 |
| 1150 void AudioProcessingImpl::InitializeExperimentalAgc() { | 1269 void AudioProcessingImpl::InitializeExperimentalAgc() { |
| 1151 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 1270 if (constants_.use_new_agc) { |
| 1152 render_thread_checker_.CalledOnValidThread() || | 1271 if (!private_submodules_->agc_manager.get()) { |
| 1153 capture_thread_checker_.CalledOnValidThread()); | 1272 private_submodules_->agc_manager.reset(new AgcManagerDirect( |
| 1154 if (use_new_agc_) { | 1273 public_submodules_->gain_control, |
| 1155 if (!agc_manager_.get()) { | 1274 public_submodules_->gain_control_for_new_agc.get(), |
| 1156 agc_manager_.reset(new AgcManagerDirect(gain_control_, | 1275 constants_.agc_startup_min_volume)); |
| 1157 gain_control_for_new_agc_.get(), | |
| 1158 agc_startup_min_volume_)); | |
| 1159 } | 1276 } |
| 1160 agc_manager_->Initialize(); | 1277 private_submodules_->agc_manager->Initialize(); |
| 1161 agc_manager_->SetCaptureMuted(output_will_be_muted_); | 1278 private_submodules_->agc_manager->SetCaptureMuted( |
| 1279 capture_.output_will_be_muted); |
| 1162 } | 1280 } |
| 1163 } | 1281 } |
| 1164 | 1282 |
| 1165 void AudioProcessingImpl::InitializeTransient() { | 1283 void AudioProcessingImpl::InitializeTransient() { |
| 1166 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 1284 if (capture_.transient_suppressor_enabled) { |
| 1167 render_thread_checker_.CalledOnValidThread() || | 1285 if (!public_submodules_->transient_suppressor.get()) { |
| 1168 capture_thread_checker_.CalledOnValidThread()); | 1286 public_submodules_->transient_suppressor.reset(new TransientSuppressor()); |
| 1169 if (transient_suppressor_enabled_) { | |
| 1170 if (!transient_suppressor_.get()) { | |
| 1171 transient_suppressor_.reset(new TransientSuppressor()); | |
| 1172 } | 1287 } |
| 1173 transient_suppressor_->Initialize( | 1288 public_submodules_->transient_suppressor->Initialize( |
| 1174 fwd_proc_format_.sample_rate_hz(), split_rate_, | 1289 capture_nonlocked_.fwd_proc_format.sample_rate_hz(), |
| 1175 shared_state_.api_format_.output_stream().num_channels()); | 1290 capture_nonlocked_.split_rate, |
| 1291 formats_.api_format.output_stream().num_channels()); |
| 1176 } | 1292 } |
| 1177 } | 1293 } |
| 1178 | 1294 |
| 1179 void AudioProcessingImpl::InitializeBeamformer() { | 1295 void AudioProcessingImpl::InitializeBeamformer() { |
| 1180 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 1296 if (constants_.beamformer_enabled) { |
| 1181 render_thread_checker_.CalledOnValidThread() || | 1297 if (!private_submodules_->beamformer) { |
| 1182 capture_thread_checker_.CalledOnValidThread()); | 1298 private_submodules_->beamformer.reset(new NonlinearBeamformer( |
| 1183 if (beamformer_enabled_) { | 1299 constants_.array_geometry, constants_.target_direction)); |
| 1184 if (!beamformer_) { | |
| 1185 beamformer_.reset( | |
| 1186 new NonlinearBeamformer(array_geometry_, target_direction_)); | |
| 1187 } | 1300 } |
| 1188 beamformer_->Initialize(kChunkSizeMs, split_rate_); | 1301 private_submodules_->beamformer->Initialize(kChunkSizeMs, |
| 1302 capture_nonlocked_.split_rate); |
| 1189 } | 1303 } |
| 1190 } | 1304 } |
| 1191 | 1305 |
| 1192 void AudioProcessingImpl::InitializeIntelligibility() { | 1306 void AudioProcessingImpl::InitializeIntelligibility() { |
| 1193 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 1307 if (constants_.intelligibility_enabled) { |
| 1194 render_thread_checker_.CalledOnValidThread() || | |
| 1195 capture_thread_checker_.CalledOnValidThread()); | |
| 1196 if (intelligibility_enabled_) { | |
| 1197 IntelligibilityEnhancer::Config config; | 1308 IntelligibilityEnhancer::Config config; |
| 1198 config.sample_rate_hz = split_rate_; | 1309 config.sample_rate_hz = capture_nonlocked_.split_rate; |
| 1199 config.num_capture_channels = capture_audio_->num_channels(); | 1310 config.num_capture_channels = capture_.capture_audio->num_channels(); |
| 1200 config.num_render_channels = render_audio_->num_channels(); | 1311 config.num_render_channels = render_.render_audio->num_channels(); |
| 1201 intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config)); | 1312 public_submodules_->intelligibility_enhancer.reset( |
| 1313 new IntelligibilityEnhancer(config)); |
| 1202 } | 1314 } |
| 1203 } | 1315 } |
| 1204 | 1316 |
| 1205 void AudioProcessingImpl::MaybeUpdateHistograms() { | 1317 void AudioProcessingImpl::MaybeUpdateHistograms() { |
| 1206 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1318 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1207 static const int kMinDiffDelayMs = 60; | 1319 static const int kMinDiffDelayMs = 60; |
| 1208 | 1320 |
| 1209 if (echo_cancellation()->is_enabled()) { | 1321 if (echo_cancellation()->is_enabled()) { |
| 1210 // Activate delay_jumps_ counters if we know echo_cancellation is running. | 1322 // Activate delay_jumps_ counters if we know echo_cancellation is running. |
| 1211 // If a stream has echo we know that the echo_cancellation is in process. | 1323 // If a stream has echo we know that the echo_cancellation is in process. |
| 1212 if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { | 1324 if (capture_.stream_delay_jumps == -1 && |
| 1213 stream_delay_jumps_ = 0; | 1325 echo_cancellation()->stream_has_echo()) { |
| 1326 capture_.stream_delay_jumps = 0; |
| 1214 } | 1327 } |
| 1215 if (aec_system_delay_jumps_ == -1 && | 1328 if (capture_.aec_system_delay_jumps == -1 && |
| 1216 echo_cancellation()->stream_has_echo()) { | 1329 echo_cancellation()->stream_has_echo()) { |
| 1217 aec_system_delay_jumps_ = 0; | 1330 capture_.aec_system_delay_jumps = 0; |
| 1218 } | 1331 } |
| 1219 | 1332 |
| 1220 // Detect a jump in platform reported system delay and log the difference. | 1333 // Detect a jump in platform reported system delay and log the difference. |
| 1221 const int diff_stream_delay_ms = stream_delay_ms_ - last_stream_delay_ms_; | 1334 const int diff_stream_delay_ms = |
| 1222 if (diff_stream_delay_ms > kMinDiffDelayMs && last_stream_delay_ms_ != 0) { | 1335 capture_nonlocked_.stream_delay_ms - capture_.last_stream_delay_ms; |
| 1336 if (diff_stream_delay_ms > kMinDiffDelayMs && |
| 1337 capture_.last_stream_delay_ms != 0) { |
| 1223 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump", | 1338 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump", |
| 1224 diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100); | 1339 diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100); |
| 1225 if (stream_delay_jumps_ == -1) { | 1340 if (capture_.stream_delay_jumps == -1) { |
| 1226 stream_delay_jumps_ = 0; // Activate counter if needed. | 1341 capture_.stream_delay_jumps = 0; // Activate counter if needed. |
| 1227 } | 1342 } |
| 1228 stream_delay_jumps_++; | 1343 capture_.stream_delay_jumps++; |
| 1229 } | 1344 } |
| 1230 last_stream_delay_ms_ = stream_delay_ms_; | 1345 capture_.last_stream_delay_ms = capture_nonlocked_.stream_delay_ms; |
| 1231 | 1346 |
| 1232 // Detect a jump in AEC system delay and log the difference. | 1347 // Detect a jump in AEC system delay and log the difference. |
| 1233 const int frames_per_ms = rtc::CheckedDivExact(split_rate_, 1000); | 1348 const int frames_per_ms = |
| 1349 rtc::CheckedDivExact(capture_nonlocked_.split_rate, 1000); |
| 1234 const int aec_system_delay_ms = | 1350 const int aec_system_delay_ms = |
| 1235 WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms; | 1351 WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms; |
| 1236 const int diff_aec_system_delay_ms = | 1352 const int diff_aec_system_delay_ms = |
| 1237 aec_system_delay_ms - last_aec_system_delay_ms_; | 1353 aec_system_delay_ms - capture_.last_aec_system_delay_ms; |
| 1238 if (diff_aec_system_delay_ms > kMinDiffDelayMs && | 1354 if (diff_aec_system_delay_ms > kMinDiffDelayMs && |
| 1239 last_aec_system_delay_ms_ != 0) { | 1355 capture_.last_aec_system_delay_ms != 0) { |
| 1240 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump", | 1356 RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump", |
| 1241 diff_aec_system_delay_ms, kMinDiffDelayMs, 1000, | 1357 diff_aec_system_delay_ms, kMinDiffDelayMs, 1000, |
| 1242 100); | 1358 100); |
| 1243 if (aec_system_delay_jumps_ == -1) { | 1359 if (capture_.aec_system_delay_jumps == -1) { |
| 1244 aec_system_delay_jumps_ = 0; // Activate counter if needed. | 1360 capture_.aec_system_delay_jumps = 0; // Activate counter if needed. |
| 1245 } | 1361 } |
| 1246 aec_system_delay_jumps_++; | 1362 capture_.aec_system_delay_jumps++; |
| 1247 } | 1363 } |
| 1248 last_aec_system_delay_ms_ = aec_system_delay_ms; | 1364 capture_.last_aec_system_delay_ms = aec_system_delay_ms; |
| 1249 } | 1365 } |
| 1250 } | 1366 } |
| 1251 | 1367 |
| 1252 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { | 1368 void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { |
| 1369 // Run in a single-threaded manner. |
| 1370 rtc::CritScope cs_render(&crit_render_); |
| 1371 rtc::CritScope cs_capture(&crit_capture_); |
| 1253 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1372 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1254 CriticalSectionScoped crit_scoped(crit_); | 1373 |
| 1255 if (stream_delay_jumps_ > -1) { | 1374 if (capture_.stream_delay_jumps > -1) { |
| 1256 RTC_HISTOGRAM_ENUMERATION( | 1375 RTC_HISTOGRAM_ENUMERATION( |
| 1257 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", | 1376 "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", |
| 1258 stream_delay_jumps_, 51); | 1377 capture_.stream_delay_jumps, 51); |
| 1259 } | 1378 } |
| 1260 stream_delay_jumps_ = -1; | 1379 capture_.stream_delay_jumps = -1; |
| 1261 last_stream_delay_ms_ = 0; | 1380 capture_.last_stream_delay_ms = 0; |
| 1262 | 1381 |
| 1263 if (aec_system_delay_jumps_ > -1) { | 1382 if (capture_.aec_system_delay_jumps > -1) { |
| 1264 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 1383 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", |
| 1265 aec_system_delay_jumps_, 51); | 1384 capture_.aec_system_delay_jumps, 51); |
| 1266 } | 1385 } |
| 1267 aec_system_delay_jumps_ = -1; | 1386 capture_.aec_system_delay_jumps = -1; |
| 1268 last_aec_system_delay_ms_ = 0; | 1387 capture_.last_aec_system_delay_ms = 0; |
| 1269 } | 1388 } |
| 1270 | 1389 |
| 1271 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1390 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1272 int AudioProcessingImpl::WriteMessageToDebugFile() { | 1391 int AudioProcessingImpl::WriteMessageToDebugFile( |
| 1273 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1392 FileWrapper* debug_file, |
| 1274 int32_t size = event_msg_->ByteSize(); | 1393 rtc::CriticalSection* crit_debug, |
| 1394 ApmDebugDumpThreadState* debug_state) { |
| 1395 // Thread checker not possible due to function being static. |
| 1396 int32_t size = debug_state->event_msg->ByteSize(); |
| 1275 if (size <= 0) { | 1397 if (size <= 0) { |
| 1276 return kUnspecifiedError; | 1398 return kUnspecifiedError; |
| 1277 } | 1399 } |
| 1278 #if defined(WEBRTC_ARCH_BIG_ENDIAN) | 1400 #if defined(WEBRTC_ARCH_BIG_ENDIAN) |
| 1279 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be | 1401 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be |
| 1280 // pretty safe in assuming little-endian. | 1402 // pretty safe in assuming little-endian. |
| 1281 #endif | 1403 #endif |
| 1282 | 1404 |
| 1283 if (!event_msg_->SerializeToString(&event_str_)) { | 1405 if (!debug_state->event_msg->SerializeToString(&debug_state->event_str)) { |
| 1284 return kUnspecifiedError; | 1406 return kUnspecifiedError; |
| 1285 } | 1407 } |
| 1286 | 1408 |
| 1287 // Write message preceded by its size. | 1409 { |
| 1288 if (!debug_file_->Write(&size, sizeof(int32_t))) { | 1410 // Ensure atomic writes of the message. |
| 1289 return kFileError; | 1411 rtc::CritScope cs_capture(crit_debug); |
| 1290 } | 1412 // Write message preceded by its size. |
| 1291 if (!debug_file_->Write(event_str_.data(), event_str_.length())) { | 1413 if (!debug_file->Write(&size, sizeof(int32_t))) { |
| 1292 return kFileError; | 1414 return kFileError; |
| 1415 } |
| 1416 if (!debug_file->Write(debug_state->event_str.data(), |
| 1417 debug_state->event_str.length())) { |
| 1418 return kFileError; |
| 1419 } |
| 1293 } | 1420 } |
| 1294 | 1421 |
| 1295 event_msg_->Clear(); | 1422 debug_state->event_msg->Clear(); |
| 1296 | 1423 |
| 1297 return kNoError; | 1424 return kNoError; |
| 1298 } | 1425 } |
| 1299 | 1426 |
| 1300 int AudioProcessingImpl::WriteInitMessage() { | 1427 int AudioProcessingImpl::WriteInitMessage() { |
| 1301 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread() || | 1428 debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT); |
| 1302 render_thread_checker_.CalledOnValidThread() || | 1429 audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init(); |
| 1303 capture_thread_checker_.CalledOnValidThread()); | 1430 msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz()); |
| 1304 event_msg_->set_type(audioproc::Event::INIT); | 1431 |
| 1305 audioproc::Init* msg = event_msg_->mutable_init(); | |
| 1306 msg->set_sample_rate( | |
| 1307 shared_state_.api_format_.input_stream().sample_rate_hz()); | |
| 1308 msg->set_num_input_channels( | 1432 msg->set_num_input_channels( |
| 1309 shared_state_.api_format_.input_stream().num_channels()); | 1433 formats_.api_format.input_stream().num_channels()); |
| 1310 msg->set_num_output_channels( | 1434 msg->set_num_output_channels( |
| 1311 shared_state_.api_format_.output_stream().num_channels()); | 1435 formats_.api_format.output_stream().num_channels()); |
| 1312 msg->set_num_reverse_channels( | 1436 msg->set_num_reverse_channels( |
| 1313 shared_state_.api_format_.reverse_input_stream().num_channels()); | 1437 formats_.api_format.reverse_input_stream().num_channels()); |
| 1314 msg->set_reverse_sample_rate( | 1438 msg->set_reverse_sample_rate( |
| 1315 shared_state_.api_format_.reverse_input_stream().sample_rate_hz()); | 1439 formats_.api_format.reverse_input_stream().sample_rate_hz()); |
| 1316 msg->set_output_sample_rate( | 1440 msg->set_output_sample_rate( |
| 1317 shared_state_.api_format_.output_stream().sample_rate_hz()); | 1441 formats_.api_format.output_stream().sample_rate_hz()); |
| 1318 // TODO(ekmeyerson): Add reverse output fields to event_msg_. | 1442 // TODO(ekmeyerson): Add reverse output fields to |
| 1443 // debug_dump_.capture.event_msg. |
| 1319 | 1444 |
| 1320 RETURN_ON_ERR(WriteMessageToDebugFile()); | 1445 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1446 &crit_debug_, &debug_dump_.capture)); |
| 1321 return kNoError; | 1447 return kNoError; |
| 1322 } | 1448 } |
| 1323 | 1449 |
| 1324 int AudioProcessingImpl::WriteConfigMessage(bool forced) { | 1450 int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
| 1325 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); | 1451 RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| 1326 audioproc::Config config; | 1452 audioproc::Config config; |
| 1327 | 1453 |
| 1328 config.set_aec_enabled(echo_cancellation_->is_enabled()); | 1454 config.set_aec_enabled(public_submodules_->echo_cancellation->is_enabled()); |
| 1329 config.set_aec_delay_agnostic_enabled( | 1455 config.set_aec_delay_agnostic_enabled( |
| 1330 echo_cancellation_->is_delay_agnostic_enabled()); | 1456 public_submodules_->echo_cancellation->is_delay_agnostic_enabled()); |
| 1331 config.set_aec_drift_compensation_enabled( | 1457 config.set_aec_drift_compensation_enabled( |
| 1332 echo_cancellation_->is_drift_compensation_enabled()); | 1458 public_submodules_->echo_cancellation->is_drift_compensation_enabled()); |
| 1333 config.set_aec_extended_filter_enabled( | 1459 config.set_aec_extended_filter_enabled( |
| 1334 echo_cancellation_->is_extended_filter_enabled()); | 1460 public_submodules_->echo_cancellation->is_extended_filter_enabled()); |
| 1335 config.set_aec_suppression_level( | 1461 config.set_aec_suppression_level(static_cast<int>( |
| 1336 static_cast<int>(echo_cancellation_->suppression_level())); | 1462 public_submodules_->echo_cancellation->suppression_level())); |
| 1337 | 1463 |
| 1338 config.set_aecm_enabled(echo_control_mobile_->is_enabled()); | 1464 config.set_aecm_enabled( |
| 1465 public_submodules_->echo_control_mobile->is_enabled()); |
| 1339 config.set_aecm_comfort_noise_enabled( | 1466 config.set_aecm_comfort_noise_enabled( |
| 1340 echo_control_mobile_->is_comfort_noise_enabled()); | 1467 public_submodules_->echo_control_mobile->is_comfort_noise_enabled()); |
| 1341 config.set_aecm_routing_mode( | 1468 config.set_aecm_routing_mode(static_cast<int>( |
| 1342 static_cast<int>(echo_control_mobile_->routing_mode())); | 1469 public_submodules_->echo_control_mobile->routing_mode())); |
| 1343 | 1470 |
| 1344 config.set_agc_enabled(gain_control_->is_enabled()); | 1471 config.set_agc_enabled(public_submodules_->gain_control->is_enabled()); |
| 1345 config.set_agc_mode(static_cast<int>(gain_control_->mode())); | 1472 config.set_agc_mode( |
| 1346 config.set_agc_limiter_enabled(gain_control_->is_limiter_enabled()); | 1473 static_cast<int>(public_submodules_->gain_control->mode())); |
| 1347 config.set_noise_robust_agc_enabled(use_new_agc_); | 1474 config.set_agc_limiter_enabled( |
| 1475 public_submodules_->gain_control->is_limiter_enabled()); |
| 1476 config.set_noise_robust_agc_enabled(constants_.use_new_agc); |
| 1348 | 1477 |
| 1349 config.set_hpf_enabled(high_pass_filter_->is_enabled()); | 1478 config.set_hpf_enabled(public_submodules_->high_pass_filter->is_enabled()); |
| 1350 | 1479 |
| 1351 config.set_ns_enabled(noise_suppression_->is_enabled()); | 1480 config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled()); |
| 1352 config.set_ns_level(static_cast<int>(noise_suppression_->level())); | 1481 config.set_ns_level( |
| 1482 static_cast<int>(public_submodules_->noise_suppression->level())); |
| 1353 | 1483 |
| 1354 config.set_transient_suppression_enabled(transient_suppressor_enabled_); | 1484 config.set_transient_suppression_enabled( |
| 1485 capture_.transient_suppressor_enabled); |
| 1355 | 1486 |
| 1356 std::string serialized_config = config.SerializeAsString(); | 1487 std::string serialized_config = config.SerializeAsString(); |
| 1357 if (!forced && last_serialized_config_ == serialized_config) { | 1488 if (!forced && |
| 1489 debug_dump_.capture.last_serialized_config == serialized_config) { |
| 1358 return kNoError; | 1490 return kNoError; |
| 1359 } | 1491 } |
| 1360 | 1492 |
| 1361 last_serialized_config_ = serialized_config; | 1493 debug_dump_.capture.last_serialized_config = serialized_config; |
| 1362 | 1494 |
| 1363 event_msg_->set_type(audioproc::Event::CONFIG); | 1495 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
| 1364 event_msg_->mutable_config()->CopyFrom(config); | 1496 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| 1365 | 1497 |
| 1366 RETURN_ON_ERR(WriteMessageToDebugFile()); | 1498 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1499 &crit_debug_, &debug_dump_.capture)); |
| 1367 return kNoError; | 1500 return kNoError; |
| 1368 } | 1501 } |
| 1369 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1502 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1370 | 1503 |
| 1371 } // namespace webrtc | 1504 } // namespace webrtc |
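The new code above replaces the single CriticalSectionScoped guard with two independent locks, crit_render_ and crit_capture_, plus a dedicated crit_debug_ lock that keeps each size-prefixed protobuf record in the debug dump atomic; API calls that touch both paths (StartDebugRecording, StopDebugRecording, UpdateHistogramsOnCallEnd) take the render lock first and then the capture lock. The sketch below illustrates that lock layout in isolation. It is a minimal standalone approximation, not the AudioProcessingImpl API: std::mutex stands in for rtc::CriticalSection, and the class, method, and member names are hypothetical.

// Standalone sketch of the render/capture/debug lock split (assumptions:
// std::mutex replaces rtc::CriticalSection; all names are illustrative).
#include <cstdint>
#include <mutex>
#include <string>
#include <vector>

class ApmLockingSketch {
 public:
  // Capture-side entry point: guarded by the capture lock only.
  void ProcessCaptureStream(const std::string& debug_payload) {
    std::lock_guard<std::mutex> capture_lock(crit_capture_);
    // ... capture-path processing would run here ...
    WriteDebugMessage(debug_payload);
  }

  // Render-side entry point: guarded by the render lock only.
  void ProcessRenderStream(const std::string& debug_payload) {
    std::lock_guard<std::mutex> render_lock(crit_render_);
    // ... render-path processing would run here ...
    WriteDebugMessage(debug_payload);
  }

  // Calls that touch both paths run "in a single-threaded manner" by taking
  // the render lock first, then the capture lock.
  void Reinitialize() {
    std::lock_guard<std::mutex> render_lock(crit_render_);
    std::lock_guard<std::mutex> capture_lock(crit_capture_);
    std::lock_guard<std::mutex> debug_lock(crit_debug_);
    debug_log_.clear();
  }

 private:
  // Appends a size-prefixed record under the debug lock so that concurrent
  // render- and capture-side writers cannot interleave partial messages.
  void WriteDebugMessage(const std::string& serialized) {
    std::lock_guard<std::mutex> debug_lock(crit_debug_);
    const int32_t size = static_cast<int32_t>(serialized.size());
    const uint8_t* size_bytes = reinterpret_cast<const uint8_t*>(&size);
    debug_log_.insert(debug_log_.end(), size_bytes, size_bytes + sizeof(size));
    debug_log_.insert(debug_log_.end(), serialized.begin(), serialized.end());
  }

  // Locks are always acquired in the order render -> capture -> debug, so the
  // three guards cannot deadlock against each other.
  std::mutex crit_render_;
  std::mutex crit_capture_;
  std::mutex crit_debug_;
  std::vector<uint8_t> debug_log_;  // Stand-in for the FileWrapper-backed file.
};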