OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
75 AudioProcessing::kSampleRate16kHz, | 75 AudioProcessing::kSampleRate16kHz, |
76 AudioProcessing::kSampleRate32kHz, | 76 AudioProcessing::kSampleRate32kHz, |
77 AudioProcessing::kSampleRate48kHz}; | 77 AudioProcessing::kSampleRate48kHz}; |
78 const size_t AudioProcessing::kNumNativeSampleRates = | 78 const size_t AudioProcessing::kNumNativeSampleRates = |
79 arraysize(AudioProcessing::kNativeSampleRatesHz); | 79 arraysize(AudioProcessing::kNativeSampleRatesHz); |
80 const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing:: | 80 const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing:: |
81 kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1]; | 81 kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1]; |
82 | 82 |
83 namespace { | 83 namespace { |
84 | 84 |
85 const int kInternalNativeRates[] = {AudioProcessing::kSampleRate8kHz, | |
86 AudioProcessing::kSampleRate16kHz, | |
87 #ifdef WEBRTC_ARCH_ARM_FAMILY | |
88 AudioProcessing::kSampleRate32kHz}; | |
89 #else | |
90 AudioProcessing::kSampleRate32kHz, | |
91 AudioProcessing::kSampleRate48kHz}; | |
92 #endif // WEBRTC_ARCH_ARM_FAMILY | |
93 | |
94 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { | 85 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { |
95 switch (layout) { | 86 switch (layout) { |
96 case AudioProcessing::kMono: | 87 case AudioProcessing::kMono: |
97 case AudioProcessing::kStereo: | 88 case AudioProcessing::kStereo: |
98 return false; | 89 return false; |
99 case AudioProcessing::kMonoAndKeyboard: | 90 case AudioProcessing::kMonoAndKeyboard: |
100 case AudioProcessing::kStereoAndKeyboard: | 91 case AudioProcessing::kStereoAndKeyboard: |
101 return true; | 92 return true; |
102 } | 93 } |
103 | 94 |
104 assert(false); | 95 assert(false); |
105 return false; | 96 return false; |
106 } | 97 } |
107 | 98 |
108 bool is_multi_band(int sample_rate_hz) { | 99 bool SampleRateSupportsMultiBand(int sample_rate_hz) { |
109 return sample_rate_hz == AudioProcessing::kSampleRate32kHz || | 100 return sample_rate_hz == AudioProcessing::kSampleRate32kHz || |
110 sample_rate_hz == AudioProcessing::kSampleRate48kHz; | 101 sample_rate_hz == AudioProcessing::kSampleRate48kHz; |
111 } | 102 } |
112 | 103 |
113 int ClosestHigherNativeRate(int min_proc_rate) { | 104 int FindNativeProcessRateToUse(int minimum_rate, bool band_splitting_required) { |
114 for (int rate : kInternalNativeRates) { | 105 #ifdef WEBRTC_ARCH_ARM_FAMILY |
115 if (rate >= min_proc_rate) { | 106 const int kMaxSplittingNativeProcessRate = AudioProcessing::kSampleRate32kHz; |
| 107 #else |
| 108 const int kMaxSplittingNativeProcessRate = AudioProcessing::kSampleRate48kHz; |
| 109 #endif |
| 110 RTC_DCHECK_LE(kMaxSplittingNativeProcessRate, |
| 111 AudioProcessing::kMaxNativeSampleRateHz); |
| 112 const int uppermost_native_rate = band_splitting_required |
| 113 ? kMaxSplittingNativeProcessRate |
| 114 : AudioProcessing::kSampleRate48kHz; |
| 115 |
| 116 for (auto rate : AudioProcessing::kNativeSampleRatesHz) { |
| 117 if (rate >= uppermost_native_rate) { |
| 118 return uppermost_native_rate; |
| 119 } |
| 120 if (rate >= minimum_rate) { |
116 return rate; | 121 return rate; |
117 } | 122 } |
118 } | 123 } |
119 return kInternalNativeRates[arraysize(kInternalNativeRates) - 1]; | 124 RTC_NOTREACHED(); |
| 125 return uppermost_native_rate; |
120 } | 126 } |
121 | 127 |
122 } // namespace | 128 } // namespace |
123 | 129 |
124 // Throughout webrtc, it's assumed that success is represented by zero. | 130 // Throughout webrtc, it's assumed that success is represented by zero. |
125 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 131 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
126 | 132 |
| 133 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} |
| 134 |
| 135 bool AudioProcessingImpl::ApmSubmoduleStates::Update( |
| 136 bool high_pass_filter_enabled, |
| 137 bool echo_canceller_enabled, |
| 138 bool mobile_echo_controller_enabled, |
| 139 bool noise_suppressor_enabled, |
| 140 bool intelligibility_enhancer_enabled, |
| 141 bool beamformer_enabled, |
| 142 bool adaptive_gain_controller_enabled, |
| 143 bool level_controller_enabled, |
| 144 bool voice_activity_detector_enabled, |
| 145 bool level_estimator_enabled, |
| 146 bool transient_suppressor_enabled) { |
| 147 bool changed = false; |
| 148 changed |= (high_pass_filter_enabled != high_pass_filter_enabled_); |
| 149 changed |= (echo_canceller_enabled != echo_canceller_enabled_); |
| 150 changed |= |
| 151 (mobile_echo_controller_enabled != mobile_echo_controller_enabled_); |
| 152 changed |= (noise_suppressor_enabled != noise_suppressor_enabled_); |
| 153 changed |= |
| 154 (intelligibility_enhancer_enabled != intelligibility_enhancer_enabled_); |
| 155 changed |= (beamformer_enabled != beamformer_enabled_); |
| 156 changed |= |
| 157 (adaptive_gain_controller_enabled != adaptive_gain_controller_enabled_); |
| 158 changed |= (level_controller_enabled != level_controller_enabled_); |
| 159 changed |= (level_estimator_enabled != level_estimator_enabled_); |
| 160 changed |= |
| 161 (voice_activity_detector_enabled != voice_activity_detector_enabled_); |
| 162 changed |= (transient_suppressor_enabled != transient_suppressor_enabled_); |
| 163 if (changed) { |
| 164 high_pass_filter_enabled_ = high_pass_filter_enabled; |
| 165 echo_canceller_enabled_ = echo_canceller_enabled; |
| 166 mobile_echo_controller_enabled_ = mobile_echo_controller_enabled; |
| 167 noise_suppressor_enabled_ = noise_suppressor_enabled; |
| 168 intelligibility_enhancer_enabled_ = intelligibility_enhancer_enabled; |
| 169 beamformer_enabled_ = beamformer_enabled; |
| 170 adaptive_gain_controller_enabled_ = adaptive_gain_controller_enabled; |
| 171 level_controller_enabled_ = level_controller_enabled; |
| 172 level_estimator_enabled_ = level_estimator_enabled; |
| 173 voice_activity_detector_enabled_ = voice_activity_detector_enabled; |
| 174 transient_suppressor_enabled_ = transient_suppressor_enabled; |
| 175 } |
| 176 |
| 177 changed |= first_update_; |
| 178 first_update_ = false; |
| 179 return changed; |
| 180 } |
| 181 |
| 182 bool AudioProcessingImpl::ApmSubmoduleStates::CaptureMultiBandSubModulesActive() |
| 183 const { |
| 184 #if WEBRTC_INTELLIGIBILITY_ENHANCER |
| 185 return CaptureMultiBandProcessingActive() || |
| 186 intelligibility_enhancer_enabled_ || voice_activity_detector_enabled_; |
| 187 #else |
| 188 return CaptureMultiBandProcessingActive() || voice_activity_detector_enabled_; |
| 189 #endif |
| 190 } |
| 191 |
| 192 bool AudioProcessingImpl::ApmSubmoduleStates::CaptureMultiBandProcessingActive() |
| 193 const { |
| 194 return high_pass_filter_enabled_ || echo_canceller_enabled_ || |
| 195 mobile_echo_controller_enabled_ || noise_suppressor_enabled_ || |
| 196 beamformer_enabled_ || adaptive_gain_controller_enabled_; |
| 197 } |
| 198 |
| 199 bool AudioProcessingImpl::ApmSubmoduleStates::RenderMultiBandSubModulesActive() |
| 200 const { |
| 201 return RenderMultiBandProcessingActive() || echo_canceller_enabled_ || |
| 202 mobile_echo_controller_enabled_ || adaptive_gain_controller_enabled_; |
| 203 } |
| 204 |
| 205 bool AudioProcessingImpl::ApmSubmoduleStates::RenderMultiBandProcessingActive() |
| 206 const { |
| 207 #if WEBRTC_INTELLIGIBILITY_ENHANCER |
| 208 return intelligibility_enhancer_enabled_; |
| 209 #else |
| 210 return false; |
| 211 #endif |
| 212 } |
| 213 |
127 struct AudioProcessingImpl::ApmPublicSubmodules { | 214 struct AudioProcessingImpl::ApmPublicSubmodules { |
128 ApmPublicSubmodules() {} | 215 ApmPublicSubmodules() {} |
129 // Accessed externally of APM without any lock acquired. | 216 // Accessed externally of APM without any lock acquired. |
130 std::unique_ptr<EchoCancellationImpl> echo_cancellation; | 217 std::unique_ptr<EchoCancellationImpl> echo_cancellation; |
131 std::unique_ptr<EchoControlMobileImpl> echo_control_mobile; | 218 std::unique_ptr<EchoControlMobileImpl> echo_control_mobile; |
132 std::unique_ptr<GainControlImpl> gain_control; | 219 std::unique_ptr<GainControlImpl> gain_control; |
133 std::unique_ptr<HighPassFilterImpl> high_pass_filter; | 220 std::unique_ptr<HighPassFilterImpl> high_pass_filter; |
134 std::unique_ptr<LevelEstimatorImpl> level_estimator; | 221 std::unique_ptr<LevelEstimatorImpl> level_estimator; |
135 std::unique_ptr<NoiseSuppressionImpl> noise_suppression; | 222 std::unique_ptr<NoiseSuppressionImpl> noise_suppression; |
136 std::unique_ptr<VoiceDetectionImpl> voice_detection; | 223 std::unique_ptr<VoiceDetectionImpl> voice_detection; |
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
268 | 355 |
269 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { | 356 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
270 // Run in a single-threaded manner during initialization. | 357 // Run in a single-threaded manner during initialization. |
271 rtc::CritScope cs_render(&crit_render_); | 358 rtc::CritScope cs_render(&crit_render_); |
272 rtc::CritScope cs_capture(&crit_capture_); | 359 rtc::CritScope cs_capture(&crit_capture_); |
273 return InitializeLocked(processing_config); | 360 return InitializeLocked(processing_config); |
274 } | 361 } |
275 | 362 |
276 int AudioProcessingImpl::MaybeInitializeRender( | 363 int AudioProcessingImpl::MaybeInitializeRender( |
277 const ProcessingConfig& processing_config) { | 364 const ProcessingConfig& processing_config) { |
278 return MaybeInitialize(processing_config); | 365 return MaybeInitialize(processing_config, false); |
279 } | 366 } |
280 | 367 |
281 int AudioProcessingImpl::MaybeInitializeCapture( | 368 int AudioProcessingImpl::MaybeInitializeCapture( |
282 const ProcessingConfig& processing_config) { | 369 const ProcessingConfig& processing_config, |
283 return MaybeInitialize(processing_config); | 370 bool force_initialization) { |
| 371 return MaybeInitialize(processing_config, force_initialization); |
284 } | 372 } |
285 | 373 |
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP

// Debug-dump bookkeeping. The destructors are defaulted; they live here (out
// of line) so the unique_ptr members see complete types.
AudioProcessingImpl::ApmDebugDumpThreadState::ApmDebugDumpThreadState()
    : event_msg(new audioproc::Event()) {}

AudioProcessingImpl::ApmDebugDumpThreadState::~ApmDebugDumpThreadState() =
    default;

AudioProcessingImpl::ApmDebugDumpState::ApmDebugDumpState()
    : debug_file(FileWrapper::Create()) {}

AudioProcessingImpl::ApmDebugDumpState::~ApmDebugDumpState() = default;

#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
299 | 387 |
300 // Calls InitializeLocked() if any of the audio parameters have changed from | 388 // Calls InitializeLocked() if any of the audio parameters have changed from |
301 // their current values (needs to be called while holding the crit_render_lock). | 389 // their current values (needs to be called while holding the crit_render_lock). |
302 int AudioProcessingImpl::MaybeInitialize( | 390 int AudioProcessingImpl::MaybeInitialize( |
303 const ProcessingConfig& processing_config) { | 391 const ProcessingConfig& processing_config, |
| 392 bool force_initialization) { |
304 // Called from both threads. Thread check is therefore not possible. | 393 // Called from both threads. Thread check is therefore not possible. |
305 if (processing_config == formats_.api_format) { | 394 if (processing_config == formats_.api_format && !force_initialization) { |
306 return kNoError; | 395 return kNoError; |
307 } | 396 } |
308 | 397 |
309 rtc::CritScope cs_capture(&crit_capture_); | 398 rtc::CritScope cs_capture(&crit_capture_); |
310 return InitializeLocked(processing_config); | 399 return InitializeLocked(processing_config); |
311 } | 400 } |
312 | 401 |
313 int AudioProcessingImpl::InitializeLocked() { | 402 int AudioProcessingImpl::InitializeLocked() { |
314 const int fwd_audio_buffer_channels = | 403 const int fwd_audio_buffer_channels = |
315 capture_nonlocked_.beamformer_enabled | 404 capture_nonlocked_.beamformer_enabled |
316 ? formats_.api_format.input_stream().num_channels() | 405 ? formats_.api_format.input_stream().num_channels() |
317 : formats_.api_format.output_stream().num_channels(); | 406 : formats_.api_format.output_stream().num_channels(); |
318 const int rev_audio_buffer_out_num_frames = | 407 const int rev_audio_buffer_out_num_frames = |
319 formats_.api_format.reverse_output_stream().num_frames() == 0 | 408 formats_.api_format.reverse_output_stream().num_frames() == 0 |
320 ? formats_.rev_proc_format.num_frames() | 409 ? formats_.rev_proc_format.num_frames() |
321 : formats_.api_format.reverse_output_stream().num_frames(); | 410 : formats_.api_format.reverse_output_stream().num_frames(); |
322 if (formats_.api_format.reverse_input_stream().num_channels() > 0) { | 411 if (formats_.api_format.reverse_input_stream().num_channels() > 0) { |
323 render_.render_audio.reset(new AudioBuffer( | 412 render_.render_audio.reset(new AudioBuffer( |
324 formats_.api_format.reverse_input_stream().num_frames(), | 413 formats_.api_format.reverse_input_stream().num_frames(), |
325 formats_.api_format.reverse_input_stream().num_channels(), | 414 formats_.api_format.reverse_input_stream().num_channels(), |
326 formats_.rev_proc_format.num_frames(), | 415 formats_.rev_proc_format.num_frames(), |
327 formats_.rev_proc_format.num_channels(), | 416 formats_.rev_proc_format.num_channels(), |
328 rev_audio_buffer_out_num_frames)); | 417 rev_audio_buffer_out_num_frames)); |
329 if (rev_conversion_needed()) { | 418 if (formats_.api_format.reverse_input_stream() != |
| 419 formats_.api_format.reverse_output_stream()) { |
330 render_.render_converter = AudioConverter::Create( | 420 render_.render_converter = AudioConverter::Create( |
331 formats_.api_format.reverse_input_stream().num_channels(), | 421 formats_.api_format.reverse_input_stream().num_channels(), |
332 formats_.api_format.reverse_input_stream().num_frames(), | 422 formats_.api_format.reverse_input_stream().num_frames(), |
333 formats_.api_format.reverse_output_stream().num_channels(), | 423 formats_.api_format.reverse_output_stream().num_channels(), |
334 formats_.api_format.reverse_output_stream().num_frames()); | 424 formats_.api_format.reverse_output_stream().num_frames()); |
335 } else { | 425 } else { |
336 render_.render_converter.reset(nullptr); | 426 render_.render_converter.reset(nullptr); |
337 } | 427 } |
338 } else { | 428 } else { |
339 render_.render_audio.reset(nullptr); | 429 render_.render_audio.reset(nullptr); |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
390 return kBadNumberChannelsError; | 480 return kBadNumberChannelsError; |
391 } | 481 } |
392 | 482 |
393 if (capture_nonlocked_.beamformer_enabled && | 483 if (capture_nonlocked_.beamformer_enabled && |
394 num_in_channels != capture_.array_geometry.size()) { | 484 num_in_channels != capture_.array_geometry.size()) { |
395 return kBadNumberChannelsError; | 485 return kBadNumberChannelsError; |
396 } | 486 } |
397 | 487 |
398 formats_.api_format = config; | 488 formats_.api_format = config; |
399 | 489 |
400 capture_nonlocked_.fwd_proc_format = StreamConfig(ClosestHigherNativeRate( | 490 int fwd_proc_rate = FindNativeProcessRateToUse( |
401 std::min(formats_.api_format.input_stream().sample_rate_hz(), | 491 std::min(formats_.api_format.input_stream().sample_rate_hz(), |
402 formats_.api_format.output_stream().sample_rate_hz()))); | 492 formats_.api_format.output_stream().sample_rate_hz()), |
| 493 submodule_states_.CaptureMultiBandSubModulesActive() || |
| 494 submodule_states_.RenderMultiBandSubModulesActive()); |
403 | 495 |
404 int rev_proc_rate = ClosestHigherNativeRate(std::min( | 496 capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate); |
405 formats_.api_format.reverse_input_stream().sample_rate_hz(), | 497 |
406 formats_.api_format.reverse_output_stream().sample_rate_hz())); | 498 int rev_proc_rate = FindNativeProcessRateToUse( |
| 499 std::min(formats_.api_format.reverse_input_stream().sample_rate_hz(), |
| 500 formats_.api_format.reverse_output_stream().sample_rate_hz()), |
| 501 submodule_states_.CaptureMultiBandSubModulesActive() || |
| 502 submodule_states_.RenderMultiBandSubModulesActive()); |
407 // TODO(aluebs): Remove this restriction once we figure out why the 3-band | 503 // TODO(aluebs): Remove this restriction once we figure out why the 3-band |
408 // splitting filter degrades the AEC performance. | 504 // splitting filter degrades the AEC performance. |
409 if (rev_proc_rate > kSampleRate32kHz) { | 505 if (rev_proc_rate > kSampleRate32kHz) { |
410 rev_proc_rate = is_rev_processed() ? kSampleRate32kHz : kSampleRate16kHz; | 506 rev_proc_rate = submodule_states_.RenderMultiBandProcessingActive() |
| 507 ? kSampleRate32kHz |
| 508 : kSampleRate16kHz; |
411 } | 509 } |
412 // If the forward sample rate is 8 kHz, the reverse stream is also processed | 510 // If the forward sample rate is 8 kHz, the reverse stream is also processed |
413 // at this rate. | 511 // at this rate. |
414 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { | 512 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { |
415 rev_proc_rate = kSampleRate8kHz; | 513 rev_proc_rate = kSampleRate8kHz; |
416 } else { | 514 } else { |
417 rev_proc_rate = std::max(rev_proc_rate, static_cast<int>(kSampleRate16kHz)); | 515 rev_proc_rate = std::max(rev_proc_rate, static_cast<int>(kSampleRate16kHz)); |
418 } | 516 } |
419 | 517 |
420 // Always downmix the reverse stream to mono for analysis. This has been | 518 // Always downmix the reverse stream to mono for analysis. This has been |
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
549 } | 647 } |
550 return ProcessStream(src, input_stream, output_stream, dest); | 648 return ProcessStream(src, input_stream, output_stream, dest); |
551 } | 649 } |
552 | 650 |
553 int AudioProcessingImpl::ProcessStream(const float* const* src, | 651 int AudioProcessingImpl::ProcessStream(const float* const* src, |
554 const StreamConfig& input_config, | 652 const StreamConfig& input_config, |
555 const StreamConfig& output_config, | 653 const StreamConfig& output_config, |
556 float* const* dest) { | 654 float* const* dest) { |
557 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_StreamConfig"); | 655 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_StreamConfig"); |
558 ProcessingConfig processing_config; | 656 ProcessingConfig processing_config; |
| 657 bool reinitialization_required = false; |
559 { | 658 { |
560 // Acquire the capture lock in order to safely call the function | 659 // Acquire the capture lock in order to safely call the function |
561 // that retrieves the render side data. This function accesses apm | 660 // that retrieves the render side data. This function accesses apm |
562 // getters that need the capture lock held when being called. | 661 // getters that need the capture lock held when being called. |
563 rtc::CritScope cs_capture(&crit_capture_); | 662 rtc::CritScope cs_capture(&crit_capture_); |
564 public_submodules_->echo_cancellation->ReadQueuedRenderData(); | 663 public_submodules_->echo_cancellation->ReadQueuedRenderData(); |
565 public_submodules_->echo_control_mobile->ReadQueuedRenderData(); | 664 public_submodules_->echo_control_mobile->ReadQueuedRenderData(); |
566 public_submodules_->gain_control->ReadQueuedRenderData(); | 665 public_submodules_->gain_control->ReadQueuedRenderData(); |
567 | 666 |
568 if (!src || !dest) { | 667 if (!src || !dest) { |
569 return kNullPointerError; | 668 return kNullPointerError; |
570 } | 669 } |
571 | 670 |
572 processing_config = formats_.api_format; | 671 processing_config = formats_.api_format; |
| 672 reinitialization_required = UpdateActiveSubmoduleStates(); |
573 } | 673 } |
574 | 674 |
575 processing_config.input_stream() = input_config; | 675 processing_config.input_stream() = input_config; |
576 processing_config.output_stream() = output_config; | 676 processing_config.output_stream() = output_config; |
577 | 677 |
578 { | 678 { |
579 // Do conditional reinitialization. | 679 // Do conditional reinitialization. |
580 rtc::CritScope cs_render(&crit_render_); | 680 rtc::CritScope cs_render(&crit_render_); |
581 RETURN_ON_ERR(MaybeInitializeCapture(processing_config)); | 681 RETURN_ON_ERR( |
| 682 MaybeInitializeCapture(processing_config, reinitialization_required)); |
582 } | 683 } |
583 rtc::CritScope cs_capture(&crit_capture_); | 684 rtc::CritScope cs_capture(&crit_capture_); |
584 assert(processing_config.input_stream().num_frames() == | 685 assert(processing_config.input_stream().num_frames() == |
585 formats_.api_format.input_stream().num_frames()); | 686 formats_.api_format.input_stream().num_frames()); |
586 | 687 |
587 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 688 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
588 if (debug_dump_.debug_file->is_open()) { | 689 if (debug_dump_.debug_file->is_open()) { |
589 RETURN_ON_ERR(WriteConfigMessage(false)); | 690 RETURN_ON_ERR(WriteConfigMessage(false)); |
590 | 691 |
591 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 692 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
639 } | 740 } |
640 // Must be a native rate. | 741 // Must be a native rate. |
641 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 742 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
642 frame->sample_rate_hz_ != kSampleRate16kHz && | 743 frame->sample_rate_hz_ != kSampleRate16kHz && |
643 frame->sample_rate_hz_ != kSampleRate32kHz && | 744 frame->sample_rate_hz_ != kSampleRate32kHz && |
644 frame->sample_rate_hz_ != kSampleRate48kHz) { | 745 frame->sample_rate_hz_ != kSampleRate48kHz) { |
645 return kBadSampleRateError; | 746 return kBadSampleRateError; |
646 } | 747 } |
647 | 748 |
648 ProcessingConfig processing_config; | 749 ProcessingConfig processing_config; |
| 750 bool reinitialization_required = false; |
649 { | 751 { |
650 // Aquire lock for the access of api_format. | 752 // Aquire lock for the access of api_format. |
651 // The lock is released immediately due to the conditional | 753 // The lock is released immediately due to the conditional |
652 // reinitialization. | 754 // reinitialization. |
653 rtc::CritScope cs_capture(&crit_capture_); | 755 rtc::CritScope cs_capture(&crit_capture_); |
654 // TODO(ajm): The input and output rates and channels are currently | 756 // TODO(ajm): The input and output rates and channels are currently |
655 // constrained to be identical in the int16 interface. | 757 // constrained to be identical in the int16 interface. |
656 processing_config = formats_.api_format; | 758 processing_config = formats_.api_format; |
| 759 |
| 760 reinitialization_required = UpdateActiveSubmoduleStates(); |
657 } | 761 } |
658 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 762 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
659 processing_config.input_stream().set_num_channels(frame->num_channels_); | 763 processing_config.input_stream().set_num_channels(frame->num_channels_); |
660 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 764 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
661 processing_config.output_stream().set_num_channels(frame->num_channels_); | 765 processing_config.output_stream().set_num_channels(frame->num_channels_); |
662 | 766 |
663 { | 767 { |
664 // Do conditional reinitialization. | 768 // Do conditional reinitialization. |
665 rtc::CritScope cs_render(&crit_render_); | 769 rtc::CritScope cs_render(&crit_render_); |
666 RETURN_ON_ERR(MaybeInitializeCapture(processing_config)); | 770 RETURN_ON_ERR( |
| 771 MaybeInitializeCapture(processing_config, reinitialization_required)); |
667 } | 772 } |
668 rtc::CritScope cs_capture(&crit_capture_); | 773 rtc::CritScope cs_capture(&crit_capture_); |
669 if (frame->samples_per_channel_ != | 774 if (frame->samples_per_channel_ != |
670 formats_.api_format.input_stream().num_frames()) { | 775 formats_.api_format.input_stream().num_frames()) { |
671 return kBadDataLengthError; | 776 return kBadDataLengthError; |
672 } | 777 } |
673 | 778 |
674 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 779 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
675 if (debug_dump_.debug_file->is_open()) { | 780 if (debug_dump_.debug_file->is_open()) { |
676 RETURN_ON_ERR(WriteConfigMessage(false)); | 781 RETURN_ON_ERR(WriteConfigMessage(false)); |
677 | 782 |
678 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 783 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
679 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 784 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
680 const size_t data_size = | 785 const size_t data_size = |
681 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 786 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
682 msg->set_input_data(frame->data_, data_size); | 787 msg->set_input_data(frame->data_, data_size); |
683 } | 788 } |
684 #endif | 789 #endif |
685 | 790 |
686 capture_.capture_audio->DeinterleaveFrom(frame); | 791 capture_.capture_audio->DeinterleaveFrom(frame); |
687 RETURN_ON_ERR(ProcessStreamLocked()); | 792 RETURN_ON_ERR(ProcessStreamLocked()); |
688 capture_.capture_audio->InterleaveTo(frame, output_copy_needed()); | 793 capture_.capture_audio->InterleaveTo( |
| 794 frame, submodule_states_.CaptureMultiBandProcessingActive()); |
689 | 795 |
690 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 796 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
691 if (debug_dump_.debug_file->is_open()) { | 797 if (debug_dump_.debug_file->is_open()) { |
692 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 798 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
693 const size_t data_size = | 799 const size_t data_size = |
694 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 800 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
695 msg->set_output_data(frame->data_, data_size); | 801 msg->set_output_data(frame->data_, data_size); |
696 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 802 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
697 &debug_dump_.num_bytes_left_for_log_, | 803 &debug_dump_.num_bytes_left_for_log_, |
698 &crit_debug_, &debug_dump_.capture)); | 804 &crit_debug_, &debug_dump_.capture)); |
(...skipping 25 matching lines...) Expand all Loading... |
724 | 830 |
725 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. | 831 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. |
726 | 832 |
727 if (constants_.use_experimental_agc && | 833 if (constants_.use_experimental_agc && |
728 public_submodules_->gain_control->is_enabled()) { | 834 public_submodules_->gain_control->is_enabled()) { |
729 private_submodules_->agc_manager->AnalyzePreProcess( | 835 private_submodules_->agc_manager->AnalyzePreProcess( |
730 ca->channels()[0], ca->num_channels(), | 836 ca->channels()[0], ca->num_channels(), |
731 capture_nonlocked_.fwd_proc_format.num_frames()); | 837 capture_nonlocked_.fwd_proc_format.num_frames()); |
732 } | 838 } |
733 | 839 |
734 if (fwd_analysis_needed()) { | 840 if (submodule_states_.CaptureMultiBandSubModulesActive() && |
| 841 SampleRateSupportsMultiBand( |
| 842 capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { |
735 ca->SplitIntoFrequencyBands(); | 843 ca->SplitIntoFrequencyBands(); |
736 } | 844 } |
737 | 845 |
738 if (capture_nonlocked_.beamformer_enabled) { | 846 if (capture_nonlocked_.beamformer_enabled) { |
739 private_submodules_->beamformer->AnalyzeChunk(*ca->split_data_f()); | 847 private_submodules_->beamformer->AnalyzeChunk(*ca->split_data_f()); |
740 // Discards all channels by the leftmost one. | 848 // Discards all channels by the leftmost one. |
741 ca->set_num_channels(1); | 849 ca->set_num_channels(1); |
742 } | 850 } |
743 | 851 |
744 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca); | 852 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca); |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
795 public_submodules_->gain_control->is_enabled() && | 903 public_submodules_->gain_control->is_enabled() && |
796 (!capture_nonlocked_.beamformer_enabled || | 904 (!capture_nonlocked_.beamformer_enabled || |
797 private_submodules_->beamformer->is_target_present())) { | 905 private_submodules_->beamformer->is_target_present())) { |
798 private_submodules_->agc_manager->Process( | 906 private_submodules_->agc_manager->Process( |
799 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), | 907 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), |
800 capture_nonlocked_.split_rate); | 908 capture_nonlocked_.split_rate); |
801 } | 909 } |
802 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio( | 910 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio( |
803 ca, echo_cancellation()->stream_has_echo())); | 911 ca, echo_cancellation()->stream_has_echo())); |
804 | 912 |
805 if (fwd_synthesis_needed()) { | 913 if (submodule_states_.CaptureMultiBandProcessingActive() && |
| 914 SampleRateSupportsMultiBand( |
| 915 capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { |
806 ca->MergeFrequencyBands(); | 916 ca->MergeFrequencyBands(); |
807 } | 917 } |
808 | 918 |
809 // TODO(aluebs): Investigate if the transient suppression placement should be | 919 // TODO(aluebs): Investigate if the transient suppression placement should be |
810 // before or after the AGC. | 920 // before or after the AGC. |
811 if (capture_.transient_suppressor_enabled) { | 921 if (capture_.transient_suppressor_enabled) { |
812 float voice_probability = | 922 float voice_probability = |
813 private_submodules_->agc_manager.get() | 923 private_submodules_->agc_manager.get() |
814 ? private_submodules_->agc_manager->voice_probability() | 924 ? private_submodules_->agc_manager->voice_probability() |
815 : 1.f; | 925 : 1.f; |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
849 | 959 |
850 int AudioProcessingImpl::ProcessReverseStream( | 960 int AudioProcessingImpl::ProcessReverseStream( |
851 const float* const* src, | 961 const float* const* src, |
852 const StreamConfig& reverse_input_config, | 962 const StreamConfig& reverse_input_config, |
853 const StreamConfig& reverse_output_config, | 963 const StreamConfig& reverse_output_config, |
854 float* const* dest) { | 964 float* const* dest) { |
855 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_StreamConfig"); | 965 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_StreamConfig"); |
856 rtc::CritScope cs(&crit_render_); | 966 rtc::CritScope cs(&crit_render_); |
857 RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, | 967 RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, |
858 reverse_output_config)); | 968 reverse_output_config)); |
859 if (is_rev_processed()) { | 969 if (submodule_states_.RenderMultiBandProcessingActive()) { |
860 render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(), | 970 render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(), |
861 dest); | 971 dest); |
862 } else if (render_check_rev_conversion_needed()) { | 972 } else if (formats_.api_format.reverse_input_stream() != |
| 973 formats_.api_format.reverse_output_stream()) { |
863 render_.render_converter->Convert(src, reverse_input_config.num_samples(), | 974 render_.render_converter->Convert(src, reverse_input_config.num_samples(), |
864 dest, | 975 dest, |
865 reverse_output_config.num_samples()); | 976 reverse_output_config.num_samples()); |
866 } else { | 977 } else { |
867 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), | 978 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), |
868 reverse_input_config.num_channels(), dest); | 979 reverse_input_config.num_channels(), dest); |
869 } | 980 } |
870 | 981 |
871 return kNoError; | 982 return kNoError; |
872 } | 983 } |
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
954 const size_t data_size = | 1065 const size_t data_size = |
955 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1066 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
956 msg->set_data(frame->data_, data_size); | 1067 msg->set_data(frame->data_, data_size); |
957 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1068 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
958 &debug_dump_.num_bytes_left_for_log_, | 1069 &debug_dump_.num_bytes_left_for_log_, |
959 &crit_debug_, &debug_dump_.render)); | 1070 &crit_debug_, &debug_dump_.render)); |
960 } | 1071 } |
961 #endif | 1072 #endif |
962 render_.render_audio->DeinterleaveFrom(frame); | 1073 render_.render_audio->DeinterleaveFrom(frame); |
963 RETURN_ON_ERR(ProcessReverseStreamLocked()); | 1074 RETURN_ON_ERR(ProcessReverseStreamLocked()); |
964 if (is_rev_processed()) { | 1075 render_.render_audio->InterleaveTo( |
965 render_.render_audio->InterleaveTo(frame, true); | 1076 frame, submodule_states_.RenderMultiBandProcessingActive()); |
966 } | |
967 return kNoError; | 1077 return kNoError; |
968 } | 1078 } |
969 | 1079 |
970 int AudioProcessingImpl::ProcessReverseStreamLocked() { | 1080 int AudioProcessingImpl::ProcessReverseStreamLocked() { |
971 AudioBuffer* ra = render_.render_audio.get(); // For brevity. | 1081 AudioBuffer* ra = render_.render_audio.get(); // For brevity. |
972 if (rev_analysis_needed()) { | 1082 if (submodule_states_.RenderMultiBandSubModulesActive() && |
| 1083 SampleRateSupportsMultiBand(formats_.rev_proc_format.sample_rate_hz())) { |
973 ra->SplitIntoFrequencyBands(); | 1084 ra->SplitIntoFrequencyBands(); |
974 } | 1085 } |
975 | 1086 |
976 #if WEBRTC_INTELLIGIBILITY_ENHANCER | 1087 #if WEBRTC_INTELLIGIBILITY_ENHANCER |
977 if (capture_nonlocked_.intelligibility_enabled) { | 1088 if (capture_nonlocked_.intelligibility_enabled) { |
978 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( | 1089 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( |
979 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, | 1090 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, |
980 ra->num_channels()); | 1091 ra->num_channels()); |
981 } | 1092 } |
982 #endif | 1093 #endif |
983 | 1094 |
984 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); | 1095 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); |
985 RETURN_ON_ERR( | 1096 RETURN_ON_ERR( |
986 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); | 1097 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); |
987 if (!constants_.use_experimental_agc) { | 1098 if (!constants_.use_experimental_agc) { |
988 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); | 1099 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); |
989 } | 1100 } |
990 | 1101 |
991 if (rev_synthesis_needed()) { | 1102 if (submodule_states_.RenderMultiBandProcessingActive() && |
| 1103 SampleRateSupportsMultiBand(formats_.rev_proc_format.sample_rate_hz())) { |
992 ra->MergeFrequencyBands(); | 1104 ra->MergeFrequencyBands(); |
993 } | 1105 } |
994 | 1106 |
995 return kNoError; | 1107 return kNoError; |
996 } | 1108 } |
997 | 1109 |
998 int AudioProcessingImpl::set_stream_delay_ms(int delay) { | 1110 int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
999 rtc::CritScope cs(&crit_capture_); | 1111 rtc::CritScope cs(&crit_capture_); |
1000 Error retval = kNoError; | 1112 Error retval = kNoError; |
1001 capture_.was_stream_delay_set = true; | 1113 capture_.was_stream_delay_set = true; |
(...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1115 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1227 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
1116 // We just return if recording hasn't started. | 1228 // We just return if recording hasn't started. |
1117 debug_dump_.debug_file->CloseFile(); | 1229 debug_dump_.debug_file->CloseFile(); |
1118 return kNoError; | 1230 return kNoError; |
1119 #else | 1231 #else |
1120 return kUnsupportedFunctionError; | 1232 return kUnsupportedFunctionError; |
1121 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1233 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
1122 } | 1234 } |
1123 | 1235 |
1124 EchoCancellation* AudioProcessingImpl::echo_cancellation() const { | 1236 EchoCancellation* AudioProcessingImpl::echo_cancellation() const { |
1125 // Adding a lock here has no effect as it allows any access to the submodule | |
1126 // from the returned pointer. | |
1127 return public_submodules_->echo_cancellation.get(); | 1237 return public_submodules_->echo_cancellation.get(); |
1128 } | 1238 } |
1129 | 1239 |
1130 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { | 1240 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { |
1131 // Adding a lock here has no effect as it allows any access to the submodule | |
1132 // from the returned pointer. | |
1133 return public_submodules_->echo_control_mobile.get(); | 1241 return public_submodules_->echo_control_mobile.get(); |
1134 } | 1242 } |
1135 | 1243 |
1136 GainControl* AudioProcessingImpl::gain_control() const { | 1244 GainControl* AudioProcessingImpl::gain_control() const { |
1137 // Adding a lock here has no effect as it allows any access to the submodule | |
1138 // from the returned pointer. | |
1139 if (constants_.use_experimental_agc) { | 1245 if (constants_.use_experimental_agc) { |
1140 return public_submodules_->gain_control_for_experimental_agc.get(); | 1246 return public_submodules_->gain_control_for_experimental_agc.get(); |
1141 } | 1247 } |
1142 return public_submodules_->gain_control.get(); | 1248 return public_submodules_->gain_control.get(); |
1143 } | 1249 } |
1144 | 1250 |
1145 HighPassFilter* AudioProcessingImpl::high_pass_filter() const { | 1251 HighPassFilter* AudioProcessingImpl::high_pass_filter() const { |
1146 // Adding a lock here has no effect as it allows any access to the submodule | |
1147 // from the returned pointer. | |
1148 return public_submodules_->high_pass_filter.get(); | 1252 return public_submodules_->high_pass_filter.get(); |
1149 } | 1253 } |
1150 | 1254 |
1151 LevelEstimator* AudioProcessingImpl::level_estimator() const { | 1255 LevelEstimator* AudioProcessingImpl::level_estimator() const { |
1152 // Adding a lock here has no effect as it allows any access to the submodule | |
1153 // from the returned pointer. | |
1154 return public_submodules_->level_estimator.get(); | 1256 return public_submodules_->level_estimator.get(); |
1155 } | 1257 } |
1156 | 1258 |
1157 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { | 1259 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { |
1158 // Adding a lock here has no effect as it allows any access to the submodule | |
1159 // from the returned pointer. | |
1160 return public_submodules_->noise_suppression.get(); | 1260 return public_submodules_->noise_suppression.get(); |
1161 } | 1261 } |
1162 | 1262 |
1163 VoiceDetection* AudioProcessingImpl::voice_detection() const { | 1263 VoiceDetection* AudioProcessingImpl::voice_detection() const { |
1164 // Adding a lock here has no effect as it allows any access to the submodule | |
1165 // from the returned pointer. | |
1166 return public_submodules_->voice_detection.get(); | 1264 return public_submodules_->voice_detection.get(); |
1167 } | 1265 } |
1168 | 1266 |
1169 bool AudioProcessingImpl::is_fwd_processed() const { | 1267 bool AudioProcessingImpl::UpdateActiveSubmoduleStates() { |
1170 // The beamformer, noise suppressor and highpass filter | 1268 return submodule_states_.Update( |
1171 // modify the data. | 1269 public_submodules_->high_pass_filter->is_enabled(), |
1172 if (capture_nonlocked_.beamformer_enabled || | 1270 public_submodules_->echo_cancellation->is_enabled(), |
1173 public_submodules_->high_pass_filter->is_enabled() || | 1271 public_submodules_->echo_control_mobile->is_enabled(), |
1174 public_submodules_->noise_suppression->is_enabled() || | 1272 public_submodules_->noise_suppression->is_enabled(), |
1175 public_submodules_->echo_cancellation->is_enabled() || | 1273 capture_nonlocked_.intelligibility_enabled, |
1176 public_submodules_->echo_control_mobile->is_enabled() || | 1274 capture_nonlocked_.beamformer_enabled, |
1177 public_submodules_->gain_control->is_enabled()) { | 1275 public_submodules_->gain_control->is_enabled(), |
1178 return true; | 1276 capture_nonlocked_.level_controller_enabled, |
1179 } | 1277 public_submodules_->voice_detection->is_enabled(), |
1180 | 1278 public_submodules_->level_estimator->is_enabled(), |
1181 // The capture data is otherwise unchanged. | 1279 capture_.transient_suppressor_enabled); |
1182 return false; | |
1183 } | |
1184 | |
1185 bool AudioProcessingImpl::output_copy_needed() const { | |
1186 // Check if we've upmixed or downmixed the audio. | |
1187 return ((formats_.api_format.output_stream().num_channels() != | |
1188 formats_.api_format.input_stream().num_channels()) || | |
1189 is_fwd_processed() || capture_.transient_suppressor_enabled || | |
1190 capture_nonlocked_.level_controller_enabled); | |
1191 } | |
1192 | |
1193 bool AudioProcessingImpl::fwd_synthesis_needed() const { | |
1194 return (is_fwd_processed() && | |
1195 is_multi_band(capture_nonlocked_.fwd_proc_format.sample_rate_hz())); | |
1196 } | |
1197 | |
1198 bool AudioProcessingImpl::fwd_analysis_needed() const { | |
1199 if (!is_fwd_processed() && | |
1200 !public_submodules_->voice_detection->is_enabled() && | |
1201 !capture_.transient_suppressor_enabled) { | |
1202 // Only public_submodules_->level_estimator is enabled. | |
1203 return false; | |
1204 } else if (is_multi_band( | |
1205 capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { | |
1206 // Something besides public_submodules_->level_estimator is enabled, and we | |
1207 // have super-wb. | |
1208 return true; | |
1209 } | |
1210 return false; | |
1211 } | |
1212 | |
1213 bool AudioProcessingImpl::is_rev_processed() const { | |
1214 #if WEBRTC_INTELLIGIBILITY_ENHANCER | |
1215 return capture_nonlocked_.intelligibility_enabled; | |
1216 #else | |
1217 return false; | |
1218 #endif | |
1219 } | |
1220 | |
1221 bool AudioProcessingImpl::rev_synthesis_needed() const { | |
1222 return (is_rev_processed() && | |
1223 is_multi_band(formats_.rev_proc_format.sample_rate_hz())); | |
1224 } | |
1225 | |
1226 bool AudioProcessingImpl::rev_analysis_needed() const { | |
1227 return is_multi_band(formats_.rev_proc_format.sample_rate_hz()) && | |
1228 (is_rev_processed() || | |
1229 public_submodules_->echo_cancellation | |
1230 ->is_enabled_render_side_query() || | |
1231 public_submodules_->echo_control_mobile | |
1232 ->is_enabled_render_side_query() || | |
1233 public_submodules_->gain_control->is_enabled_render_side_query()); | |
1234 } | |
1235 | |
1236 bool AudioProcessingImpl::render_check_rev_conversion_needed() const { | |
1237 return rev_conversion_needed(); | |
1238 } | |
1239 | |
1240 bool AudioProcessingImpl::rev_conversion_needed() const { | |
1241 return (formats_.api_format.reverse_input_stream() != | |
1242 formats_.api_format.reverse_output_stream()); | |
1243 } | 1280 } |
1244 | 1281 |
1245 void AudioProcessingImpl::InitializeExperimentalAgc() { | 1282 void AudioProcessingImpl::InitializeExperimentalAgc() { |
1246 if (constants_.use_experimental_agc) { | 1283 if (constants_.use_experimental_agc) { |
1247 if (!private_submodules_->agc_manager.get()) { | 1284 if (!private_submodules_->agc_manager.get()) { |
1248 private_submodules_->agc_manager.reset(new AgcManagerDirect( | 1285 private_submodules_->agc_manager.reset(new AgcManagerDirect( |
1249 public_submodules_->gain_control.get(), | 1286 public_submodules_->gain_control.get(), |
1250 public_submodules_->gain_control_for_experimental_agc.get(), | 1287 public_submodules_->gain_control_for_experimental_agc.get(), |
1251 constants_.agc_startup_min_volume)); | 1288 constants_.agc_startup_min_volume)); |
1252 } | 1289 } |
(...skipping 308 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1561 fwd_proc_format(kSampleRate16kHz), | 1598 fwd_proc_format(kSampleRate16kHz), |
1562 split_rate(kSampleRate16kHz) {} | 1599 split_rate(kSampleRate16kHz) {} |
1563 | 1600 |
1564 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 1601 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
1565 | 1602 |
1566 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 1603 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
1567 | 1604 |
1568 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 1605 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
1569 | 1606 |
1570 } // namespace webrtc | 1607 } // namespace webrtc |
OLD | NEW |