OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 64 matching lines...) | |
75 AudioProcessing::kSampleRate16kHz, | 75 AudioProcessing::kSampleRate16kHz, |
76 AudioProcessing::kSampleRate32kHz, | 76 AudioProcessing::kSampleRate32kHz, |
77 AudioProcessing::kSampleRate48kHz}; | 77 AudioProcessing::kSampleRate48kHz}; |
78 const size_t AudioProcessing::kNumNativeSampleRates = | 78 const size_t AudioProcessing::kNumNativeSampleRates = |
79 arraysize(AudioProcessing::kNativeSampleRatesHz); | 79 arraysize(AudioProcessing::kNativeSampleRatesHz); |
80 const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing:: | 80 const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing:: |
81 kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1]; | 81 kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1]; |
82 | 82 |
83 namespace { | 83 namespace { |
84 | 84 |
85 const int kInternalNativeRates[] = {AudioProcessing::kSampleRate8kHz, | |
86 AudioProcessing::kSampleRate16kHz, | |
87 #ifdef WEBRTC_ARCH_ARM_FAMILY | |
88 AudioProcessing::kSampleRate32kHz}; | |
89 #else | |
90 AudioProcessing::kSampleRate32kHz, | |
91 AudioProcessing::kSampleRate48kHz}; | |
92 #endif // WEBRTC_ARCH_ARM_FAMILY | |
93 | |
94 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { | 85 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { |
95 switch (layout) { | 86 switch (layout) { |
96 case AudioProcessing::kMono: | 87 case AudioProcessing::kMono: |
97 case AudioProcessing::kStereo: | 88 case AudioProcessing::kStereo: |
98 return false; | 89 return false; |
99 case AudioProcessing::kMonoAndKeyboard: | 90 case AudioProcessing::kMonoAndKeyboard: |
100 case AudioProcessing::kStereoAndKeyboard: | 91 case AudioProcessing::kStereoAndKeyboard: |
101 return true; | 92 return true; |
102 } | 93 } |
103 | 94 |
104 assert(false); | 95 assert(false); |
105 return false; | 96 return false; |
106 } | 97 } |
107 | 98 |
108 bool is_multi_band(int sample_rate_hz) { | 99 bool SampleRateSupportsMultiBand(int sample_rate_hz) { |
109 return sample_rate_hz == AudioProcessing::kSampleRate32kHz || | 100 return sample_rate_hz == AudioProcessing::kSampleRate32kHz || |
110 sample_rate_hz == AudioProcessing::kSampleRate48kHz; | 101 sample_rate_hz == AudioProcessing::kSampleRate48kHz; |
111 } | 102 } |
112 | 103 |
113 int ClosestHigherNativeRate(int min_proc_rate) { | 104 int NativeProcessRateToUse(int minimum_rate, bool band_splitting_required) { |
the sun
2016/09/08 08:10:15
FindNativeProcessRateToUse()
also, this is the ty
peah-webrtc
2016/09/08 08:44:03
Changed the name.
It is indeed a good function to
the sun
2016/09/08 09:20:48
I think so. Unless the functionality is covered by
peah-webrtc
2016/09/08 18:57:50
This is indirectly tested by the tests that operat
the sun
2016/09/08 20:36:38
Very well. I assume the non-bitexact changes canno
peah-webrtc
2016/09/10 07:52:37
Of course, they can. But we have at least coverage
| |
114 for (int rate : kInternalNativeRates) { | 105 #ifdef WEBRTC_ARCH_ARM_FAMILY |
115 if (rate >= min_proc_rate) { | 106 const int kMaxSplittingNativeProcessRate = AudioProcessing::kSampleRate32kHz; |
107 #else | |
108 const int kMaxSplittingNativeProcessRate = AudioProcessing::kSampleRate48kHz; | |
109 #endif | |
110 RTC_DCHECK_LE(kMaxSplittingNativeProcessRate, | |
111 AudioProcessing::kMaxNativeSampleRateHz); | |
112 const int uppermost_native_rate = band_splitting_required | |
113 ? kMaxSplittingNativeProcessRate | |
114 : AudioProcessing::kSampleRate48kHz; | |
115 | |
116 for (auto rate : AudioProcessing::kNativeSampleRatesHz) { | |
117 if (rate >= uppermost_native_rate) { | |
118 return uppermost_native_rate; | |
119 } | |
120 if (rate >= minimum_rate) { | |
116 return rate; | 121 return rate; |
117 } | 122 } |
118 } | 123 } |
119 return kInternalNativeRates[arraysize(kInternalNativeRates) - 1]; | 124 RTC_NOTREACHED(); |
120 } | 125 } |
121 | 126 |
122 } // namespace | 127 } // namespace |
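The review thread above asks whether the new rate-selection helper is directly covered by tests. As a purely illustrative aside (not part of this CL), the selection rule it introduces can be restated in a self-contained form and spot-checked; the native rate set, the ARM/desktop splitting caps, and all names below are assumptions mirroring the patch, not WebRTC code:

```cpp
#include <cassert>

// Native APM rates, mirroring AudioProcessing::kNativeSampleRatesHz.
constexpr int kNativeRatesHz[] = {8000, 16000, 32000, 48000};

// Same rule as NativeProcessRateToUse() in the CL: pick the lowest native rate
// that covers |minimum_rate|, but cap at |max_splitting_rate| when band
// splitting is required (32 kHz on ARM, 48 kHz elsewhere).
int SelectProcessRate(int minimum_rate,
                      bool splitting_required,
                      int max_splitting_rate) {
  const int uppermost = splitting_required ? max_splitting_rate : 48000;
  for (int rate : kNativeRatesHz) {
    if (rate >= uppermost) return uppermost;
    if (rate >= minimum_rate) return rate;
  }
  return uppermost;  // Not reached with the rate table above.
}

int main() {
  // No splitting required: the closest-higher native rate is used.
  assert(SelectProcessRate(44100, false, 48000) == 48000);
  assert(SelectProcessRate(16000, false, 48000) == 16000);
  // Splitting required with the ARM cap: 48 kHz streams process at 32 kHz.
  assert(SelectProcessRate(48000, true, 32000) == 32000);
  // Splitting required with the desktop cap: behaviour is unchanged.
  assert(SelectProcessRate(48000, true, 48000) == 48000);
  return 0;
}
```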
123 | 128 |
124 // Throughout webrtc, it's assumed that success is represented by zero. | 129 // Throughout webrtc, it's assumed that success is represented by zero. |
125 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 130 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
126 | 131 |
132 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} | |
133 | |
134 bool AudioProcessingImpl::ApmSubmoduleStates::Update( | |
135 bool high_pass_filter_enabled, | |
136 bool echo_canceller_enabled, | |
137 bool mobile_echo_controller_enabled, | |
138 bool noise_suppressor_enabled, | |
139 bool intelligibility_enhancer_enabled, | |
140 bool beamformer_enabled, | |
141 bool adaptive_gain_controller_enabled, | |
142 bool level_controller_enabled, | |
143 bool voice_activity_detector_enabled, | |
144 bool level_estimator_enabled, | |
145 bool transient_suppressor_enabled) { | |
146 bool changed = false; | |
147 changed |= (high_pass_filter_enabled != high_pass_filter_enabled_); | |
148 changed |= (echo_canceller_enabled != echo_canceller_enabled_); | |
149 changed |= | |
150 (mobile_echo_controller_enabled != mobile_echo_controller_enabled_); | |
151 changed |= (noise_suppressor_enabled != noise_suppressor_enabled_); | |
152 changed |= | |
153 (intelligibility_enhancer_enabled != intelligibility_enhancer_enabled_); | |
154 changed |= (beamformer_enabled != beamformer_enabled_); | |
155 changed |= | |
156 (adaptive_gain_controller_enabled != adaptive_gain_controller_enabled_); | |
157 changed |= (level_controller_enabled != level_controller_enabled_); | |
158 changed |= (level_estimator_enabled != level_estimator_enabled_); | |
159 changed |= | |
160 (voice_activity_detector_enabled != voice_activity_detector_enabled_); | |
161 changed |= (transient_suppressor_enabled != transient_suppressor_enabled_); | |
162 if (changed) { | |
163 high_pass_filter_enabled_ = high_pass_filter_enabled; | |
164 echo_canceller_enabled_ = echo_canceller_enabled; | |
165 mobile_echo_controller_enabled_ = mobile_echo_controller_enabled; | |
166 noise_suppressor_enabled_ = noise_suppressor_enabled; | |
167 intelligibility_enhancer_enabled_ = intelligibility_enhancer_enabled; | |
168 beamformer_enabled_ = beamformer_enabled; | |
169 adaptive_gain_controller_enabled_ = adaptive_gain_controller_enabled; | |
170 level_controller_enabled_ = level_controller_enabled; | |
171 level_estimator_enabled_ = level_estimator_enabled; | |
172 voice_activity_detector_enabled_ = voice_activity_detector_enabled; | |
173 transient_suppressor_enabled_ = transient_suppressor_enabled; | |
174 } | |
175 | |
176 changed |= first_update_; | |
177 first_update_ = false; | |
178 return changed; | |
179 } | |
180 | |
181 bool AudioProcessingImpl::ApmSubmoduleStates::CaptureMultiBandModulesActive() | |
the sun
2016/09/08 08:10:15
Is this distinction between "modules" and "effects
peah-webrtc
2016/09/08 08:44:04
Good points!
It is definitely not an accepted ter
| |
182 const { | |
183 return CaptureMultiBandEffectsActive() || intelligibility_enhancer_enabled_ || | |
184 voice_activity_detector_enabled_; | |
185 } | |
186 | |
187 bool AudioProcessingImpl::ApmSubmoduleStates::CaptureMultiBandEffectsActive() | |
188 const { | |
189 return high_pass_filter_enabled_ || echo_canceller_enabled_ || | |
190 mobile_echo_controller_enabled_ || noise_suppressor_enabled_ || | |
191 beamformer_enabled_ || adaptive_gain_controller_enabled_; | |
192 } | |
193 | |
194 bool AudioProcessingImpl::ApmSubmoduleStates::RenderMultiBandModulesActive() | |
195 const { | |
196 return RenderMultiBandEffectsActive() || echo_canceller_enabled_ || | |
197 mobile_echo_controller_enabled_ || adaptive_gain_controller_enabled_; | |
198 } | |
199 | |
200 bool AudioProcessingImpl::ApmSubmoduleStates::RenderMultiBandEffectsActive() | |
201 const { | |
202 return intelligibility_enhancer_enabled_; | |
203 } | |
204 | |
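The flag returned by Update() is what ProcessStream() later passes to MaybeInitializeCapture() as force_initialization, so toggling a submodule between calls triggers reinitialization even when the stream formats are unchanged. A minimal sketch of that change-detection idiom (made-up member names, not the actual ApmSubmoduleStates class):

```cpp
#include <cassert>

// Sketch of the Update() idiom: the first call always reports a change, later
// calls only when one of the cached enable flags actually flips.
class SubmoduleStateTracker {
 public:
  bool Update(bool echo_canceller_enabled, bool noise_suppressor_enabled) {
    bool changed = (echo_canceller_enabled != echo_canceller_enabled_) ||
                   (noise_suppressor_enabled != noise_suppressor_enabled_);
    echo_canceller_enabled_ = echo_canceller_enabled;
    noise_suppressor_enabled_ = noise_suppressor_enabled;
    changed |= first_update_;
    first_update_ = false;
    return changed;
  }

 private:
  bool echo_canceller_enabled_ = false;
  bool noise_suppressor_enabled_ = false;
  bool first_update_ = true;
};

int main() {
  SubmoduleStateTracker states;
  assert(states.Update(false, false));   // First call: forces initialization.
  assert(!states.Update(false, false));  // Nothing changed: no extra work.
  assert(states.Update(true, false));    // AEC was enabled: reinitialize.
  return 0;
}
```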
127 struct AudioProcessingImpl::ApmPublicSubmodules { | 205 struct AudioProcessingImpl::ApmPublicSubmodules { |
128 ApmPublicSubmodules() {} | 206 ApmPublicSubmodules() {} |
129 // Accessed externally of APM without any lock acquired. | 207 // Accessed externally of APM without any lock acquired. |
130 std::unique_ptr<EchoCancellationImpl> echo_cancellation; | 208 std::unique_ptr<EchoCancellationImpl> echo_cancellation; |
131 std::unique_ptr<EchoControlMobileImpl> echo_control_mobile; | 209 std::unique_ptr<EchoControlMobileImpl> echo_control_mobile; |
132 std::unique_ptr<GainControlImpl> gain_control; | 210 std::unique_ptr<GainControlImpl> gain_control; |
133 std::unique_ptr<HighPassFilterImpl> high_pass_filter; | 211 std::unique_ptr<HighPassFilterImpl> high_pass_filter; |
134 std::unique_ptr<LevelEstimatorImpl> level_estimator; | 212 std::unique_ptr<LevelEstimatorImpl> level_estimator; |
135 std::unique_ptr<NoiseSuppressionImpl> noise_suppression; | 213 std::unique_ptr<NoiseSuppressionImpl> noise_suppression; |
136 std::unique_ptr<VoiceDetectionImpl> voice_detection; | 214 std::unique_ptr<VoiceDetectionImpl> voice_detection; |
(...skipping 131 matching lines...) | |
268 | 346 |
269 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { | 347 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
270 // Run in a single-threaded manner during initialization. | 348 // Run in a single-threaded manner during initialization. |
271 rtc::CritScope cs_render(&crit_render_); | 349 rtc::CritScope cs_render(&crit_render_); |
272 rtc::CritScope cs_capture(&crit_capture_); | 350 rtc::CritScope cs_capture(&crit_capture_); |
273 return InitializeLocked(processing_config); | 351 return InitializeLocked(processing_config); |
274 } | 352 } |
275 | 353 |
276 int AudioProcessingImpl::MaybeInitializeRender( | 354 int AudioProcessingImpl::MaybeInitializeRender( |
277 const ProcessingConfig& processing_config) { | 355 const ProcessingConfig& processing_config) { |
278 return MaybeInitialize(processing_config); | 356 return MaybeInitialize(processing_config, false); |
279 } | 357 } |
280 | 358 |
281 int AudioProcessingImpl::MaybeInitializeCapture( | 359 int AudioProcessingImpl::MaybeInitializeCapture( |
282 const ProcessingConfig& processing_config) { | 360 const ProcessingConfig& processing_config, |
283 return MaybeInitialize(processing_config); | 361 bool force_initialization) { |
362 return MaybeInitialize(processing_config, force_initialization); | |
284 } | 363 } |
285 | 364 |
286 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 365 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
287 | 366 |
288 AudioProcessingImpl::ApmDebugDumpThreadState::ApmDebugDumpThreadState() | 367 AudioProcessingImpl::ApmDebugDumpThreadState::ApmDebugDumpThreadState() |
289 : event_msg(new audioproc::Event()) {} | 368 : event_msg(new audioproc::Event()) {} |
290 | 369 |
291 AudioProcessingImpl::ApmDebugDumpThreadState::~ApmDebugDumpThreadState() {} | 370 AudioProcessingImpl::ApmDebugDumpThreadState::~ApmDebugDumpThreadState() {} |
292 | 371 |
293 AudioProcessingImpl::ApmDebugDumpState::ApmDebugDumpState() | 372 AudioProcessingImpl::ApmDebugDumpState::ApmDebugDumpState() |
294 : debug_file(FileWrapper::Create()) {} | 373 : debug_file(FileWrapper::Create()) {} |
295 | 374 |
296 AudioProcessingImpl::ApmDebugDumpState::~ApmDebugDumpState() {} | 375 AudioProcessingImpl::ApmDebugDumpState::~ApmDebugDumpState() {} |
297 | 376 |
298 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 377 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
299 | 378 |
300 // Calls InitializeLocked() if any of the audio parameters have changed from | 379 // Calls InitializeLocked() if any of the audio parameters have changed from |
301 // their current values (needs to be called while holding the crit_render_lock). | 380 // their current values (needs to be called while holding the crit_render_lock). |
302 int AudioProcessingImpl::MaybeInitialize( | 381 int AudioProcessingImpl::MaybeInitialize( |
303 const ProcessingConfig& processing_config) { | 382 const ProcessingConfig& processing_config, |
383 bool force_initialization) { | |
304 // Called from both threads. Thread check is therefore not possible. | 384 // Called from both threads. Thread check is therefore not possible. |
305 if (processing_config == formats_.api_format) { | 385 if (processing_config == formats_.api_format && !force_initialization) { |
306 return kNoError; | 386 return kNoError; |
307 } | 387 } |
308 | 388 |
309 rtc::CritScope cs_capture(&crit_capture_); | 389 rtc::CritScope cs_capture(&crit_capture_); |
310 return InitializeLocked(processing_config); | 390 return InitializeLocked(processing_config); |
311 } | 391 } |
312 | 392 |
313 int AudioProcessingImpl::InitializeLocked() { | 393 int AudioProcessingImpl::InitializeLocked() { |
314 const int fwd_audio_buffer_channels = | 394 const int fwd_audio_buffer_channels = |
315 capture_nonlocked_.beamformer_enabled | 395 capture_nonlocked_.beamformer_enabled |
316 ? formats_.api_format.input_stream().num_channels() | 396 ? formats_.api_format.input_stream().num_channels() |
317 : formats_.api_format.output_stream().num_channels(); | 397 : formats_.api_format.output_stream().num_channels(); |
318 const int rev_audio_buffer_out_num_frames = | 398 const int rev_audio_buffer_out_num_frames = |
319 formats_.api_format.reverse_output_stream().num_frames() == 0 | 399 formats_.api_format.reverse_output_stream().num_frames() == 0 |
320 ? formats_.rev_proc_format.num_frames() | 400 ? formats_.rev_proc_format.num_frames() |
321 : formats_.api_format.reverse_output_stream().num_frames(); | 401 : formats_.api_format.reverse_output_stream().num_frames(); |
322 if (formats_.api_format.reverse_input_stream().num_channels() > 0) { | 402 if (formats_.api_format.reverse_input_stream().num_channels() > 0) { |
323 render_.render_audio.reset(new AudioBuffer( | 403 render_.render_audio.reset(new AudioBuffer( |
324 formats_.api_format.reverse_input_stream().num_frames(), | 404 formats_.api_format.reverse_input_stream().num_frames(), |
325 formats_.api_format.reverse_input_stream().num_channels(), | 405 formats_.api_format.reverse_input_stream().num_channels(), |
326 formats_.rev_proc_format.num_frames(), | 406 formats_.rev_proc_format.num_frames(), |
327 formats_.rev_proc_format.num_channels(), | 407 formats_.rev_proc_format.num_channels(), |
328 rev_audio_buffer_out_num_frames)); | 408 rev_audio_buffer_out_num_frames)); |
329 if (rev_conversion_needed()) { | 409 if (formats_.api_format.reverse_input_stream() != |
410 formats_.api_format.reverse_output_stream()) { | |
330 render_.render_converter = AudioConverter::Create( | 411 render_.render_converter = AudioConverter::Create( |
331 formats_.api_format.reverse_input_stream().num_channels(), | 412 formats_.api_format.reverse_input_stream().num_channels(), |
332 formats_.api_format.reverse_input_stream().num_frames(), | 413 formats_.api_format.reverse_input_stream().num_frames(), |
333 formats_.api_format.reverse_output_stream().num_channels(), | 414 formats_.api_format.reverse_output_stream().num_channels(), |
334 formats_.api_format.reverse_output_stream().num_frames()); | 415 formats_.api_format.reverse_output_stream().num_frames()); |
335 } else { | 416 } else { |
336 render_.render_converter.reset(nullptr); | 417 render_.render_converter.reset(nullptr); |
337 } | 418 } |
338 } else { | 419 } else { |
339 render_.render_audio.reset(nullptr); | 420 render_.render_audio.reset(nullptr); |
(...skipping 50 matching lines...) | |
390 return kBadNumberChannelsError; | 471 return kBadNumberChannelsError; |
391 } | 472 } |
392 | 473 |
393 if (capture_nonlocked_.beamformer_enabled && | 474 if (capture_nonlocked_.beamformer_enabled && |
394 num_in_channels != capture_.array_geometry.size()) { | 475 num_in_channels != capture_.array_geometry.size()) { |
395 return kBadNumberChannelsError; | 476 return kBadNumberChannelsError; |
396 } | 477 } |
397 | 478 |
398 formats_.api_format = config; | 479 formats_.api_format = config; |
399 | 480 |
400 capture_nonlocked_.fwd_proc_format = StreamConfig(ClosestHigherNativeRate( | 481 int fwd_proc_rate = NativeProcessRateToUse( |
the sun
2016/09/08 08:10:15
I find it confusing that "fwd" and "rev" are used
peah-webrtc
2016/09/08 08:44:03
I agree, and that is something that has been prese
the sun
2016/09/08 09:20:48
Personally I think Render and Capture makes much m
peah-webrtc
2016/09/08 18:57:50
That sounds great to me. Note that I named the var
the sun
2016/09/08 20:36:38
sgtm
| |
401 std::min(formats_.api_format.input_stream().sample_rate_hz(), | 482 std::min(formats_.api_format.input_stream().sample_rate_hz(), |
402 formats_.api_format.output_stream().sample_rate_hz()))); | 483 formats_.api_format.output_stream().sample_rate_hz()), |
484 submodule_states_.CaptureMultiBandModulesActive() || | |
485 submodule_states_.RenderMultiBandModulesActive()); | |
403 | 486 |
404 int rev_proc_rate = ClosestHigherNativeRate(std::min( | 487 capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate); |
405 formats_.api_format.reverse_input_stream().sample_rate_hz(), | 488 |
406 formats_.api_format.reverse_output_stream().sample_rate_hz())); | 489 int rev_proc_rate = NativeProcessRateToUse( |
490 std::min(formats_.api_format.reverse_input_stream().sample_rate_hz(), | |
491 formats_.api_format.reverse_output_stream().sample_rate_hz()), | |
492 submodule_states_.CaptureMultiBandModulesActive() || | |
493 submodule_states_.RenderMultiBandModulesActive()); | |
407 // TODO(aluebs): Remove this restriction once we figure out why the 3-band | 494 // TODO(aluebs): Remove this restriction once we figure out why the 3-band |
408 // splitting filter degrades the AEC performance. | 495 // splitting filter degrades the AEC performance. |
409 if (rev_proc_rate > kSampleRate32kHz) { | 496 if (rev_proc_rate > kSampleRate32kHz) { |
410 rev_proc_rate = is_rev_processed() ? kSampleRate32kHz : kSampleRate16kHz; | 497 rev_proc_rate = submodule_states_.RenderMultiBandEffectsActive() |
498 ? kSampleRate32kHz | |
499 : kSampleRate16kHz; | |
411 } | 500 } |
412 // If the forward sample rate is 8 kHz, the reverse stream is also processed | 501 // If the forward sample rate is 8 kHz, the reverse stream is also processed |
413 // at this rate. | 502 // at this rate. |
414 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { | 503 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { |
415 rev_proc_rate = kSampleRate8kHz; | 504 rev_proc_rate = kSampleRate8kHz; |
416 } else { | 505 } else { |
417 rev_proc_rate = std::max(rev_proc_rate, static_cast<int>(kSampleRate16kHz)); | 506 rev_proc_rate = std::max(rev_proc_rate, static_cast<int>(kSampleRate16kHz)); |
418 } | 507 } |
419 | 508 |
420 // Always downmix the reverse stream to mono for analysis. This has been | 509 // Always downmix the reverse stream to mono for analysis. This has been |
(...skipping 128 matching lines...) | |
549 } | 638 } |
550 return ProcessStream(src, input_stream, output_stream, dest); | 639 return ProcessStream(src, input_stream, output_stream, dest); |
551 } | 640 } |
552 | 641 |
553 int AudioProcessingImpl::ProcessStream(const float* const* src, | 642 int AudioProcessingImpl::ProcessStream(const float* const* src, |
554 const StreamConfig& input_config, | 643 const StreamConfig& input_config, |
555 const StreamConfig& output_config, | 644 const StreamConfig& output_config, |
556 float* const* dest) { | 645 float* const* dest) { |
557 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_StreamConfig"); | 646 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessStream_StreamConfig"); |
558 ProcessingConfig processing_config; | 647 ProcessingConfig processing_config; |
648 bool reinitialization_required; | |
the sun
2016/09/08 08:10:15
Please, give me a default value, in case someone l
peah-webrtc
2016/09/08 08:44:03
Done.
| |
559 { | 649 { |
560 // Acquire the capture lock in order to safely call the function | 650 // Acquire the capture lock in order to safely call the function |
561 // that retrieves the render side data. This function accesses apm | 651 // that retrieves the render side data. This function accesses apm |
562 // getters that need the capture lock held when being called. | 652 // getters that need the capture lock held when being called. |
563 rtc::CritScope cs_capture(&crit_capture_); | 653 rtc::CritScope cs_capture(&crit_capture_); |
564 public_submodules_->echo_cancellation->ReadQueuedRenderData(); | 654 public_submodules_->echo_cancellation->ReadQueuedRenderData(); |
565 public_submodules_->echo_control_mobile->ReadQueuedRenderData(); | 655 public_submodules_->echo_control_mobile->ReadQueuedRenderData(); |
566 public_submodules_->gain_control->ReadQueuedRenderData(); | 656 public_submodules_->gain_control->ReadQueuedRenderData(); |
567 | 657 |
568 if (!src || !dest) { | 658 if (!src || !dest) { |
569 return kNullPointerError; | 659 return kNullPointerError; |
570 } | 660 } |
571 | 661 |
572 processing_config = formats_.api_format; | 662 processing_config = formats_.api_format; |
663 reinitialization_required = UpdateActiveSubmoduleStates(); | |
573 } | 664 } |
574 | 665 |
575 processing_config.input_stream() = input_config; | 666 processing_config.input_stream() = input_config; |
576 processing_config.output_stream() = output_config; | 667 processing_config.output_stream() = output_config; |
577 | 668 |
578 { | 669 { |
579 // Do conditional reinitialization. | 670 // Do conditional reinitialization. |
580 rtc::CritScope cs_render(&crit_render_); | 671 rtc::CritScope cs_render(&crit_render_); |
581 RETURN_ON_ERR(MaybeInitializeCapture(processing_config)); | 672 RETURN_ON_ERR( |
673 MaybeInitializeCapture(processing_config, reinitialization_required)); | |
582 } | 674 } |
583 rtc::CritScope cs_capture(&crit_capture_); | 675 rtc::CritScope cs_capture(&crit_capture_); |
584 assert(processing_config.input_stream().num_frames() == | 676 assert(processing_config.input_stream().num_frames() == |
585 formats_.api_format.input_stream().num_frames()); | 677 formats_.api_format.input_stream().num_frames()); |
586 | 678 |
587 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 679 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
588 if (debug_dump_.debug_file->is_open()) { | 680 if (debug_dump_.debug_file->is_open()) { |
589 RETURN_ON_ERR(WriteConfigMessage(false)); | 681 RETURN_ON_ERR(WriteConfigMessage(false)); |
590 | 682 |
591 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 683 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
(...skipping 47 matching lines...) | |
639 } | 731 } |
640 // Must be a native rate. | 732 // Must be a native rate. |
641 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 733 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
642 frame->sample_rate_hz_ != kSampleRate16kHz && | 734 frame->sample_rate_hz_ != kSampleRate16kHz && |
643 frame->sample_rate_hz_ != kSampleRate32kHz && | 735 frame->sample_rate_hz_ != kSampleRate32kHz && |
644 frame->sample_rate_hz_ != kSampleRate48kHz) { | 736 frame->sample_rate_hz_ != kSampleRate48kHz) { |
645 return kBadSampleRateError; | 737 return kBadSampleRateError; |
646 } | 738 } |
647 | 739 |
648 ProcessingConfig processing_config; | 740 ProcessingConfig processing_config; |
741 bool reinitialization_required; | |
the sun
2016/09/08 08:10:15
default init pls
peah-webrtc
2016/09/08 08:44:04
Done.
| |
649 { | 742 { |
650 // Acquire lock for the access of api_format. | 743 // Acquire lock for the access of api_format. |
651 // The lock is released immediately due to the conditional | 744 // The lock is released immediately due to the conditional |
652 // reinitialization. | 745 // reinitialization. |
653 rtc::CritScope cs_capture(&crit_capture_); | 746 rtc::CritScope cs_capture(&crit_capture_); |
654 // TODO(ajm): The input and output rates and channels are currently | 747 // TODO(ajm): The input and output rates and channels are currently |
655 // constrained to be identical in the int16 interface. | 748 // constrained to be identical in the int16 interface. |
656 processing_config = formats_.api_format; | 749 processing_config = formats_.api_format; |
750 | |
751 reinitialization_required = UpdateActiveSubmoduleStates(); | |
657 } | 752 } |
658 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 753 processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
659 processing_config.input_stream().set_num_channels(frame->num_channels_); | 754 processing_config.input_stream().set_num_channels(frame->num_channels_); |
660 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); | 755 processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
661 processing_config.output_stream().set_num_channels(frame->num_channels_); | 756 processing_config.output_stream().set_num_channels(frame->num_channels_); |
662 | 757 |
663 { | 758 { |
664 // Do conditional reinitialization. | 759 // Do conditional reinitialization. |
665 rtc::CritScope cs_render(&crit_render_); | 760 rtc::CritScope cs_render(&crit_render_); |
666 RETURN_ON_ERR(MaybeInitializeCapture(processing_config)); | 761 RETURN_ON_ERR( |
762 MaybeInitializeCapture(processing_config, reinitialization_required)); | |
667 } | 763 } |
668 rtc::CritScope cs_capture(&crit_capture_); | 764 rtc::CritScope cs_capture(&crit_capture_); |
669 if (frame->samples_per_channel_ != | 765 if (frame->samples_per_channel_ != |
670 formats_.api_format.input_stream().num_frames()) { | 766 formats_.api_format.input_stream().num_frames()) { |
671 return kBadDataLengthError; | 767 return kBadDataLengthError; |
672 } | 768 } |
673 | 769 |
674 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 770 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
675 if (debug_dump_.debug_file->is_open()) { | 771 if (debug_dump_.debug_file->is_open()) { |
676 RETURN_ON_ERR(WriteConfigMessage(false)); | 772 RETURN_ON_ERR(WriteConfigMessage(false)); |
677 | 773 |
678 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 774 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
679 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 775 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
680 const size_t data_size = | 776 const size_t data_size = |
681 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 777 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
682 msg->set_input_data(frame->data_, data_size); | 778 msg->set_input_data(frame->data_, data_size); |
683 } | 779 } |
684 #endif | 780 #endif |
685 | 781 |
686 capture_.capture_audio->DeinterleaveFrom(frame); | 782 capture_.capture_audio->DeinterleaveFrom(frame); |
687 RETURN_ON_ERR(ProcessStreamLocked()); | 783 RETURN_ON_ERR(ProcessStreamLocked()); |
688 capture_.capture_audio->InterleaveTo(frame, output_copy_needed()); | 784 capture_.capture_audio->InterleaveTo(frame, true); |
689 | 785 |
690 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 786 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
691 if (debug_dump_.debug_file->is_open()) { | 787 if (debug_dump_.debug_file->is_open()) { |
692 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 788 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
693 const size_t data_size = | 789 const size_t data_size = |
694 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 790 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
695 msg->set_output_data(frame->data_, data_size); | 791 msg->set_output_data(frame->data_, data_size); |
696 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 792 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
697 &debug_dump_.num_bytes_left_for_log_, | 793 &debug_dump_.num_bytes_left_for_log_, |
698 &crit_debug_, &debug_dump_.capture)); | 794 &crit_debug_, &debug_dump_.capture)); |
(...skipping 25 matching lines...) | |
724 | 820 |
725 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. | 821 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. |
726 | 822 |
727 if (constants_.use_experimental_agc && | 823 if (constants_.use_experimental_agc && |
728 public_submodules_->gain_control->is_enabled()) { | 824 public_submodules_->gain_control->is_enabled()) { |
729 private_submodules_->agc_manager->AnalyzePreProcess( | 825 private_submodules_->agc_manager->AnalyzePreProcess( |
730 ca->channels()[0], ca->num_channels(), | 826 ca->channels()[0], ca->num_channels(), |
731 capture_nonlocked_.fwd_proc_format.num_frames()); | 827 capture_nonlocked_.fwd_proc_format.num_frames()); |
732 } | 828 } |
733 | 829 |
734 if (fwd_analysis_needed()) { | 830 if (submodule_states_.CaptureMultiBandModulesActive() && |
831 SampleRateSupportsMultiBand( | |
832 capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { | |
735 ca->SplitIntoFrequencyBands(); | 833 ca->SplitIntoFrequencyBands(); |
736 } | 834 } |
737 | 835 |
738 if (capture_nonlocked_.beamformer_enabled) { | 836 if (capture_nonlocked_.beamformer_enabled) { |
739 private_submodules_->beamformer->AnalyzeChunk(*ca->split_data_f()); | 837 private_submodules_->beamformer->AnalyzeChunk(*ca->split_data_f()); |
740 // Discards all channels but the leftmost one. | 838 // Discards all channels but the leftmost one. |
741 ca->set_num_channels(1); | 839 ca->set_num_channels(1); |
742 } | 840 } |
743 | 841 |
744 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca); | 842 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca); |
(...skipping 50 matching lines...) | |
795 public_submodules_->gain_control->is_enabled() && | 893 public_submodules_->gain_control->is_enabled() && |
796 (!capture_nonlocked_.beamformer_enabled || | 894 (!capture_nonlocked_.beamformer_enabled || |
797 private_submodules_->beamformer->is_target_present())) { | 895 private_submodules_->beamformer->is_target_present())) { |
798 private_submodules_->agc_manager->Process( | 896 private_submodules_->agc_manager->Process( |
799 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), | 897 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), |
800 capture_nonlocked_.split_rate); | 898 capture_nonlocked_.split_rate); |
801 } | 899 } |
802 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio( | 900 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio( |
803 ca, echo_cancellation()->stream_has_echo())); | 901 ca, echo_cancellation()->stream_has_echo())); |
804 | 902 |
805 if (fwd_synthesis_needed()) { | 903 if (submodule_states_.CaptureMultiBandEffectsActive() && |
904 SampleRateSupportsMultiBand( | |
905 capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { | |
806 ca->MergeFrequencyBands(); | 906 ca->MergeFrequencyBands(); |
807 } | 907 } |
808 | 908 |
809 // TODO(aluebs): Investigate if the transient suppression placement should be | 909 // TODO(aluebs): Investigate if the transient suppression placement should be |
810 // before or after the AGC. | 910 // before or after the AGC. |
811 if (capture_.transient_suppressor_enabled) { | 911 if (capture_.transient_suppressor_enabled) { |
812 float voice_probability = | 912 float voice_probability = |
813 private_submodules_->agc_manager.get() | 913 private_submodules_->agc_manager.get() |
814 ? private_submodules_->agc_manager->voice_probability() | 914 ? private_submodules_->agc_manager->voice_probability() |
815 : 1.f; | 915 : 1.f; |
(...skipping 33 matching lines...) | |
849 | 949 |
850 int AudioProcessingImpl::ProcessReverseStream( | 950 int AudioProcessingImpl::ProcessReverseStream( |
851 const float* const* src, | 951 const float* const* src, |
852 const StreamConfig& reverse_input_config, | 952 const StreamConfig& reverse_input_config, |
853 const StreamConfig& reverse_output_config, | 953 const StreamConfig& reverse_output_config, |
854 float* const* dest) { | 954 float* const* dest) { |
855 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_StreamConfig"); | 955 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_StreamConfig"); |
856 rtc::CritScope cs(&crit_render_); | 956 rtc::CritScope cs(&crit_render_); |
857 RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, | 957 RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, |
858 reverse_output_config)); | 958 reverse_output_config)); |
859 if (is_rev_processed()) { | 959 if (submodule_states_.RenderMultiBandEffectsActive()) { |
860 render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(), | 960 render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(), |
861 dest); | 961 dest); |
862 } else if (render_check_rev_conversion_needed()) { | 962 } else if (formats_.api_format.reverse_input_stream() != |
963 formats_.api_format.reverse_output_stream()) { | |
863 render_.render_converter->Convert(src, reverse_input_config.num_samples(), | 964 render_.render_converter->Convert(src, reverse_input_config.num_samples(), |
864 dest, | 965 dest, |
865 reverse_output_config.num_samples()); | 966 reverse_output_config.num_samples()); |
866 } else { | 967 } else { |
867 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), | 968 CopyAudioIfNeeded(src, reverse_input_config.num_frames(), |
868 reverse_input_config.num_channels(), dest); | 969 reverse_input_config.num_channels(), dest); |
869 } | 970 } |
870 | 971 |
871 return kNoError; | 972 return kNoError; |
872 } | 973 } |
(...skipping 81 matching lines...) | |
954 const size_t data_size = | 1055 const size_t data_size = |
955 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1056 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
956 msg->set_data(frame->data_, data_size); | 1057 msg->set_data(frame->data_, data_size); |
957 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1058 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
958 &debug_dump_.num_bytes_left_for_log_, | 1059 &debug_dump_.num_bytes_left_for_log_, |
959 &crit_debug_, &debug_dump_.render)); | 1060 &crit_debug_, &debug_dump_.render)); |
960 } | 1061 } |
961 #endif | 1062 #endif |
962 render_.render_audio->DeinterleaveFrom(frame); | 1063 render_.render_audio->DeinterleaveFrom(frame); |
963 RETURN_ON_ERR(ProcessReverseStreamLocked()); | 1064 RETURN_ON_ERR(ProcessReverseStreamLocked()); |
964 if (is_rev_processed()) { | 1065 render_.render_audio->InterleaveTo(frame, true); |
965 render_.render_audio->InterleaveTo(frame, true); | |
966 } | |
967 return kNoError; | 1066 return kNoError; |
968 } | 1067 } |
969 | 1068 |
970 int AudioProcessingImpl::ProcessReverseStreamLocked() { | 1069 int AudioProcessingImpl::ProcessReverseStreamLocked() { |
971 AudioBuffer* ra = render_.render_audio.get(); // For brevity. | 1070 AudioBuffer* ra = render_.render_audio.get(); // For brevity. |
972 if (rev_analysis_needed()) { | 1071 if (submodule_states_.RenderMultiBandModulesActive() && |
1072 SampleRateSupportsMultiBand(formats_.rev_proc_format.sample_rate_hz())) { | |
973 ra->SplitIntoFrequencyBands(); | 1073 ra->SplitIntoFrequencyBands(); |
974 } | 1074 } |
975 | 1075 |
976 #if WEBRTC_INTELLIGIBILITY_ENHANCER | 1076 #if WEBRTC_INTELLIGIBILITY_ENHANCER |
977 if (capture_nonlocked_.intelligibility_enabled) { | 1077 if (capture_nonlocked_.intelligibility_enabled) { |
978 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( | 1078 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( |
979 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, | 1079 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, |
980 ra->num_channels()); | 1080 ra->num_channels()); |
981 } | 1081 } |
982 #endif | 1082 #endif |
983 | 1083 |
984 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); | 1084 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); |
985 RETURN_ON_ERR( | 1085 RETURN_ON_ERR( |
986 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); | 1086 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); |
987 if (!constants_.use_experimental_agc) { | 1087 if (!constants_.use_experimental_agc) { |
988 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); | 1088 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); |
989 } | 1089 } |
990 | 1090 |
991 if (rev_synthesis_needed()) { | 1091 if (submodule_states_.RenderMultiBandEffectsActive() && |
1092 SampleRateSupportsMultiBand(formats_.rev_proc_format.sample_rate_hz())) { | |
992 ra->MergeFrequencyBands(); | 1093 ra->MergeFrequencyBands(); |
993 } | 1094 } |
994 | 1095 |
995 return kNoError; | 1096 return kNoError; |
996 } | 1097 } |
997 | 1098 |
998 int AudioProcessingImpl::set_stream_delay_ms(int delay) { | 1099 int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
999 rtc::CritScope cs(&crit_capture_); | 1100 rtc::CritScope cs(&crit_capture_); |
1000 Error retval = kNoError; | 1101 Error retval = kNoError; |
1001 capture_.was_stream_delay_set = true; | 1102 capture_.was_stream_delay_set = true; |
(...skipping 113 matching lines...) | |
1115 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1216 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
1116 // We just return if recording hasn't started. | 1217 // We just return if recording hasn't started. |
1117 debug_dump_.debug_file->CloseFile(); | 1218 debug_dump_.debug_file->CloseFile(); |
1118 return kNoError; | 1219 return kNoError; |
1119 #else | 1220 #else |
1120 return kUnsupportedFunctionError; | 1221 return kUnsupportedFunctionError; |
1121 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1222 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
1122 } | 1223 } |
1123 | 1224 |
1124 EchoCancellation* AudioProcessingImpl::echo_cancellation() const { | 1225 EchoCancellation* AudioProcessingImpl::echo_cancellation() const { |
1125 // Adding a lock here has no effect as it allows any access to the submodule | |
1126 // from the returned pointer. | |
1127 return public_submodules_->echo_cancellation.get(); | 1226 return public_submodules_->echo_cancellation.get(); |
1128 } | 1227 } |
1129 | 1228 |
1130 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { | 1229 EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { |
1131 // Adding a lock here has no effect as it allows any access to the submodule | |
1132 // from the returned pointer. | |
1133 return public_submodules_->echo_control_mobile.get(); | 1230 return public_submodules_->echo_control_mobile.get(); |
1134 } | 1231 } |
1135 | 1232 |
1136 GainControl* AudioProcessingImpl::gain_control() const { | 1233 GainControl* AudioProcessingImpl::gain_control() const { |
1137 // Adding a lock here has no effect as it allows any access to the submodule | |
1138 // from the returned pointer. | |
1139 if (constants_.use_experimental_agc) { | 1234 if (constants_.use_experimental_agc) { |
1140 return public_submodules_->gain_control_for_experimental_agc.get(); | 1235 return public_submodules_->gain_control_for_experimental_agc.get(); |
1141 } | 1236 } |
1142 return public_submodules_->gain_control.get(); | 1237 return public_submodules_->gain_control.get(); |
1143 } | 1238 } |
1144 | 1239 |
1145 HighPassFilter* AudioProcessingImpl::high_pass_filter() const { | 1240 HighPassFilter* AudioProcessingImpl::high_pass_filter() const { |
1146 // Adding a lock here has no effect as it allows any access to the submodule | |
1147 // from the returned pointer. | |
1148 return public_submodules_->high_pass_filter.get(); | 1241 return public_submodules_->high_pass_filter.get(); |
1149 } | 1242 } |
1150 | 1243 |
1151 LevelEstimator* AudioProcessingImpl::level_estimator() const { | 1244 LevelEstimator* AudioProcessingImpl::level_estimator() const { |
1152 // Adding a lock here has no effect as it allows any access to the submodule | |
1153 // from the returned pointer. | |
1154 return public_submodules_->level_estimator.get(); | 1245 return public_submodules_->level_estimator.get(); |
1155 } | 1246 } |
1156 | 1247 |
1157 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { | 1248 NoiseSuppression* AudioProcessingImpl::noise_suppression() const { |
1158 // Adding a lock here has no effect as it allows any access to the submodule | |
1159 // from the returned pointer. | |
1160 return public_submodules_->noise_suppression.get(); | 1249 return public_submodules_->noise_suppression.get(); |
1161 } | 1250 } |
1162 | 1251 |
1163 VoiceDetection* AudioProcessingImpl::voice_detection() const { | 1252 VoiceDetection* AudioProcessingImpl::voice_detection() const { |
1164 // Adding a lock here has no effect as it allows any access to the submodule | |
1165 // from the returned pointer. | |
1166 return public_submodules_->voice_detection.get(); | 1253 return public_submodules_->voice_detection.get(); |
1167 } | 1254 } |
1168 | 1255 |
1169 bool AudioProcessingImpl::is_fwd_processed() const { | 1256 bool AudioProcessingImpl::UpdateActiveSubmoduleStates() { |
1170 // The beamformer, noise suppressor and highpass filter | 1257 return submodule_states_.Update( |
1171 // modify the data. | 1258 public_submodules_->high_pass_filter->is_enabled(), |
1172 if (capture_nonlocked_.beamformer_enabled || | 1259 public_submodules_->echo_cancellation->is_enabled(), |
1173 public_submodules_->high_pass_filter->is_enabled() || | 1260 public_submodules_->echo_control_mobile->is_enabled(), |
1174 public_submodules_->noise_suppression->is_enabled() || | 1261 public_submodules_->noise_suppression->is_enabled(), |
1175 public_submodules_->echo_cancellation->is_enabled() || | 1262 capture_nonlocked_.intelligibility_enabled, |
1176 public_submodules_->echo_control_mobile->is_enabled() || | 1263 capture_nonlocked_.beamformer_enabled, |
1177 public_submodules_->gain_control->is_enabled()) { | 1264 public_submodules_->gain_control->is_enabled(), |
1178 return true; | 1265 capture_nonlocked_.level_controller_enabled, |
1179 } | 1266 public_submodules_->voice_detection->is_enabled(), |
1180 | 1267 public_submodules_->level_estimator->is_enabled(), |
1181 // The capture data is otherwise unchanged. | 1268 capture_.transient_suppressor_enabled); |
1182 return false; | |
1183 } | |
1184 | |
1185 bool AudioProcessingImpl::output_copy_needed() const { | |
1186 // Check if we've upmixed or downmixed the audio. | |
1187 return ((formats_.api_format.output_stream().num_channels() != | |
1188 formats_.api_format.input_stream().num_channels()) || | |
1189 is_fwd_processed() || capture_.transient_suppressor_enabled || | |
1190 capture_nonlocked_.level_controller_enabled); | |
1191 } | |
1192 | |
1193 bool AudioProcessingImpl::fwd_synthesis_needed() const { | |
1194 return (is_fwd_processed() && | |
1195 is_multi_band(capture_nonlocked_.fwd_proc_format.sample_rate_hz())); | |
1196 } | |
1197 | |
1198 bool AudioProcessingImpl::fwd_analysis_needed() const { | |
1199 if (!is_fwd_processed() && | |
1200 !public_submodules_->voice_detection->is_enabled() && | |
1201 !capture_.transient_suppressor_enabled) { | |
1202 // Only public_submodules_->level_estimator is enabled. | |
1203 return false; | |
1204 } else if (is_multi_band( | |
1205 capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { | |
1206 // Something besides public_submodules_->level_estimator is enabled, and we | |
1207 // have super-wb. | |
1208 return true; | |
1209 } | |
1210 return false; | |
1211 } | |
1212 | |
1213 bool AudioProcessingImpl::is_rev_processed() const { | |
1214 #if WEBRTC_INTELLIGIBILITY_ENHANCER | |
1215 return capture_nonlocked_.intelligibility_enabled; | |
the sun
2016/09/08 08:10:15
This was previously if-deffed but now is not - int
peah-webrtc
2016/09/08 08:44:04
Good point!
Done.
| |
1216 #else | |
1217 return false; | |
1218 #endif | |
1219 } | |
1220 | |
1221 bool AudioProcessingImpl::rev_synthesis_needed() const { | |
1222 return (is_rev_processed() && | |
1223 is_multi_band(formats_.rev_proc_format.sample_rate_hz())); | |
1224 } | |
1225 | |
1226 bool AudioProcessingImpl::rev_analysis_needed() const { | |
1227 return is_multi_band(formats_.rev_proc_format.sample_rate_hz()) && | |
1228 (is_rev_processed() || | |
1229 public_submodules_->echo_cancellation | |
1230 ->is_enabled_render_side_query() || | |
1231 public_submodules_->echo_control_mobile | |
1232 ->is_enabled_render_side_query() || | |
1233 public_submodules_->gain_control->is_enabled_render_side_query()); | |
1234 } | |
1235 | |
1236 bool AudioProcessingImpl::render_check_rev_conversion_needed() const { | |
1237 return rev_conversion_needed(); | |
1238 } | |
1239 | |
1240 bool AudioProcessingImpl::rev_conversion_needed() const { | |
1241 return (formats_.api_format.reverse_input_stream() != | |
1242 formats_.api_format.reverse_output_stream()); | |
1243 } | 1269 } |
1244 | 1270 |
1245 void AudioProcessingImpl::InitializeExperimentalAgc() { | 1271 void AudioProcessingImpl::InitializeExperimentalAgc() { |
1246 if (constants_.use_experimental_agc) { | 1272 if (constants_.use_experimental_agc) { |
1247 if (!private_submodules_->agc_manager.get()) { | 1273 if (!private_submodules_->agc_manager.get()) { |
1248 private_submodules_->agc_manager.reset(new AgcManagerDirect( | 1274 private_submodules_->agc_manager.reset(new AgcManagerDirect( |
1249 public_submodules_->gain_control.get(), | 1275 public_submodules_->gain_control.get(), |
1250 public_submodules_->gain_control_for_experimental_agc.get(), | 1276 public_submodules_->gain_control_for_experimental_agc.get(), |
1251 constants_.agc_startup_min_volume)); | 1277 constants_.agc_startup_min_volume)); |
1252 } | 1278 } |
(...skipping 308 matching lines...) | |
1561 fwd_proc_format(kSampleRate16kHz), | 1587 fwd_proc_format(kSampleRate16kHz), |
1562 split_rate(kSampleRate16kHz) {} | 1588 split_rate(kSampleRate16kHz) {} |
1563 | 1589 |
1564 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 1590 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
1565 | 1591 |
1566 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 1592 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
1567 | 1593 |
1568 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 1594 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
1569 | 1595 |
1570 } // namespace webrtc | 1596 } // namespace webrtc |