Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 52 | 52 |
| 53 #define RETURN_ON_ERR(expr) \ | 53 #define RETURN_ON_ERR(expr) \ |
| 54 do { \ | 54 do { \ |
| 55 int err = (expr); \ | 55 int err = (expr); \ |
| 56 if (err != kNoError) { \ | 56 if (err != kNoError) { \ |
| 57 return err; \ | 57 return err; \ |
| 58 } \ | 58 } \ |
| 59 } while (0) | 59 } while (0) |
| 60 | 60 |
| 61 namespace webrtc { | 61 namespace webrtc { |
| 62 | |
| 63 const int AudioProcessing::kNativeSampleRatesHz[] = { | |
| 64 AudioProcessing::kSampleRate8kHz, | |
| 65 AudioProcessing::kSampleRate16kHz, | |
| 66 AudioProcessing::kSampleRate32kHz, | |
| 67 AudioProcessing::kSampleRate48kHz}; | |
| 68 const size_t AudioProcessing::kNumNativeSampleRates = | |
| 69 arraysize(AudioProcessing::kNativeSampleRatesHz); | |
| 70 const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing:: | |
| 71 kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1]; | |
|
peah-webrtc
2016/03/10 06:59:24
Afaics, this means that the tri-band 48 kHz splitt
aluebs-webrtc
2016/03/10 15:34:32
That is addressed in this CL that already landed: h
| |
| 72 | |
| 62 namespace { | 73 namespace { |
| 63 | 74 |
| 64 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { | 75 static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { |
| 65 switch (layout) { | 76 switch (layout) { |
| 66 case AudioProcessing::kMono: | 77 case AudioProcessing::kMono: |
| 67 case AudioProcessing::kStereo: | 78 case AudioProcessing::kStereo: |
| 68 return false; | 79 return false; |
| 69 case AudioProcessing::kMonoAndKeyboard: | 80 case AudioProcessing::kMonoAndKeyboard: |
| 70 case AudioProcessing::kStereoAndKeyboard: | 81 case AudioProcessing::kStereoAndKeyboard: |
| 71 return true; | 82 return true; |
| 72 } | 83 } |
| 73 | 84 |
| 74 assert(false); | 85 assert(false); |
| 75 return false; | 86 return false; |
| 76 } | 87 } |
| 88 | |
| 89 bool MultiBandSignal(int sample_rate_hz) { | |
| 90 return sample_rate_hz == AudioProcessing::kSampleRate32kHz || | |
| 91 sample_rate_hz == AudioProcessing::kSampleRate48kHz; | |
| 92 } | |
| 93 | |
| 94 int ClosestNativeRate(int min_proc_rate) { | |
| 95 for (int rate : AudioProcessing::kNativeSampleRatesHz) { | |
| 96 if (rate >= min_proc_rate) { | |
| 97 return rate; | |
| 98 } | |
| 99 } | |
| 100 return AudioProcessing::kMaxNativeSampleRateHz; | |
| 101 } | |
| 102 | |
| 77 } // namespace | 103 } // namespace |
| 78 | 104 |
| 79 // Throughout webrtc, it's assumed that success is represented by zero. | 105 // Throughout webrtc, it's assumed that success is represented by zero. |
| 80 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 106 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
| 81 | 107 |
| 82 struct AudioProcessingImpl::ApmPublicSubmodules { | 108 struct AudioProcessingImpl::ApmPublicSubmodules { |
| 83 ApmPublicSubmodules() | 109 ApmPublicSubmodules() |
| 84 : echo_cancellation(nullptr), | 110 : echo_cancellation(nullptr), |
| 85 echo_control_mobile(nullptr), | 111 echo_control_mobile(nullptr), |
| 86 gain_control(nullptr) {} | 112 gain_control(nullptr) {} |
| (...skipping 15 matching lines...) Expand all Loading... | |
| 102 | 128 |
| 103 struct AudioProcessingImpl::ApmPrivateSubmodules { | 129 struct AudioProcessingImpl::ApmPrivateSubmodules { |
| 104 explicit ApmPrivateSubmodules(Beamformer<float>* beamformer) | 130 explicit ApmPrivateSubmodules(Beamformer<float>* beamformer) |
| 105 : beamformer(beamformer) {} | 131 : beamformer(beamformer) {} |
| 106 // Accessed internally from capture or during initialization | 132 // Accessed internally from capture or during initialization |
| 107 std::list<ProcessingComponent*> component_list; | 133 std::list<ProcessingComponent*> component_list; |
| 108 std::unique_ptr<Beamformer<float>> beamformer; | 134 std::unique_ptr<Beamformer<float>> beamformer; |
| 109 std::unique_ptr<AgcManagerDirect> agc_manager; | 135 std::unique_ptr<AgcManagerDirect> agc_manager; |
| 110 }; | 136 }; |
| 111 | 137 |
| 112 const int AudioProcessing::kNativeSampleRatesHz[] = { | |
| 113 AudioProcessing::kSampleRate8kHz, | |
| 114 AudioProcessing::kSampleRate16kHz, | |
| 115 AudioProcessing::kSampleRate32kHz, | |
| 116 AudioProcessing::kSampleRate48kHz}; | |
| 117 const size_t AudioProcessing::kNumNativeSampleRates = | |
| 118 arraysize(AudioProcessing::kNativeSampleRatesHz); | |
| 119 const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing:: | |
| 120 kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1]; | |
| 121 | |
| 122 AudioProcessing* AudioProcessing::Create() { | 138 AudioProcessing* AudioProcessing::Create() { |
| 123 Config config; | 139 Config config; |
| 124 return Create(config, nullptr); | 140 return Create(config, nullptr); |
| 125 } | 141 } |
| 126 | 142 |
| 127 AudioProcessing* AudioProcessing::Create(const Config& config) { | 143 AudioProcessing* AudioProcessing::Create(const Config& config) { |
| 128 return Create(config, nullptr); | 144 return Create(config, nullptr); |
| 129 } | 145 } |
| 130 | 146 |
| 131 AudioProcessing* AudioProcessing::Create(const Config& config, | 147 AudioProcessing* AudioProcessing::Create(const Config& config, |
| (...skipping 225 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 357 return kBadNumberChannelsError; | 373 return kBadNumberChannelsError; |
| 358 } | 374 } |
| 359 | 375 |
| 360 if (capture_nonlocked_.beamformer_enabled && | 376 if (capture_nonlocked_.beamformer_enabled && |
| 361 num_in_channels != capture_.array_geometry.size()) { | 377 num_in_channels != capture_.array_geometry.size()) { |
| 362 return kBadNumberChannelsError; | 378 return kBadNumberChannelsError; |
| 363 } | 379 } |
| 364 | 380 |
| 365 formats_.api_format = config; | 381 formats_.api_format = config; |
| 366 | 382 |
| 367 // We process at the closest native rate >= min(input rate, output rate). | 383 capture_nonlocked_.fwd_proc_format = StreamConfig(ClosestNativeRate(std::min( |
| 368 const int min_proc_rate = | 384 formats_.api_format.input_stream().sample_rate_hz(), |
| 369 std::min(formats_.api_format.input_stream().sample_rate_hz(), | 385 formats_.api_format.output_stream().sample_rate_hz()))); |
| 370 formats_.api_format.output_stream().sample_rate_hz()); | |
| 371 int fwd_proc_rate; | |
| 372 for (size_t i = 0; i < kNumNativeSampleRates; ++i) { | |
| 373 fwd_proc_rate = kNativeSampleRatesHz[i]; | |
| 374 if (fwd_proc_rate >= min_proc_rate) { | |
| 375 break; | |
| 376 } | |
| 377 } | |
| 378 | 386 |
| 379 capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate); | 387 int rev_proc_rate = ClosestNativeRate(std::min( |
| 380 | 388 formats_.api_format.reverse_input_stream().sample_rate_hz(), |
| 381 // We normally process the reverse stream at 16 kHz. Unless... | 389 formats_.api_format.reverse_output_stream().sample_rate_hz())); |
|
peah-webrtc
2016/03/10 06:59:24
This change basically means that the tri-band 48 k
aluebs-webrtc
2016/03/10 15:34:32
That is right. But in that case 32kHz was more of
| |
| 382 int rev_proc_rate = kSampleRate16kHz; | 390 // If the forward sample rate is 8 kHz, the reverse stream is also processed |
| 391 // at this rate. | |
| 383 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { | 392 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { |
| 384 // ...the forward stream is at 8 kHz. | |
| 385 rev_proc_rate = kSampleRate8kHz; | 393 rev_proc_rate = kSampleRate8kHz; |
| 386 } else { | |
| 387 if (formats_.api_format.reverse_input_stream().sample_rate_hz() == | |
| 388 kSampleRate32kHz) { | |
| 389 // ...or the input is at 32 kHz, in which case we use the splitting | |
| 390 // filter rather than the resampler. | |
| 391 rev_proc_rate = kSampleRate32kHz; | |
| 392 } | |
| 393 } | 394 } |
| 394 | 395 |
| 395 // Always downmix the reverse stream to mono for analysis. This has been | 396 // Always downmix the reverse stream to mono for analysis. This has been |
| 396 // demonstrated to work well for AEC in most practical scenarios. | 397 // demonstrated to work well for AEC in most practical scenarios. |
| 397 formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1); | 398 formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1); |
| 398 | 399 |
| 399 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz || | 400 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz || |
| 400 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) { | 401 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) { |
| 401 capture_nonlocked_.split_rate = kSampleRate16kHz; | 402 capture_nonlocked_.split_rate = kSampleRate16kHz; |
| 402 } else { | 403 } else { |
| (...skipping 238 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 641 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 642 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 642 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 643 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 643 const size_t data_size = | 644 const size_t data_size = |
| 644 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 645 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 645 msg->set_input_data(frame->data_, data_size); | 646 msg->set_input_data(frame->data_, data_size); |
| 646 } | 647 } |
| 647 #endif | 648 #endif |
| 648 | 649 |
| 649 capture_.capture_audio->DeinterleaveFrom(frame); | 650 capture_.capture_audio->DeinterleaveFrom(frame); |
| 650 RETURN_ON_ERR(ProcessStreamLocked()); | 651 RETURN_ON_ERR(ProcessStreamLocked()); |
| 651 capture_.capture_audio->InterleaveTo(frame, | 652 capture_.capture_audio->InterleaveTo(frame, output_copy_needed()); |
| 652 output_copy_needed(is_data_processed())); | |
| 653 | 653 |
| 654 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 654 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 655 if (debug_dump_.debug_file->Open()) { | 655 if (debug_dump_.debug_file->Open()) { |
| 656 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 656 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 657 const size_t data_size = | 657 const size_t data_size = |
| 658 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 658 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 659 msg->set_output_data(frame->data_, data_size); | 659 msg->set_output_data(frame->data_, data_size); |
| 660 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 660 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 661 &debug_dump_.num_bytes_left_for_log_, | 661 &debug_dump_.num_bytes_left_for_log_, |
| 662 &crit_debug_, &debug_dump_.capture)); | 662 &crit_debug_, &debug_dump_.capture)); |
| (...skipping 19 matching lines...) Expand all Loading... | |
| 682 | 682 |
| 683 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. | 683 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. |
| 684 | 684 |
| 685 if (constants_.use_experimental_agc && | 685 if (constants_.use_experimental_agc && |
| 686 public_submodules_->gain_control->is_enabled()) { | 686 public_submodules_->gain_control->is_enabled()) { |
| 687 private_submodules_->agc_manager->AnalyzePreProcess( | 687 private_submodules_->agc_manager->AnalyzePreProcess( |
| 688 ca->channels()[0], ca->num_channels(), | 688 ca->channels()[0], ca->num_channels(), |
| 689 capture_nonlocked_.fwd_proc_format.num_frames()); | 689 capture_nonlocked_.fwd_proc_format.num_frames()); |
| 690 } | 690 } |
| 691 | 691 |
| 692 bool data_processed = is_data_processed(); | 692 if (fwd_analysis_needed()) { |
| 693 if (analysis_needed(data_processed)) { | |
| 694 ca->SplitIntoFrequencyBands(); | 693 ca->SplitIntoFrequencyBands(); |
| 695 } | 694 } |
| 696 | 695 |
| 697 if (capture_nonlocked_.beamformer_enabled) { | 696 if (capture_nonlocked_.beamformer_enabled) { |
| 698 private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(), | 697 private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(), |
| 699 ca->split_data_f()); | 698 ca->split_data_f()); |
| 700 ca->set_num_channels(1); | 699 ca->set_num_channels(1); |
| 701 } | 700 } |
| 702 | 701 |
| 703 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca); | 702 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca); |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 722 if (constants_.use_experimental_agc && | 721 if (constants_.use_experimental_agc && |
| 723 public_submodules_->gain_control->is_enabled() && | 722 public_submodules_->gain_control->is_enabled() && |
| 724 (!capture_nonlocked_.beamformer_enabled || | 723 (!capture_nonlocked_.beamformer_enabled || |
| 725 private_submodules_->beamformer->is_target_present())) { | 724 private_submodules_->beamformer->is_target_present())) { |
| 726 private_submodules_->agc_manager->Process( | 725 private_submodules_->agc_manager->Process( |
| 727 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), | 726 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), |
| 728 capture_nonlocked_.split_rate); | 727 capture_nonlocked_.split_rate); |
| 729 } | 728 } |
| 730 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca)); | 729 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca)); |
| 731 | 730 |
| 732 if (synthesis_needed(data_processed)) { | 731 if (fwd_synthesis_needed()) { |
| 733 ca->MergeFrequencyBands(); | 732 ca->MergeFrequencyBands(); |
| 734 } | 733 } |
| 735 | 734 |
| 736 // TODO(aluebs): Investigate if the transient suppression placement should be | 735 // TODO(aluebs): Investigate if the transient suppression placement should be |
| 737 // before or after the AGC. | 736 // before or after the AGC. |
| 738 if (capture_.transient_suppressor_enabled) { | 737 if (capture_.transient_suppressor_enabled) { |
| 739 float voice_probability = | 738 float voice_probability = |
| 740 private_submodules_->agc_manager.get() | 739 private_submodules_->agc_manager.get() |
| 741 ? private_submodules_->agc_manager->voice_probability() | 740 ? private_submodules_->agc_manager->voice_probability() |
| 742 : 1.f; | 741 : 1.f; |
| (...skipping 154 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 897 &debug_dump_.num_bytes_left_for_log_, | 896 &debug_dump_.num_bytes_left_for_log_, |
| 898 &crit_debug_, &debug_dump_.render)); | 897 &crit_debug_, &debug_dump_.render)); |
| 899 } | 898 } |
| 900 #endif | 899 #endif |
| 901 render_.render_audio->DeinterleaveFrom(frame); | 900 render_.render_audio->DeinterleaveFrom(frame); |
| 902 return ProcessReverseStreamLocked(); | 901 return ProcessReverseStreamLocked(); |
| 903 } | 902 } |
| 904 | 903 |
| 905 int AudioProcessingImpl::ProcessReverseStreamLocked() { | 904 int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| 906 AudioBuffer* ra = render_.render_audio.get(); // For brevity. | 905 AudioBuffer* ra = render_.render_audio.get(); // For brevity. |
| 907 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz) { | 906 if (rev_analysis_needed()) { |
| 908 ra->SplitIntoFrequencyBands(); | 907 ra->SplitIntoFrequencyBands(); |
| 909 } | 908 } |
| 910 | 909 |
| 911 if (constants_.intelligibility_enabled) { | 910 if (constants_.intelligibility_enabled) { |
| 912 // Currently run in single-threaded mode when the intelligibility | 911 // Currently run in single-threaded mode when the intelligibility |
| 913 // enhancer is activated. | 912 // enhancer is activated. |
| 914 // TODO(peah): Fix to be properly multi-threaded. | 913 // TODO(peah): Fix to be properly multi-threaded. |
| 915 rtc::CritScope cs(&crit_capture_); | 914 rtc::CritScope cs(&crit_capture_); |
| 916 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( | 915 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( |
| 917 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, | 916 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, |
| 918 ra->num_channels()); | 917 ra->num_channels()); |
| 919 } | 918 } |
| 920 | 919 |
| 921 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); | 920 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); |
| 922 RETURN_ON_ERR( | 921 RETURN_ON_ERR( |
| 923 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); | 922 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); |
| 924 if (!constants_.use_experimental_agc) { | 923 if (!constants_.use_experimental_agc) { |
| 925 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); | 924 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); |
| 926 } | 925 } |
| 927 | 926 |
| 928 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz && | 927 if (rev_synthesis_needed()) { |
| 929 is_rev_processed()) { | |
| 930 ra->MergeFrequencyBands(); | 928 ra->MergeFrequencyBands(); |
| 931 } | 929 } |
| 932 | 930 |
| 933 return kNoError; | 931 return kNoError; |
| 934 } | 932 } |
| 935 | 933 |
| 936 int AudioProcessingImpl::set_stream_delay_ms(int delay) { | 934 int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
| 937 rtc::CritScope cs(&crit_capture_); | 935 rtc::CritScope cs(&crit_capture_); |
| 938 Error retval = kNoError; | 936 Error retval = kNoError; |
| 939 capture_.was_stream_delay_set = true; | 937 capture_.was_stream_delay_set = true; |
| (...skipping 191 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1131 for (auto item : private_submodules_->component_list) { | 1129 for (auto item : private_submodules_->component_list) { |
| 1132 if (item->is_component_enabled()) { | 1130 if (item->is_component_enabled()) { |
| 1133 return true; | 1131 return true; |
| 1134 } | 1132 } |
| 1135 } | 1133 } |
| 1136 | 1134 |
| 1137 // The capture data is otherwise unchanged. | 1135 // The capture data is otherwise unchanged. |
| 1138 return false; | 1136 return false; |
| 1139 } | 1137 } |
| 1140 | 1138 |
| 1141 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { | 1139 bool AudioProcessingImpl::output_copy_needed() const { |
| 1142 // Check if we've upmixed or downmixed the audio. | 1140 // Check if we've upmixed or downmixed the audio. |
| 1143 return ((formats_.api_format.output_stream().num_channels() != | 1141 return ((formats_.api_format.output_stream().num_channels() != |
| 1144 formats_.api_format.input_stream().num_channels()) || | 1142 formats_.api_format.input_stream().num_channels()) || |
| 1145 is_data_processed || capture_.transient_suppressor_enabled); | 1143 is_data_processed() || capture_.transient_suppressor_enabled); |
| 1146 } | 1144 } |
| 1147 | 1145 |
| 1148 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { | 1146 bool AudioProcessingImpl::fwd_synthesis_needed() const { |
| 1149 return (is_data_processed && | 1147 return (is_data_processed() && |
| 1150 (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == | 1148 MultiBandSignal(capture_nonlocked_.fwd_proc_format.sample_rate_hz())); |
| 1151 kSampleRate32kHz || | |
| 1152 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == | |
| 1153 kSampleRate48kHz)); | |
| 1154 } | 1149 } |
| 1155 | 1150 |
| 1156 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { | 1151 bool AudioProcessingImpl::fwd_analysis_needed() const { |
| 1157 if (!is_data_processed && | 1152 if (!is_data_processed() && |
| 1158 !public_submodules_->voice_detection->is_enabled() && | 1153 !public_submodules_->voice_detection->is_enabled() && |
| 1159 !capture_.transient_suppressor_enabled) { | 1154 !capture_.transient_suppressor_enabled) { |
| 1160 // Only public_submodules_->level_estimator is enabled. | 1155 // Only public_submodules_->level_estimator is enabled. |
| 1161 return false; | 1156 return false; |
| 1162 } else if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == | 1157 } else if (MultiBandSignal( |
| 1163 kSampleRate32kHz || | 1158 capture_nonlocked_.fwd_proc_format.sample_rate_hz())) { |
| 1164 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == | |
| 1165 kSampleRate48kHz) { | |
| 1166 // Something besides public_submodules_->level_estimator is enabled, and we | 1159 // Something besides public_submodules_->level_estimator is enabled, and we |
| 1167 // have super-wb. | 1160 // have super-wb. |
| 1168 return true; | 1161 return true; |
| 1169 } | 1162 } |
| 1170 return false; | 1163 return false; |
| 1171 } | 1164 } |
| 1172 | 1165 |
| 1173 bool AudioProcessingImpl::is_rev_processed() const { | 1166 bool AudioProcessingImpl::is_rev_processed() const { |
| 1174 return constants_.intelligibility_enabled; | 1167 return constants_.intelligibility_enabled; |
| 1175 } | 1168 } |
| 1176 | 1169 |
| 1170 bool AudioProcessingImpl::rev_synthesis_needed() const { | |
| 1171 return (is_rev_processed() && | |
| 1172 MultiBandSignal(formats_.rev_proc_format.sample_rate_hz())); | |
| 1173 } | |
| 1174 | |
| 1175 bool AudioProcessingImpl::rev_analysis_needed() const { | |
| 1176 return MultiBandSignal(formats_.rev_proc_format.sample_rate_hz()); | |
| 1177 } | |
| 1178 | |
| 1177 bool AudioProcessingImpl::render_check_rev_conversion_needed() const { | 1179 bool AudioProcessingImpl::render_check_rev_conversion_needed() const { |
| 1178 return rev_conversion_needed(); | 1180 return rev_conversion_needed(); |
| 1179 } | 1181 } |
| 1180 | 1182 |
| 1181 bool AudioProcessingImpl::rev_conversion_needed() const { | 1183 bool AudioProcessingImpl::rev_conversion_needed() const { |
| 1182 return (formats_.api_format.reverse_input_stream() != | 1184 return (formats_.api_format.reverse_input_stream() != |
| 1183 formats_.api_format.reverse_output_stream()); | 1185 formats_.api_format.reverse_output_stream()); |
| 1184 } | 1186 } |
| 1185 | 1187 |
| 1186 void AudioProcessingImpl::InitializeExperimentalAgc() { | 1188 void AudioProcessingImpl::InitializeExperimentalAgc() { |
| (...skipping 257 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1444 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | 1446 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| 1445 | 1447 |
| 1446 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1448 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1447 &debug_dump_.num_bytes_left_for_log_, | 1449 &debug_dump_.num_bytes_left_for_log_, |
| 1448 &crit_debug_, &debug_dump_.capture)); | 1450 &crit_debug_, &debug_dump_.capture)); |
| 1449 return kNoError; | 1451 return kNoError; |
| 1450 } | 1452 } |
| 1451 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1453 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1452 | 1454 |
| 1453 } // namespace webrtc | 1455 } // namespace webrtc |
| OLD | NEW |