OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 109 matching lines...)
120 std::unique_ptr<VoiceDetectionImpl> voice_detection; | 120 std::unique_ptr<VoiceDetectionImpl> voice_detection; |
121 std::unique_ptr<GainControlForExperimentalAgc> | 121 std::unique_ptr<GainControlForExperimentalAgc> |
122 gain_control_for_experimental_agc; | 122 gain_control_for_experimental_agc; |
123 | 123 |
124 // Accessed internally from both render and capture. | 124 // Accessed internally from both render and capture. |
125 std::unique_ptr<TransientSuppressor> transient_suppressor; | 125 std::unique_ptr<TransientSuppressor> transient_suppressor; |
126 std::unique_ptr<IntelligibilityEnhancer> intelligibility_enhancer; | 126 std::unique_ptr<IntelligibilityEnhancer> intelligibility_enhancer; |
127 }; | 127 }; |
128 | 128 |
129 struct AudioProcessingImpl::ApmPrivateSubmodules { | 129 struct AudioProcessingImpl::ApmPrivateSubmodules { |
130 explicit ApmPrivateSubmodules(Beamformer<float>* beamformer) | 130 explicit ApmPrivateSubmodules(NonlinearBeamformer* beamformer) |
131 : beamformer(beamformer) {} | 131 : beamformer(beamformer) {} |
132 // Accessed internally from capture or during initialization | 132 // Accessed internally from capture or during initialization |
133 std::unique_ptr<Beamformer<float>> beamformer; | 133 std::unique_ptr<NonlinearBeamformer> beamformer; |
134 std::unique_ptr<AgcManagerDirect> agc_manager; | 134 std::unique_ptr<AgcManagerDirect> agc_manager; |
135 }; | 135 }; |
136 | 136 |
137 AudioProcessing* AudioProcessing::Create() { | 137 AudioProcessing* AudioProcessing::Create() { |
138 Config config; | 138 Config config; |
139 return Create(config, nullptr); | 139 return Create(config, nullptr); |
140 } | 140 } |
141 | 141 |
142 AudioProcessing* AudioProcessing::Create(const Config& config) { | 142 AudioProcessing* AudioProcessing::Create(const Config& config) { |
143 return Create(config, nullptr); | 143 return Create(config, nullptr); |
144 } | 144 } |
145 | 145 |
146 AudioProcessing* AudioProcessing::Create(const Config& config, | 146 AudioProcessing* AudioProcessing::Create(const Config& config, |
147 Beamformer<float>* beamformer) { | 147 NonlinearBeamformer* beamformer) { |
148 AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer); | 148 AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer); |
149 if (apm->Initialize() != kNoError) { | 149 if (apm->Initialize() != kNoError) { |
150 delete apm; | 150 delete apm; |
151 apm = nullptr; | 151 apm = nullptr; |
152 } | 152 } |
153 | 153 |
154 return apm; | 154 return apm; |
155 } | 155 } |
156 | 156 |
157 AudioProcessingImpl::AudioProcessingImpl(const Config& config) | 157 AudioProcessingImpl::AudioProcessingImpl(const Config& config) |
158 : AudioProcessingImpl(config, nullptr) {} | 158 : AudioProcessingImpl(config, nullptr) {} |
159 | 159 |
160 AudioProcessingImpl::AudioProcessingImpl(const Config& config, | 160 AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
161 Beamformer<float>* beamformer) | 161 NonlinearBeamformer* beamformer) |
162 : public_submodules_(new ApmPublicSubmodules()), | 162 : public_submodules_(new ApmPublicSubmodules()), |
163 private_submodules_(new ApmPrivateSubmodules(beamformer)), | 163 private_submodules_(new ApmPrivateSubmodules(beamformer)), |
164 constants_(config.Get<ExperimentalAgc>().startup_min_volume, | 164 constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
165 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 165 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
166 false, | 166 false, |
167 #else | 167 #else |
168 config.Get<ExperimentalAgc>().enabled, | 168 config.Get<ExperimentalAgc>().enabled, |
169 #endif | 169 #endif |
170 config.Get<Intelligibility>().enabled), | 170 config.Get<Intelligibility>().enabled), |
171 | 171 |
(...skipping 501 matching lines...)
673 private_submodules_->agc_manager->AnalyzePreProcess( | 673 private_submodules_->agc_manager->AnalyzePreProcess( |
674 ca->channels()[0], ca->num_channels(), | 674 ca->channels()[0], ca->num_channels(), |
675 capture_nonlocked_.fwd_proc_format.num_frames()); | 675 capture_nonlocked_.fwd_proc_format.num_frames()); |
676 } | 676 } |
677 | 677 |
678 if (fwd_analysis_needed()) { | 678 if (fwd_analysis_needed()) { |
679 ca->SplitIntoFrequencyBands(); | 679 ca->SplitIntoFrequencyBands(); |
680 } | 680 } |
681 | 681 |
682 if (capture_nonlocked_.beamformer_enabled) { | 682 if (capture_nonlocked_.beamformer_enabled) { |
683 private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(), | 683 private_submodules_->beamformer->AnalyzeChunk(*ca->split_data_f()); |
[Inline review comments on line 683]

peah-webrtc, 2016/06/08 12:04:55:
    What has changed here, is that now all the downmix […]

aluebs-webrtc, 2016/06/09 02:11:46:
    What do you mean with downmixing? Before there was […]

peah-webrtc, 2016/06/09 09:36:30:
    By downmixing I mean going from 2 to 1 channel, so […]

aluebs-webrtc, 2016/06/09 19:21:42:
    Personally I find "downmixing" to have a blending […]

(A sketch of the reordered beamformer calls follows the diff below.)
684 ca->split_data_f()); | 684 // Discards all channels but the leftmost one. |
685 ca->set_num_channels(1); | 685 ca->set_num_channels(1); |
686 } | 686 } |
687 | 687 |
688 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca); | 688 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca); |
689 RETURN_ON_ERR(public_submodules_->gain_control->AnalyzeCaptureAudio(ca)); | 689 RETURN_ON_ERR(public_submodules_->gain_control->AnalyzeCaptureAudio(ca)); |
690 public_submodules_->noise_suppression->AnalyzeCaptureAudio(ca); | 690 public_submodules_->noise_suppression->AnalyzeCaptureAudio(ca); |
691 | 691 |
692 // Ensure that the stream delay was set before the call to the | 692 // Ensure that the stream delay was set before the call to the |
693 // AEC ProcessCaptureAudio function. | 693 // AEC ProcessCaptureAudio function. |
694 if (public_submodules_->echo_cancellation->is_enabled() && | 694 if (public_submodules_->echo_cancellation->is_enabled() && |
(...skipping 20 matching lines...)
715 // Ensure that the stream delay was set before the call to the | 715 // Ensure that the stream delay was set before the call to the |
716 // AECM ProcessCaptureAudio function. | 716 // AECM ProcessCaptureAudio function. |
717 if (public_submodules_->echo_control_mobile->is_enabled() && | 717 if (public_submodules_->echo_control_mobile->is_enabled() && |
718 !was_stream_delay_set()) { | 718 !was_stream_delay_set()) { |
719 return AudioProcessing::kStreamParameterNotSetError; | 719 return AudioProcessing::kStreamParameterNotSetError; |
720 } | 720 } |
721 | 721 |
722 RETURN_ON_ERR(public_submodules_->echo_control_mobile->ProcessCaptureAudio( | 722 RETURN_ON_ERR(public_submodules_->echo_control_mobile->ProcessCaptureAudio( |
723 ca, stream_delay_ms())); | 723 ca, stream_delay_ms())); |
724 | 724 |
725 if (capture_nonlocked_.beamformer_enabled) { | |
726 private_submodules_->beamformer->PostFilter(ca->split_data_f()); | |
727 } | |
728 | |
725 public_submodules_->voice_detection->ProcessCaptureAudio(ca); | 729 public_submodules_->voice_detection->ProcessCaptureAudio(ca); |
726 | 730 |
727 if (constants_.use_experimental_agc && | 731 if (constants_.use_experimental_agc && |
728 public_submodules_->gain_control->is_enabled() && | 732 public_submodules_->gain_control->is_enabled() && |
729 (!capture_nonlocked_.beamformer_enabled || | 733 (!capture_nonlocked_.beamformer_enabled || |
730 private_submodules_->beamformer->is_target_present())) { | 734 private_submodules_->beamformer->is_target_present())) { |
731 private_submodules_->agc_manager->Process( | 735 private_submodules_->agc_manager->Process( |
732 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), | 736 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), |
733 capture_nonlocked_.split_rate); | 737 capture_nonlocked_.split_rate); |
734 } | 738 } |
(...skipping 726 matching lines...)
1461 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | 1465 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
1462 | 1466 |
1463 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1467 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
1464 &debug_dump_.num_bytes_left_for_log_, | 1468 &debug_dump_.num_bytes_left_for_log_, |
1465 &crit_debug_, &debug_dump_.capture)); | 1469 &crit_debug_, &debug_dump_.capture)); |
1466 return kNoError; | 1470 return kNoError; |
1467 } | 1471 } |
1468 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1472 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
1469 | 1473 |
1470 } // namespace webrtc | 1474 } // namespace webrtc |
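
Below is a minimal, compilable sketch of the change the reviewers are discussing above: the single beamformer->ProcessChunk() call is split into an AnalyzeChunk() call at the top of the capture path (after which all channels but the leftmost one are discarded) and a PostFilter() call that applies the beamformer mask only after AECM has run. The AudioBuffer and NonlinearBeamformer types here are simplified stand-ins for the real WebRTC classes, not their actual interfaces; only the method names and their ordering are taken from the diff.

```cpp
// Stand-in types: the real AudioBuffer and NonlinearBeamformer live in
// modules/audio_processing; only the method names and call order below
// mirror the diff.
#include <vector>

struct AudioBuffer {
  std::vector<std::vector<float>> split_data;  // per-channel split-band data
  int num_channels = 2;
  void set_num_channels(int n) { num_channels = n; }
};

struct NonlinearBeamformer {
  // Old interface: one call that both analyzed the input and wrote the
  // beamformed output.
  void ProcessChunk(const AudioBuffer& /*in*/, AudioBuffer* /*out*/) {}
  // New interface: analysis up front...
  void AnalyzeChunk(const AudioBuffer& /*in*/) {}
  // ...and the masking post-filter applied later in the capture chain.
  void PostFilter(AudioBuffer* /*split_data*/) {}
  bool is_target_present() const { return true; }
};

// Reordered capture path (heavily abridged).
void ProcessCaptureStream(NonlinearBeamformer* beamformer,
                          AudioBuffer* ca,
                          bool beamformer_enabled) {
  if (beamformer_enabled) {
    beamformer->AnalyzeChunk(*ca);  // was: beamformer->ProcessChunk(*ca, ca);
    ca->set_num_channels(1);        // keep only the leftmost channel
  }

  // ...high-pass filter, gain-control analysis, noise suppression,
  // AEC and AECM processing run here, before the beamformer mask...

  if (beamformer_enabled) {
    beamformer->PostFilter(ca);     // new: apply the mask after AECM
  }

  // ...voice detection, experimental AGC (gated on is_target_present()), etc.
}

int main() {
  NonlinearBeamformer beamformer;
  AudioBuffer capture_audio;
  ProcessCaptureStream(&beamformer, &capture_audio,
                       /*beamformer_enabled=*/true);
  return 0;
}
```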