OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 #include <iostream> |
aluebs-webrtc
2015/07/21 01:50:55
Left from debugging, right?
ekm
2015/07/21 19:22:13
Yep. Done.
| |
11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" | 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" |
12 | 12 |
13 #include <assert.h> | 13 #include <assert.h> |
14 | 14 |
15 #include "webrtc/base/checks.h" | 15 #include "webrtc/base/checks.h" |
16 #include "webrtc/base/platform_file.h" | 16 #include "webrtc/base/platform_file.h" |
17 #include "webrtc/common_audio/include/audio_util.h" | 17 #include "webrtc/common_audio/include/audio_util.h" |
18 #include "webrtc/common_audio/channel_buffer.h" | 18 #include "webrtc/common_audio/channel_buffer.h" |
19 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar y.h" | 19 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar y.h" |
20 extern "C" { | 20 extern "C" { |
21 #include "webrtc/modules/audio_processing/aec/aec_core.h" | 21 #include "webrtc/modules/audio_processing/aec/aec_core.h" |
22 } | 22 } |
23 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h" | 23 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h" |
24 #include "webrtc/modules/audio_processing/audio_buffer.h" | 24 #include "webrtc/modules/audio_processing/audio_buffer.h" |
25 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h" | 25 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h" |
26 #include "webrtc/modules/audio_processing/common.h" | 26 #include "webrtc/modules/audio_processing/common.h" |
27 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h" | 27 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h" |
28 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" | 28 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" |
29 #include "webrtc/modules/audio_processing/gain_control_impl.h" | 29 #include "webrtc/modules/audio_processing/gain_control_impl.h" |
30 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" | 30 #include "webrtc/modules/audio_processing/high_pass_filter_impl.h" |
31 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc er.h" | |
31 #include "webrtc/modules/audio_processing/level_estimator_impl.h" | 32 #include "webrtc/modules/audio_processing/level_estimator_impl.h" |
32 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" | 33 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" |
33 #include "webrtc/modules/audio_processing/processing_component.h" | 34 #include "webrtc/modules/audio_processing/processing_component.h" |
34 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" | 35 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" |
35 #include "webrtc/modules/audio_processing/voice_detection_impl.h" | 36 #include "webrtc/modules/audio_processing/voice_detection_impl.h" |
36 #include "webrtc/modules/interface/module_common_types.h" | 37 #include "webrtc/modules/interface/module_common_types.h" |
37 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h" | 38 #include "webrtc/system_wrappers/interface/critical_section_wrapper.h" |
38 #include "webrtc/system_wrappers/interface/file_wrapper.h" | 39 #include "webrtc/system_wrappers/interface/file_wrapper.h" |
39 #include "webrtc/system_wrappers/interface/logging.h" | 40 #include "webrtc/system_wrappers/interface/logging.h" |
40 #include "webrtc/system_wrappers/interface/metrics.h" | 41 #include "webrtc/system_wrappers/interface/metrics.h" |
(...skipping 147 matching lines...)
188 #endif | 189 #endif |
189 agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), | 190 agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), |
190 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 191 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
191 transient_suppressor_enabled_(false), | 192 transient_suppressor_enabled_(false), |
192 #else | 193 #else |
193 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), | 194 transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), |
194 #endif | 195 #endif |
195 beamformer_enabled_(config.Get<Beamforming>().enabled), | 196 beamformer_enabled_(config.Get<Beamforming>().enabled), |
196 beamformer_(beamformer), | 197 beamformer_(beamformer), |
197 array_geometry_(config.Get<Beamforming>().array_geometry), | 198 array_geometry_(config.Get<Beamforming>().array_geometry), |
198 supports_48kHz_(config.Get<AudioProcessing48kHzSupport>().enabled) { | 199 supports_48kHz_(config.Get<AudioProcessing48kHzSupport>().enabled), |
200 intelligibility_enabled_(config.Get<Intelligibility>().enabled) { | |
199 echo_cancellation_ = new EchoCancellationImpl(this, crit_); | 201 echo_cancellation_ = new EchoCancellationImpl(this, crit_); |
200 component_list_.push_back(echo_cancellation_); | 202 component_list_.push_back(echo_cancellation_); |
201 | 203 |
202 echo_control_mobile_ = new EchoControlMobileImpl(this, crit_); | 204 echo_control_mobile_ = new EchoControlMobileImpl(this, crit_); |
203 component_list_.push_back(echo_control_mobile_); | 205 component_list_.push_back(echo_control_mobile_); |
204 | 206 |
205 gain_control_ = new GainControlImpl(this, crit_); | 207 gain_control_ = new GainControlImpl(this, crit_); |
206 component_list_.push_back(gain_control_); | 208 component_list_.push_back(gain_control_); |
207 | 209 |
208 high_pass_filter_ = new HighPassFilterImpl(this, crit_); | 210 high_pass_filter_ = new HighPassFilterImpl(this, crit_); |
(...skipping 89 matching lines...)
298 return err; | 300 return err; |
299 } | 301 } |
300 } | 302 } |
301 | 303 |
302 InitializeExperimentalAgc(); | 304 InitializeExperimentalAgc(); |
303 | 305 |
304 InitializeTransient(); | 306 InitializeTransient(); |
305 | 307 |
306 InitializeBeamformer(); | 308 InitializeBeamformer(); |
307 | 309 |
310 InitializeIntelligibility(); | |
311 | |
308 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 312 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
309 if (debug_file_->Open()) { | 313 if (debug_file_->Open()) { |
310 int err = WriteInitMessage(); | 314 int err = WriteInitMessage(); |
311 if (err != kNoError) { | 315 if (err != kNoError) { |
312 return err; | 316 return err; |
313 } | 317 } |
314 } | 318 } |
315 #endif | 319 #endif |
316 | 320 |
317 return kNoError; | 321 return kNoError; |
(...skipping 102 matching lines...)
420 void AudioProcessingImpl::SetExtraOptions(const Config& config) { | 424 void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
421 CriticalSectionScoped crit_scoped(crit_); | 425 CriticalSectionScoped crit_scoped(crit_); |
422 for (auto item : component_list_) { | 426 for (auto item : component_list_) { |
423 item->SetExtraOptions(config); | 427 item->SetExtraOptions(config); |
424 } | 428 } |
425 | 429 |
426 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { | 430 if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { |
427 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; | 431 transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; |
428 InitializeTransient(); | 432 InitializeTransient(); |
429 } | 433 } |
434 | |
435 if (intelligibility_enabled_ != config.Get<Intelligibility>().enabled) { | |
Andrew MacDonald
2015/07/21 19:29:21
So, remove this.
ekm
2015/07/23 00:26:28
Done.
| |
436 intelligibility_enabled_ = config.Get<Intelligibility>().enabled; | |
437 InitializeIntelligibility(); | |
438 } | |
430 } | 439 } |
431 | 440 |
432 int AudioProcessingImpl::input_sample_rate_hz() const { | 441 int AudioProcessingImpl::input_sample_rate_hz() const { |
433 CriticalSectionScoped crit_scoped(crit_); | 442 CriticalSectionScoped crit_scoped(crit_); |
434 return fwd_in_format_.rate(); | 443 return fwd_in_format_.rate(); |
435 } | 444 } |
436 | 445 |
437 int AudioProcessingImpl::sample_rate_hz() const { | 446 int AudioProcessingImpl::sample_rate_hz() const { |
438 CriticalSectionScoped crit_scoped(crit_); | 447 CriticalSectionScoped crit_scoped(crit_); |
439 return fwd_in_format_.rate(); | 448 return fwd_in_format_.rate(); |
(...skipping 152 matching lines...)
592 msg->set_delay(stream_delay_ms_); | 601 msg->set_delay(stream_delay_ms_); |
593 msg->set_drift(echo_cancellation_->stream_drift_samples()); | 602 msg->set_drift(echo_cancellation_->stream_drift_samples()); |
594 msg->set_level(gain_control()->stream_analog_level()); | 603 msg->set_level(gain_control()->stream_analog_level()); |
595 msg->set_keypress(key_pressed_); | 604 msg->set_keypress(key_pressed_); |
596 } | 605 } |
597 #endif | 606 #endif |
598 | 607 |
599 MaybeUpdateHistograms(); | 608 MaybeUpdateHistograms(); |
600 | 609 |
601 AudioBuffer* ca = capture_audio_.get(); // For brevity. | 610 AudioBuffer* ca = capture_audio_.get(); // For brevity. |
611 | |
602 if (use_new_agc_ && gain_control_->is_enabled()) { | 612 if (use_new_agc_ && gain_control_->is_enabled()) { |
603 agc_manager_->AnalyzePreProcess(ca->channels()[0], | 613 agc_manager_->AnalyzePreProcess(ca->channels()[0], |
604 ca->num_channels(), | 614 ca->num_channels(), |
605 fwd_proc_format_.samples_per_channel()); | 615 fwd_proc_format_.samples_per_channel()); |
606 } | 616 } |
607 | 617 |
608 bool data_processed = is_data_processed(); | 618 bool data_processed = is_data_processed(); |
609 if (analysis_needed(data_processed)) { | 619 if (analysis_needed(data_processed)) { |
610 ca->SplitIntoFrequencyBands(); | 620 ca->SplitIntoFrequencyBands(); |
611 } | 621 } |
612 | 622 |
623 if (intelligibility_enabled_) { | |
624 intelligibility_enhancer_->AnalyzeCaptureAudio( | |
625 ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels()); | |
626 } | |
627 | |
613 if (beamformer_enabled_) { | 628 if (beamformer_enabled_) { |
614 beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f()); | 629 beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f()); |
615 ca->set_num_channels(1); | 630 ca->set_num_channels(1); |
616 } | 631 } |
617 | 632 |
618 RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca)); | 633 RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca)); |
619 RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca)); | 634 RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca)); |
620 RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca)); | 635 RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca)); |
621 RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca)); | 636 RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca)); |
622 | 637 |
(...skipping 34 matching lines...)
657 key_pressed_); | 672 key_pressed_); |
658 } | 673 } |
659 | 674 |
660 // The level estimator operates on the recombined data. | 675 // The level estimator operates on the recombined data. |
661 RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); | 676 RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); |
662 | 677 |
663 was_stream_delay_set_ = false; | 678 was_stream_delay_set_ = false; |
664 return kNoError; | 679 return kNoError; |
665 } | 680 } |
666 | 681 |
682 int AudioProcessingImpl::ProcessReverseStream(float* const* data, | |
683 int samples_per_channel, | |
684 int rev_sample_rate_hz, | |
685 ChannelLayout layout) { | |
686 RETURN_ON_ERR(AnalyzeReverseStream(data, samples_per_channel, | |
687 rev_sample_rate_hz, layout)); | |
688 if (intelligibility_enabled_) { | |
689 render_audio_->CopyTo(samples_per_channel, layout, data); | |
690 } | |
691 | |
692 return kNoError; | |
693 } | |
694 | |
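For readers following the new render path, here is a minimal call sketch. It is not part of this CL; it assumes ProcessReverseStream() is also exposed on the public AudioProcessing interface and uses a hypothetical mono 16 kHz far-end stream. The point is the intended pattern: far-end audio goes through ProcessReverseStream() (rather than AnalyzeReverseStream()) so the intelligibility enhancer can modify it in place before playout.

    // Hypothetical usage sketch (not from this CL). |apm| is an already
    // configured webrtc::AudioProcessing instance with intelligibility enabled.
    const int kSampleRateHz = 16000;
    const int kSamplesPerChannel = kSampleRateHz / 100;  // One 10 ms chunk.
    float mono[kSamplesPerChannel] = {0.f};
    float* channels[] = {mono};
    // The enhancer may rewrite |channels| in place; play out the result.
    int err = apm->ProcessReverseStream(channels, kSamplesPerChannel,
                                        kSampleRateHz, webrtc::kMono);
    if (err != webrtc::AudioProcessing::kNoError) {
      // Handle the error as appropriate for the caller.
    }
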
667 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, | 695 int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
668 int samples_per_channel, | 696 int samples_per_channel, |
669 int sample_rate_hz, | 697 int rev_sample_rate_hz, |
670 ChannelLayout layout) { | 698 ChannelLayout layout) { |
671 CriticalSectionScoped crit_scoped(crit_); | 699 CriticalSectionScoped crit_scoped(crit_); |
672 if (data == NULL) { | 700 if (data == NULL) { |
673 return kNullPointerError; | 701 return kNullPointerError; |
674 } | 702 } |
675 | 703 |
676 const int num_channels = ChannelsFromLayout(layout); | 704 const int num_channels = ChannelsFromLayout(layout); |
677 RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(), | 705 RETURN_ON_ERR( |
678 fwd_out_format_.rate(), | 706 MaybeInitializeLocked(fwd_in_format_.rate(), fwd_out_format_.rate(), |
679 sample_rate_hz, | 707 rev_sample_rate_hz, fwd_in_format_.num_channels(), |
680 fwd_in_format_.num_channels(), | 708 fwd_out_format_.num_channels(), num_channels)); |
681 fwd_out_format_.num_channels(), | |
682 num_channels)); | |
683 if (samples_per_channel != rev_in_format_.samples_per_channel()) { | 709 if (samples_per_channel != rev_in_format_.samples_per_channel()) { |
684 return kBadDataLengthError; | 710 return kBadDataLengthError; |
685 } | 711 } |
686 | 712 |
687 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 713 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
688 if (debug_file_->Open()) { | 714 if (debug_file_->Open()) { |
689 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 715 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
690 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 716 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
691 const size_t channel_size = | 717 const size_t channel_size = |
692 sizeof(float) * rev_in_format_.samples_per_channel(); | 718 sizeof(float) * rev_in_format_.samples_per_channel(); |
(...skipping 16 matching lines...)
709 if (frame->sample_rate_hz_ != kSampleRate8kHz && | 735 if (frame->sample_rate_hz_ != kSampleRate8kHz && |
710 frame->sample_rate_hz_ != kSampleRate16kHz && | 736 frame->sample_rate_hz_ != kSampleRate16kHz && |
711 frame->sample_rate_hz_ != kSampleRate32kHz && | 737 frame->sample_rate_hz_ != kSampleRate32kHz && |
712 frame->sample_rate_hz_ != kSampleRate48kHz) { | 738 frame->sample_rate_hz_ != kSampleRate48kHz) { |
713 return kBadSampleRateError; | 739 return kBadSampleRateError; |
714 } | 740 } |
715 // This interface does not tolerate different forward and reverse rates. | 741 // This interface does not tolerate different forward and reverse rates. |
716 if (frame->sample_rate_hz_ != fwd_in_format_.rate()) { | 742 if (frame->sample_rate_hz_ != fwd_in_format_.rate()) { |
717 return kBadSampleRateError; | 743 return kBadSampleRateError; |
718 } | 744 } |
719 | |
720 RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(), | 745 RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(), |
721 fwd_out_format_.rate(), | 746 fwd_out_format_.rate(), |
722 frame->sample_rate_hz_, | 747 frame->sample_rate_hz_, |
723 fwd_in_format_.num_channels(), | 748 fwd_in_format_.num_channels(), |
724 fwd_in_format_.num_channels(), | 749 fwd_in_format_.num_channels(), |
725 frame->num_channels_)); | 750 frame->num_channels_)); |
726 if (frame->samples_per_channel_ != rev_in_format_.samples_per_channel()) { | 751 if (frame->samples_per_channel_ != rev_in_format_.samples_per_channel()) { |
727 return kBadDataLengthError; | 752 return kBadDataLengthError; |
728 } | 753 } |
729 | 754 |
730 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 755 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
731 if (debug_file_->Open()) { | 756 if (debug_file_->Open()) { |
732 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); | 757 event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
733 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); | 758 audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
734 const size_t data_size = sizeof(int16_t) * | 759 const size_t data_size = sizeof(int16_t) * |
735 frame->samples_per_channel_ * | 760 frame->samples_per_channel_ * |
736 frame->num_channels_; | 761 frame->num_channels_; |
737 msg->set_data(frame->data_, data_size); | 762 msg->set_data(frame->data_, data_size); |
738 RETURN_ON_ERR(WriteMessageToDebugFile()); | 763 RETURN_ON_ERR(WriteMessageToDebugFile()); |
739 } | 764 } |
740 #endif | 765 #endif |
766 render_audio_->DeinterleaveFrom(frame); | |
767 RETURN_ON_ERR(AnalyzeReverseStreamLocked()); | |
768 render_audio_->InterleaveTo(frame, intelligibility_enabled_); | |
741 | 769 |
742 render_audio_->DeinterleaveFrom(frame); | 770 return kNoError; |
743 return AnalyzeReverseStreamLocked(); | |
744 } | 771 } |
745 | 772 |
746 int AudioProcessingImpl::AnalyzeReverseStreamLocked() { | 773 int AudioProcessingImpl::AnalyzeReverseStreamLocked() { |
Andrew MacDonald
2015/07/21 19:29:22
So AnalyzeReverseStream is no longer just for analysis.
ekm
2015/07/23 00:26:28
Yep, sounds good. I've re-renamed AnalyzeReverseSt
Andrew MacDonald
2015/07/24 23:50:39
Yes, mark AnalyzeReverseStream as deprecated in au
ekm
2015/07/29 00:37:19
Done.
| |
747 AudioBuffer* ra = render_audio_.get(); // For brevity. | 774 AudioBuffer* ra = render_audio_.get(); // For brevity. |
748 if (rev_proc_format_.rate() == kSampleRate32kHz) { | 775 if (rev_proc_format_.rate() == kSampleRate32kHz) { |
749 ra->SplitIntoFrequencyBands(); | 776 ra->SplitIntoFrequencyBands(); |
750 } | 777 } |
751 | 778 |
779 if (intelligibility_enabled_) { | |
780 intelligibility_enhancer_->ProcessRenderAudio( | |
781 ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); | |
782 } | |
783 | |
752 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); | 784 RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); |
753 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); | 785 RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); |
754 if (!use_new_agc_) { | 786 if (!use_new_agc_) { |
755 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); | 787 RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); |
756 } | 788 } |
757 | 789 |
790 if (rev_proc_format_.rate() == kSampleRate32kHz) { | |
791 ra->MergeFrequencyBands(); | |
792 } | |
793 | |
758 return kNoError; | 794 return kNoError; |
759 } | 795 } |
760 | 796 |
761 int AudioProcessingImpl::set_stream_delay_ms(int delay) { | 797 int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
762 Error retval = kNoError; | 798 Error retval = kNoError; |
763 was_stream_delay_set_ = true; | 799 was_stream_delay_set_ = true; |
764 delay += delay_offset_ms_; | 800 delay += delay_offset_ms_; |
765 | 801 |
766 if (delay < 0) { | 802 if (delay < 0) { |
767 delay = 0; | 803 delay = 0; |
(...skipping 226 matching lines...)
994 | 1030 |
995 void AudioProcessingImpl::InitializeBeamformer() { | 1031 void AudioProcessingImpl::InitializeBeamformer() { |
996 if (beamformer_enabled_) { | 1032 if (beamformer_enabled_) { |
997 if (!beamformer_) { | 1033 if (!beamformer_) { |
998 beamformer_.reset(new NonlinearBeamformer(array_geometry_)); | 1034 beamformer_.reset(new NonlinearBeamformer(array_geometry_)); |
999 } | 1035 } |
1000 beamformer_->Initialize(kChunkSizeMs, split_rate_); | 1036 beamformer_->Initialize(kChunkSizeMs, split_rate_); |
1001 } | 1037 } |
1002 } | 1038 } |
1003 | 1039 |
1040 void AudioProcessingImpl::InitializeIntelligibility() { | |
1041 if (intelligibility_enabled_) { | |
1042 IntelligibilityEnhancer::Config config; | |
1043 config.sample_rate_hz = split_rate_; | |
1044 config.num_channels = 1; // TODO(ekmeyerson): Handle multiple channels. | |
1045 intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config)); | |
1046 } | |
1047 } | |
1048 | |
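For orientation on how intelligibility_enabled_ gets set in the first place, a minimal construction sketch follows. It is not part of this CL and assumes the new Intelligibility config struct lives in audio_processing.h and follows the same bool-constructor pattern as ExperimentalNs; treat those names as assumptions.

    // Hypothetical sketch: enabling the enhancer via the Config mechanism.
    webrtc::Config config;
    config.Set<webrtc::Intelligibility>(new webrtc::Intelligibility(true));
    webrtc::AudioProcessing* apm = webrtc::AudioProcessing::Create(config);
    // InitializeIntelligibility() above then constructs the enhancer at the
    // split rate with a single channel.
    // ... use apm ...
    delete apm;
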
1004 void AudioProcessingImpl::MaybeUpdateHistograms() { | 1049 void AudioProcessingImpl::MaybeUpdateHistograms() { |
1005 static const int kMinDiffDelayMs = 60; | 1050 static const int kMinDiffDelayMs = 60; |
1006 | 1051 |
1007 if (echo_cancellation()->is_enabled()) { | 1052 if (echo_cancellation()->is_enabled()) { |
1008 // Activate delay_jumps_ counters if we know echo_cancellation is running. | 1053 // Activate delay_jumps_ counters if we know echo_cancellation is running. |
1009 // If a stream has echo we know that the echo_cancellation is in process. | 1054 // If a stream has echo we know that the echo_cancellation is in process. |
1010 if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { | 1055 if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { |
1011 stream_delay_jumps_ = 0; | 1056 stream_delay_jumps_ = 0; |
1012 } | 1057 } |
1013 if (aec_system_delay_jumps_ == -1 && | 1058 if (aec_system_delay_jumps_ == -1 && |
(...skipping 92 matching lines...)
1106 int err = WriteMessageToDebugFile(); | 1151 int err = WriteMessageToDebugFile(); |
1107 if (err != kNoError) { | 1152 if (err != kNoError) { |
1108 return err; | 1153 return err; |
1109 } | 1154 } |
1110 | 1155 |
1111 return kNoError; | 1156 return kNoError; |
1112 } | 1157 } |
1113 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1158 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
1114 | 1159 |
1115 } // namespace webrtc | 1160 } // namespace webrtc |