Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_processing/audio_processing_impl.cc

Issue 1773173002: Don't always downsample to 16kHz in the reverse stream in APM (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@aecm
Patch Set: Created 4 years, 9 months ago
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 56 matching lines...)
67 case AudioProcessing::kStereo: 67 case AudioProcessing::kStereo:
68 return false; 68 return false;
69 case AudioProcessing::kMonoAndKeyboard: 69 case AudioProcessing::kMonoAndKeyboard:
70 case AudioProcessing::kStereoAndKeyboard: 70 case AudioProcessing::kStereoAndKeyboard:
71 return true; 71 return true;
72 } 72 }
73 73
74 assert(false); 74 assert(false);
75 return false; 75 return false;
76 } 76 }
77
78 bool is_multi_band(int sample_rate_hz) {
peah-webrtc 2016/03/09 13:25:43 I think the style guide warrants a different name
aluebs-webrtc 2016/03/09 14:27:48 I thought 2 "==" and 1 "&&" could be considered "cheap".
peah-webrtc 2016/03/10 06:59:24 You are of course right about the cheapness. And h
aluebs-webrtc 2016/03/10 15:34:32 Done.
79 return sample_rate_hz == AudioProcessing::kSampleRate32kHz ||
80 sample_rate_hz == AudioProcessing::kSampleRate48kHz;
81 }
82
83 int get_proc_rate(int input_rate, int output_rate) {
peah-webrtc 2016/03/09 13:25:43 The term proc is currently quite ambiguous in APM.
peah-webrtc 2016/03/09 13:25:44 I think the style guide warrants a different name
aluebs-webrtc 2016/03/09 14:27:48 "proc" always means "for processing" and both "fwd
aluebs-webrtc 2016/03/09 14:27:48 In this case I agree that the definition of "cheap
peah-webrtc 2016/03/10 06:59:24 Nice! Acknowledged.
84 // We process at the closest native rate >= min(input rate, output rate)...
85 const int min_proc_rate = std::min(input_rate, output_rate);
86 int proc_rate;
87 for (size_t i = 0; i < AudioProcessing::kNumNativeSampleRates; ++i) {
hlundin-webrtc 2016/03/09 12:10:57 I suggest a re-write of the for loop: for (int rat
aluebs-webrtc 2016/03/09 13:16:02 I see that I am more sensitive than most when writ
peah-webrtc 2016/03/09 13:25:44 I think this should be an example where an ArrayVi
peah-webrtc 2016/03/09 13:31:18 My error, Hlundin's example is actually much better
88 proc_rate = AudioProcessing::kNativeSampleRatesHz[i];
89 if (proc_rate >= min_proc_rate) {
90 break;
91 }
92 }
93 return proc_rate;
94 }
95
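For illustration only, here is a minimal, self-contained sketch of the range-based loop that hlundin-webrtc suggests above. The names ClosestNativeRate and kNativeRatesHz are placeholders, not the actual WebRTC identifiers, and the fallback mirrors what get_proc_rate() effectively does when no native rate is high enough.

    #include <algorithm>

    // Native processing rates in ascending order (assumed to match
    // AudioProcessing::kNativeSampleRatesHz).
    constexpr int kNativeRatesHz[] = {8000, 16000, 32000, 48000};

    int ClosestNativeRate(int input_rate, int output_rate) {
      const int min_rate = std::min(input_rate, output_rate);
      for (int rate : kNativeRatesHz) {
        if (rate >= min_rate) {
          return rate;  // Closest native rate >= min(input, output).
        }
      }
      // No native rate is high enough; keep the highest one, as the
      // original index-based loop ends up doing.
      return kNativeRatesHz[3];
    }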
77 } // namespace 96 } // namespace
78 97
79 // Throughout webrtc, it's assumed that success is represented by zero. 98 // Throughout webrtc, it's assumed that success is represented by zero.
80 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); 99 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");
81 100
82 struct AudioProcessingImpl::ApmPublicSubmodules { 101 struct AudioProcessingImpl::ApmPublicSubmodules {
83 ApmPublicSubmodules() 102 ApmPublicSubmodules()
84 : echo_cancellation(nullptr), 103 : echo_cancellation(nullptr),
85 echo_control_mobile(nullptr), 104 echo_control_mobile(nullptr),
86 gain_control(nullptr) {} 105 gain_control(nullptr) {}
(...skipping 270 matching lines...)
357 return kBadNumberChannelsError; 376 return kBadNumberChannelsError;
358 } 377 }
359 378
360 if (capture_nonlocked_.beamformer_enabled && 379 if (capture_nonlocked_.beamformer_enabled &&
361 num_in_channels != capture_.array_geometry.size()) { 380 num_in_channels != capture_.array_geometry.size()) {
362 return kBadNumberChannelsError; 381 return kBadNumberChannelsError;
363 } 382 }
364 383
365 formats_.api_format = config; 384 formats_.api_format = config;
366 385
367 // We process at the closest native rate >= min(input rate, output rate). 386 capture_nonlocked_.fwd_proc_format = StreamConfig(get_proc_rate(
368 const int min_proc_rate = 387 formats_.api_format.input_stream().sample_rate_hz(),
369 std::min(formats_.api_format.input_stream().sample_rate_hz(), 388 formats_.api_format.output_stream().sample_rate_hz()));
370 formats_.api_format.output_stream().sample_rate_hz());
371 int fwd_proc_rate;
372 for (size_t i = 0; i < kNumNativeSampleRates; ++i) {
373 fwd_proc_rate = kNativeSampleRatesHz[i];
374 if (fwd_proc_rate >= min_proc_rate) {
375 break;
376 }
377 }
378 389
379 capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate); 390 int rev_proc_rate = get_proc_rate(
380 391 formats_.api_format.reverse_input_stream().sample_rate_hz(),
381 // We normally process the reverse stream at 16 kHz. Unless... 392 formats_.api_format.reverse_output_stream().sample_rate_hz());
382 int rev_proc_rate = kSampleRate16kHz; 393 // If the forward sample rate is 8 kHz, the reverse stream is also processed
394 // at this rate.
383 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { 395 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) {
384 // ...the forward stream is at 8 kHz.
385 rev_proc_rate = kSampleRate8kHz; 396 rev_proc_rate = kSampleRate8kHz;
386 } else {
387 if (formats_.api_format.reverse_input_stream().sample_rate_hz() ==
388 kSampleRate32kHz) {
389 // ...or the input is at 32 kHz, in which case we use the splitting
390 // filter rather than the resampler.
391 rev_proc_rate = kSampleRate32kHz;
392 }
393 } 397 }
394 398
395 // Always downmix the reverse stream to mono for analysis. This has been 399 // Always downmix the reverse stream to mono for analysis. This has been
396 // demonstrated to work well for AEC in most practical scenarios. 400 // demonstrated to work well for AEC in most practical scenarios.
397 formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1); 401 formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1);
398 402
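For illustration (these values are chosen here, not taken from the CL), the new reverse-stream rate selection compares with the old fixed 16 kHz behavior as follows, assuming the native rates 8/16/32/48 kHz:

    // reverse in/out (Hz)   forward rate (Hz)   rev_proc_rate (Hz)
    // 48000 / 48000         48000               48000  (was 16000)
    // 32000 / 48000         48000               32000  (unchanged; splitting filter)
    // 44100 / 16000         48000               16000  (unchanged)
    // 48000 / 48000          8000                8000  (clamped to the forward rate, as before)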
399 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz || 403 if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz ||
400 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) { 404 capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) {
401 capture_nonlocked_.split_rate = kSampleRate16kHz; 405 capture_nonlocked_.split_rate = kSampleRate16kHz;
402 } else { 406 } else {
(...skipping 238 matching lines...)
641 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); 645 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM);
642 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); 646 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
643 const size_t data_size = 647 const size_t data_size =
644 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 648 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
645 msg->set_input_data(frame->data_, data_size); 649 msg->set_input_data(frame->data_, data_size);
646 } 650 }
647 #endif 651 #endif
648 652
649 capture_.capture_audio->DeinterleaveFrom(frame); 653 capture_.capture_audio->DeinterleaveFrom(frame);
650 RETURN_ON_ERR(ProcessStreamLocked()); 654 RETURN_ON_ERR(ProcessStreamLocked());
651 capture_.capture_audio->InterleaveTo(frame, 655 capture_.capture_audio->InterleaveTo(frame, output_copy_needed());
652 output_copy_needed(is_data_processed()));
653 656
654 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 657 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
655 if (debug_dump_.debug_file->Open()) { 658 if (debug_dump_.debug_file->Open()) {
656 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); 659 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
657 const size_t data_size = 660 const size_t data_size =
658 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 661 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
659 msg->set_output_data(frame->data_, data_size); 662 msg->set_output_data(frame->data_, data_size);
660 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), 663 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
661 &debug_dump_.num_bytes_left_for_log_, 664 &debug_dump_.num_bytes_left_for_log_,
662 &crit_debug_, &debug_dump_.capture)); 665 &crit_debug_, &debug_dump_.capture));
(...skipping 19 matching lines...)
682 685
683 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. 686 AudioBuffer* ca = capture_.capture_audio.get(); // For brevity.
684 687
685 if (constants_.use_experimental_agc && 688 if (constants_.use_experimental_agc &&
686 public_submodules_->gain_control->is_enabled()) { 689 public_submodules_->gain_control->is_enabled()) {
687 private_submodules_->agc_manager->AnalyzePreProcess( 690 private_submodules_->agc_manager->AnalyzePreProcess(
688 ca->channels()[0], ca->num_channels(), 691 ca->channels()[0], ca->num_channels(),
689 capture_nonlocked_.fwd_proc_format.num_frames()); 692 capture_nonlocked_.fwd_proc_format.num_frames());
690 } 693 }
691 694
692 bool data_processed = is_data_processed(); 695 if (analysis_needed()) {
693 if (analysis_needed(data_processed)) {
694 ca->SplitIntoFrequencyBands(); 696 ca->SplitIntoFrequencyBands();
695 } 697 }
696 698
697 if (capture_nonlocked_.beamformer_enabled) { 699 if (capture_nonlocked_.beamformer_enabled) {
698 private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(), 700 private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(),
699 ca->split_data_f()); 701 ca->split_data_f());
700 ca->set_num_channels(1); 702 ca->set_num_channels(1);
701 } 703 }
702 704
703 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca); 705 public_submodules_->high_pass_filter->ProcessCaptureAudio(ca);
(...skipping 18 matching lines...)
722 if (constants_.use_experimental_agc && 724 if (constants_.use_experimental_agc &&
723 public_submodules_->gain_control->is_enabled() && 725 public_submodules_->gain_control->is_enabled() &&
724 (!capture_nonlocked_.beamformer_enabled || 726 (!capture_nonlocked_.beamformer_enabled ||
725 private_submodules_->beamformer->is_target_present())) { 727 private_submodules_->beamformer->is_target_present())) {
726 private_submodules_->agc_manager->Process( 728 private_submodules_->agc_manager->Process(
727 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), 729 ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(),
728 capture_nonlocked_.split_rate); 730 capture_nonlocked_.split_rate);
729 } 731 }
730 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca)); 732 RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca));
731 733
732 if (synthesis_needed(data_processed)) { 734 if (synthesis_needed()) {
733 ca->MergeFrequencyBands(); 735 ca->MergeFrequencyBands();
734 } 736 }
735 737
736 // TODO(aluebs): Investigate if the transient suppression placement should be 738 // TODO(aluebs): Investigate if the transient suppression placement should be
737 // before or after the AGC. 739 // before or after the AGC.
738 if (capture_.transient_suppressor_enabled) { 740 if (capture_.transient_suppressor_enabled) {
739 float voice_probability = 741 float voice_probability =
740 private_submodules_->agc_manager.get() 742 private_submodules_->agc_manager.get()
741 ? private_submodules_->agc_manager->voice_probability() 743 ? private_submodules_->agc_manager->voice_probability()
742 : 1.f; 744 : 1.f;
(...skipping 154 matching lines...)
897 &debug_dump_.num_bytes_left_for_log_, 899 &debug_dump_.num_bytes_left_for_log_,
898 &crit_debug_, &debug_dump_.render)); 900 &crit_debug_, &debug_dump_.render));
899 } 901 }
900 #endif 902 #endif
901 render_.render_audio->DeinterleaveFrom(frame); 903 render_.render_audio->DeinterleaveFrom(frame);
902 return ProcessReverseStreamLocked(); 904 return ProcessReverseStreamLocked();
903 } 905 }
904 906
905 int AudioProcessingImpl::ProcessReverseStreamLocked() { 907 int AudioProcessingImpl::ProcessReverseStreamLocked() {
906 AudioBuffer* ra = render_.render_audio.get(); // For brevity. 908 AudioBuffer* ra = render_.render_audio.get(); // For brevity.
907 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz) { 909 if (rev_analysis_needed()) {
908 ra->SplitIntoFrequencyBands(); 910 ra->SplitIntoFrequencyBands();
909 } 911 }
910 912
911 if (constants_.intelligibility_enabled) { 913 if (constants_.intelligibility_enabled) {
912 // Currently run in single-threaded mode when the intelligibility 914 // Currently run in single-threaded mode when the intelligibility
913 // enhancer is activated. 915 // enhancer is activated.
914 // TODO(peah): Fix to be properly multi-threaded. 916 // TODO(peah): Fix to be properly multi-threaded.
915 rtc::CritScope cs(&crit_capture_); 917 rtc::CritScope cs(&crit_capture_);
916 public_submodules_->intelligibility_enhancer->ProcessRenderAudio( 918 public_submodules_->intelligibility_enhancer->ProcessRenderAudio(
917 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, 919 ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate,
918 ra->num_channels()); 920 ra->num_channels());
919 } 921 }
920 922
921 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); 923 RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra));
922 RETURN_ON_ERR( 924 RETURN_ON_ERR(
923 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); 925 public_submodules_->echo_control_mobile->ProcessRenderAudio(ra));
924 if (!constants_.use_experimental_agc) { 926 if (!constants_.use_experimental_agc) {
925 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); 927 RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra));
926 } 928 }
927 929
928 if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz && 930 if (rev_synthesis_needed()) {
929 is_rev_processed()) {
930 ra->MergeFrequencyBands(); 931 ra->MergeFrequencyBands();
931 } 932 }
932 933
933 return kNoError; 934 return kNoError;
934 } 935 }
935 936
936 int AudioProcessingImpl::set_stream_delay_ms(int delay) { 937 int AudioProcessingImpl::set_stream_delay_ms(int delay) {
937 rtc::CritScope cs(&crit_capture_); 938 rtc::CritScope cs(&crit_capture_);
938 Error retval = kNoError; 939 Error retval = kNoError;
939 capture_.was_stream_delay_set = true; 940 capture_.was_stream_delay_set = true;
(...skipping 170 matching lines...)
1110 // from the returned pointer. 1111 // from the returned pointer.
1111 return public_submodules_->noise_suppression.get(); 1112 return public_submodules_->noise_suppression.get();
1112 } 1113 }
1113 1114
1114 VoiceDetection* AudioProcessingImpl::voice_detection() const { 1115 VoiceDetection* AudioProcessingImpl::voice_detection() const {
1115 // Adding a lock here has no effect as it allows any access to the submodule 1116 // Adding a lock here has no effect as it allows any access to the submodule
1116 // from the returned pointer. 1117 // from the returned pointer.
1117 return public_submodules_->voice_detection.get(); 1118 return public_submodules_->voice_detection.get();
1118 } 1119 }
1119 1120
1120 bool AudioProcessingImpl::is_data_processed() const { 1121 bool AudioProcessingImpl::is_data_processed() const {
peah-webrtc 2016/03/09 13:25:43 This is the counterpart to output_copy_needed(). I
aluebs-webrtc 2016/03/09 14:27:48 Might make sense, but in another CL.
1121 // The beamformer, noise suppressor and highpass filter 1122 // The beamformer, noise suppressor and highpass filter
1122 // modify the data. 1123 // modify the data.
1123 if (capture_nonlocked_.beamformer_enabled || 1124 if (capture_nonlocked_.beamformer_enabled ||
1124 public_submodules_->high_pass_filter->is_enabled() || 1125 public_submodules_->high_pass_filter->is_enabled() ||
1125 public_submodules_->noise_suppression->is_enabled() || 1126 public_submodules_->noise_suppression->is_enabled() ||
1126 public_submodules_->echo_cancellation->is_enabled()) { 1127 public_submodules_->echo_cancellation->is_enabled()) {
1127 return true; 1128 return true;
1128 } 1129 }
1129 1130
1130 // All of the private submodules modify the data. 1131 // All of the private submodules modify the data.
1131 for (auto item : private_submodules_->component_list) { 1132 for (auto item : private_submodules_->component_list) {
1132 if (item->is_component_enabled()) { 1133 if (item->is_component_enabled()) {
1133 return true; 1134 return true;
1134 } 1135 }
1135 } 1136 }
1136 1137
1137 // The capture data is otherwise unchanged. 1138 // The capture data is otherwise unchanged.
1138 return false; 1139 return false;
1139 } 1140 }
1140 1141
1141 bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { 1142 bool AudioProcessingImpl::output_copy_needed() const {
peah-webrtc 2016/03/09 13:25:43 The naming rule https://google.github.io/styleguid
aluebs-webrtc 2016/03/09 14:27:48 I am pretty sure this naming was chosen assuming i
peah-webrtc 2016/03/10 06:59:24 Acknowledged.
1142 // Check if we've upmixed or downmixed the audio. 1143 // Check if we've upmixed or downmixed the audio.
1143 return ((formats_.api_format.output_stream().num_channels() != 1144 return ((formats_.api_format.output_stream().num_channels() !=
1144 formats_.api_format.input_stream().num_channels()) || 1145 formats_.api_format.input_stream().num_channels()) ||
1145 is_data_processed || capture_.transient_suppressor_enabled); 1146 is_data_processed() || capture_.transient_suppressor_enabled);
1146 } 1147 }
1147 1148
1148 bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { 1149 bool AudioProcessingImpl::synthesis_needed() const {
peah-webrtc 2016/03/09 13:25:43 The naming rule https://google.github.io/styleguid
aluebs-webrtc 2016/03/09 14:27:48 Again, I am pretty sure all these names were chose
peah-webrtc 2016/03/10 06:59:24 Acknowledged.
1149 return (is_data_processed && 1150 return (is_data_processed() &&
1150 (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == 1151 is_multi_band(capture_nonlocked_.fwd_proc_format.sample_rate_hz()));
1151 kSampleRate32kHz ||
1152 capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
1153 kSampleRate48kHz));
1154 } 1152 }
1155 1153
1156 bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { 1154 bool AudioProcessingImpl::analysis_needed() const {
peah-webrtc 2016/03/09 13:25:43 As above, what about What about ForwardStreamBandA
aluebs-webrtc 2016/03/09 14:27:48 As above, if we change the naming of these functio
peah-webrtc 2016/03/10 06:59:24 Acknowledged.
1157 if (!is_data_processed && 1155 if (!is_data_processed() &&
1158 !public_submodules_->voice_detection->is_enabled() && 1156 !public_submodules_->voice_detection->is_enabled() &&
1159 !capture_.transient_suppressor_enabled) { 1157 !capture_.transient_suppressor_enabled) {
1160 // Only public_submodules_->level_estimator is enabled. 1158 // Only public_submodules_->level_estimator is enabled.
1161 return false; 1159 return false;
1162 } else if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == 1160 } else if (is_multi_band(
1163 kSampleRate32kHz || 1161 capture_nonlocked_.fwd_proc_format.sample_rate_hz())) {
1164 capture_nonlocked_.fwd_proc_format.sample_rate_hz() ==
1165 kSampleRate48kHz) {
1166 // Something besides public_submodules_->level_estimator is enabled, and we 1162 // Something besides public_submodules_->level_estimator is enabled, and we
1167 // have super-wb. 1163 // have super-wb.
1168 return true; 1164 return true;
1169 } 1165 }
1170 return false; 1166 return false;
1171 } 1167 }
1172 1168
1173 bool AudioProcessingImpl::is_rev_processed() const { 1169 bool AudioProcessingImpl::is_rev_processed() const {
1174 return constants_.intelligibility_enabled; 1170 return constants_.intelligibility_enabled;
1175 } 1171 }
1176 1172
1173 bool AudioProcessingImpl::rev_synthesis_needed() const {
1174 return (is_rev_processed() &&
1175 is_multi_band(formats_.rev_proc_format.sample_rate_hz()));
peah-webrtc 2016/03/09 13:25:43 As above, what about What about ReverseStreamBandS
aluebs-webrtc 2016/03/09 14:27:48 I would want these to have names that are consiste
peah-webrtc 2016/03/10 06:59:24 Acknowledged.
1176 }
1177
1178 bool AudioProcessingImpl::rev_analysis_needed() const {
peah-webrtc 2016/03/09 13:25:43 As above, what about What about ReverseStreamBandA
aluebs-webrtc 2016/03/09 14:27:48 I would want these to have names that are consiste
peah-webrtc 2016/03/10 06:59:24 Acknowledged.
1179 return is_multi_band(formats_.rev_proc_format.sample_rate_hz());
1180 }
1181
1177 bool AudioProcessingImpl::render_check_rev_conversion_needed() const { 1182 bool AudioProcessingImpl::render_check_rev_conversion_needed() const {
1178 return rev_conversion_needed(); 1183 return rev_conversion_needed();
1179 } 1184 }
1180 1185
1181 bool AudioProcessingImpl::rev_conversion_needed() const { 1186 bool AudioProcessingImpl::rev_conversion_needed() const {
1182 return (formats_.api_format.reverse_input_stream() != 1187 return (formats_.api_format.reverse_input_stream() !=
1183 formats_.api_format.reverse_output_stream()); 1188 formats_.api_format.reverse_output_stream());
1184 } 1189 }
1185 1190
1186 void AudioProcessingImpl::InitializeExperimentalAgc() { 1191 void AudioProcessingImpl::InitializeExperimentalAgc() {
(...skipping 257 matching lines...)
1444 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); 1449 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config);
1445 1450
1446 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), 1451 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
1447 &debug_dump_.num_bytes_left_for_log_, 1452 &debug_dump_.num_bytes_left_for_log_,
1448 &crit_debug_, &debug_dump_.capture)); 1453 &crit_debug_, &debug_dump_.capture));
1449 return kNoError; 1454 return kNoError;
1450 } 1455 }
1451 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP 1456 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
1452 1457
1453 } // namespace webrtc 1458 } // namespace webrtc