Chromium Code Reviews

Unified Diff: webrtc/modules/audio_processing/audio_processing_impl.cc

Issue 1422013002: Preparatory work for an upcoming addition of a thread-checking scheme for APM (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@bundling_of_state_CL
Patch Set: Merge with master (created 5 years ago)
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */
 
(...skipping 288 matching lines...)
         LayoutHasKeyboard(reverse_layout)}}};
 
   return Initialize(processing_config);
 }
 
 int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
   CriticalSectionScoped crit_scoped(crit_);
   return InitializeLocked(processing_config);
 }
 
+int AudioProcessingImpl::MaybeInitializeLockedRender(
+    const ProcessingConfig& processing_config) {
+  return MaybeInitializeLocked(processing_config);
+}
+
+int AudioProcessingImpl::MaybeInitializeLockedCapture(
+    const ProcessingConfig& processing_config) {
+  return MaybeInitializeLocked(processing_config);
+}
+
 // Calls InitializeLocked() if any of the audio parameters have changed from
 // their current values.
 int AudioProcessingImpl::MaybeInitializeLocked(
     const ProcessingConfig& processing_config) {
   if (processing_config == shared_state_.api_format_) {
     return kNoError;
   }
   return InitializeLocked(processing_config);
 }
 
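The new MaybeInitializeLockedRender() and MaybeInitializeLockedCapture() entry points above are, for now, plain forwarders to MaybeInitializeLocked(); per the issue description they give the render and capture call paths their own named entry points ahead of the upcoming thread-checking scheme. A minimal standalone sketch of what per-path checking could eventually look like, using only the standard library (all names below are hypothetical, not from this CL):

#include <cassert>
#include <thread>

// Hypothetical illustration: per-path entry points that latch and verify the
// calling thread before forwarding to one shared helper.
class ApmSketch {
 public:
  int MaybeInitializeRender() {
    CheckCalledOn(&render_thread_id_);   // render-path check (hypothetical)
    return MaybeInitializeLocked();      // shared implementation
  }
  int MaybeInitializeCapture() {
    CheckCalledOn(&capture_thread_id_);  // capture-path check (hypothetical)
    return MaybeInitializeLocked();
  }

 private:
  int MaybeInitializeLocked() { return 0; }  // stands in for kNoError
  static void CheckCalledOn(std::thread::id* expected) {
    // First call latches the thread; later calls must come from the same one.
    if (*expected == std::thread::id()) *expected = std::this_thread::get_id();
    assert(*expected == std::this_thread::get_id());
  }
  std::thread::id render_thread_id_;
  std::thread::id capture_thread_id_;
};

In this patch set the wrappers stay behavior-neutral; only the call sites in ProcessStream() and AnalyzeReverseStream() are switched over, as the chunks below show.
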
(...skipping 53 matching lines...)
     if (err != kNoError) {
       return err;
     }
   }
 #endif
 
   return kNoError;
 }
 
 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   for (const auto& stream : config.streams) {
     if (stream.num_channels() < 0) {
       return kBadNumberChannelsError;
     }
     if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
       return kBadSampleRateError;
     }
   }
 
   const int num_in_channels = config.input_stream().num_channels();
(...skipping 54 matching lines...)
   if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
       fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
     split_rate_ = kSampleRate16kHz;
   } else {
     split_rate_ = fwd_proc_format_.sample_rate_hz();
   }
 
   return InitializeLocked();
 }
 
-
 void AudioProcessingImpl::SetExtraOptions(const Config& config) {
   CriticalSectionScoped crit_scoped(crit_);
   for (auto item : component_list_) {
     item->SetExtraOptions(config);
   }
 
   if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) {
     transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled;
     InitializeTransient();
   }
 }
 
 
 int AudioProcessingImpl::proc_sample_rate_hz() const {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   return fwd_proc_format_.sample_rate_hz();
 }
 
 int AudioProcessingImpl::proc_split_sample_rate_hz() const {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
+
   return split_rate_;
 }
 
 int AudioProcessingImpl::num_reverse_channels() const {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   return rev_proc_format_.num_channels();
 }
 
 int AudioProcessingImpl::num_input_channels() const {
   return shared_state_.api_format_.input_stream().num_channels();
 }
 
 int AudioProcessingImpl::num_output_channels() const {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   return shared_state_.api_format_.output_stream().num_channels();
 }
 
 void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
   CriticalSectionScoped lock(crit_);
   output_will_be_muted_ = muted;
   if (agc_manager_.get()) {
     agc_manager_->SetCaptureMuted(output_will_be_muted_);
   }
 }
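
The repeated TODO(peah) comments above refer to making these accessors verifiable by compile-time thread/lock annotations rather than by convention. A standalone sketch, assuming Clang's -Wthread-safety analysis (WebRTC wraps the same attributes in macros such as GUARDED_BY in webrtc/base/thread_annotations.h); the types and members below are hypothetical:

#include <mutex>

// A mutex type the analysis understands: Lock()/Unlock() declare that they
// acquire/release the capability. Requires clang with -Wthread-safety.
class __attribute__((capability("mutex"))) CheckedMutex {
 public:
  void Lock() __attribute__((acquire_capability())) { mu_.lock(); }
  void Unlock() __attribute__((release_capability())) { mu_.unlock(); }

 private:
  std::mutex mu_;
};

// Hypothetical holder for a piece of state like split_rate_.
class SplitRateHolder {
 public:
  void set_split_rate(int hz) {
    crit_.Lock();
    split_rate_ = hz;  // OK: crit_ is held here.
    crit_.Unlock();
  }
  int split_rate() const {
    // -Wthread-safety warns: reading split_rate_ without holding crit_.
    return split_rate_;
  }

 private:
  CheckedMutex crit_;
  // guarded_by ties the member to the mutex that must protect it.
  int split_rate_ __attribute__((guarded_by(crit_))) = 16000;
};
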
(...skipping 33 matching lines...)
   }
 
   echo_cancellation_->ReadQueuedRenderData();
   echo_control_mobile_->ReadQueuedRenderData();
   gain_control_->ReadQueuedRenderData();
 
   ProcessingConfig processing_config = shared_state_.api_format_;
   processing_config.input_stream() = input_config;
   processing_config.output_stream() = output_config;
 
-  RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+  RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config));
   assert(processing_config.input_stream().num_frames() ==
          shared_state_.api_format_.input_stream().num_frames());
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     RETURN_ON_ERR(WriteConfigMessage(false));
 
     event_msg_->set_type(audioproc::Event::STREAM);
     audioproc::Stream* msg = event_msg_->mutable_stream();
     const size_t channel_size =
(...skipping 47 matching lines...)
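
RETURN_ON_ERR(...), used above and in the chunks that follow, is an early-return macro defined elsewhere in this file and not shown in this diff. Its exact definition may differ, but the pattern it implements is roughly the following sketch (the error values here are hypothetical):

#include <cstdio>

const int kNoError = 0;
const int kBadSampleRateError = -1;  // hypothetical value, for illustration only

// Early-return helper: evaluate the expression once and bail out of the
// enclosing function if it reports anything other than kNoError.
#define RETURN_ON_ERR(expr)  \
  do {                       \
    const int err = (expr);  \
    if (err != kNoError) {   \
      return err;            \
    }                        \
  } while (0)

int MaybeInitialize(int sample_rate_hz) {
  return sample_rate_hz > 0 ? kNoError : kBadSampleRateError;
}

int ProcessStreamSketch(int sample_rate_hz) {
  RETURN_ON_ERR(MaybeInitialize(sample_rate_hz));  // stops here on failure
  return kNoError;                                 // otherwise keep going
}

int main() {
  std::printf("%d %d\n", ProcessStreamSketch(48000), ProcessStreamSketch(-1));
  return 0;
}
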
   }
 
   // TODO(ajm): The input and output rates and channels are currently
   // constrained to be identical in the int16 interface.
   ProcessingConfig processing_config = shared_state_.api_format_;
   processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_);
   processing_config.input_stream().set_num_channels(frame->num_channels_);
   processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_);
   processing_config.output_stream().set_num_channels(frame->num_channels_);
 
-  RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+  RETURN_ON_ERR(MaybeInitializeLockedCapture(processing_config));
   if (frame->samples_per_channel_ !=
       shared_state_.api_format_.input_stream().num_frames()) {
     return kBadDataLengthError;
   }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     event_msg_->set_type(audioproc::Event::STREAM);
     audioproc::Stream* msg = event_msg_->mutable_stream();
     const size_t data_size =
(...skipping 113 matching lines...)
 int AudioProcessingImpl::ProcessReverseStream(
     const float* const* src,
     const StreamConfig& reverse_input_config,
     const StreamConfig& reverse_output_config,
     float* const* dest) {
   RETURN_ON_ERR(
       AnalyzeReverseStream(src, reverse_input_config, reverse_output_config));
   if (is_rev_processed()) {
     render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(),
                           dest);
-  } else if (rev_conversion_needed()) {
+  } else if (render_check_rev_conversion_needed()) {
     render_converter_->Convert(src, reverse_input_config.num_samples(), dest,
                                reverse_output_config.num_samples());
   } else {
     CopyAudioIfNeeded(src, reverse_input_config.num_frames(),
                       reverse_input_config.num_channels(), dest);
   }
 
   return kNoError;
 }
 
 int AudioProcessingImpl::AnalyzeReverseStream(
     const float* const* src,
     const StreamConfig& reverse_input_config,
     const StreamConfig& reverse_output_config) {
   CriticalSectionScoped crit_scoped(crit_);
   if (src == NULL) {
     return kNullPointerError;
   }
 
   if (reverse_input_config.num_channels() <= 0) {
     return kBadNumberChannelsError;
   }
 
   ProcessingConfig processing_config = shared_state_.api_format_;
   processing_config.reverse_input_stream() = reverse_input_config;
   processing_config.reverse_output_stream() = reverse_output_config;
 
-  RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+  RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config));
   assert(reverse_input_config.num_frames() ==
          shared_state_.api_format_.reverse_input_stream().num_frames());
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
     audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
     const size_t channel_size =
         sizeof(float) *
         shared_state_.api_format_.reverse_input_stream().num_frames();
(...skipping 44 matching lines...)
   ProcessingConfig processing_config = shared_state_.api_format_;
   processing_config.reverse_input_stream().set_sample_rate_hz(
       frame->sample_rate_hz_);
   processing_config.reverse_input_stream().set_num_channels(
       frame->num_channels_);
   processing_config.reverse_output_stream().set_sample_rate_hz(
       frame->sample_rate_hz_);
   processing_config.reverse_output_stream().set_num_channels(
       frame->num_channels_);
 
-  RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
+  RETURN_ON_ERR(MaybeInitializeLockedRender(processing_config));
   if (frame->samples_per_channel_ !=
       shared_state_.api_format_.reverse_input_stream().num_frames()) {
     return kBadDataLengthError;
   }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_file_->Open()) {
     event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
     audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
     const size_t data_size =
(...skipping 233 matching lines...)
     // Something besides level_estimator_ is enabled, and we have super-wb.
     return true;
   }
   return false;
 }
 
 bool AudioProcessingImpl::is_rev_processed() const {
   return intelligibility_enabled_ && intelligibility_enhancer_->active();
 }
 
+bool AudioProcessingImpl::render_check_rev_conversion_needed() const {
+  return rev_conversion_needed();
+}
+
 bool AudioProcessingImpl::rev_conversion_needed() const {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   return (shared_state_.api_format_.reverse_input_stream() !=
           shared_state_.api_format_.reverse_output_stream());
 }
 
 void AudioProcessingImpl::InitializeExperimentalAgc() {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   if (use_new_agc_) {
     if (!agc_manager_.get()) {
       agc_manager_.reset(new AgcManagerDirect(gain_control_,
                                               gain_control_for_new_agc_.get(),
                                               agc_startup_min_volume_));
     }
     agc_manager_->Initialize();
     agc_manager_->SetCaptureMuted(output_will_be_muted_);
   }
 }
 
 void AudioProcessingImpl::InitializeTransient() {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   if (transient_suppressor_enabled_) {
     if (!transient_suppressor_.get()) {
       transient_suppressor_.reset(new TransientSuppressor());
     }
     transient_suppressor_->Initialize(
         fwd_proc_format_.sample_rate_hz(), split_rate_,
         shared_state_.api_format_.output_stream().num_channels());
   }
 }
 
 void AudioProcessingImpl::InitializeBeamformer() {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   if (beamformer_enabled_) {
     if (!beamformer_) {
       beamformer_.reset(
           new NonlinearBeamformer(array_geometry_, target_direction_));
     }
     beamformer_->Initialize(kChunkSizeMs, split_rate_);
   }
 }
 
 void AudioProcessingImpl::InitializeIntelligibility() {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   if (intelligibility_enabled_) {
     IntelligibilityEnhancer::Config config;
     config.sample_rate_hz = split_rate_;
     config.num_capture_channels = capture_audio_->num_channels();
     config.num_render_channels = render_audio_->num_channels();
     intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config));
   }
 }
 
 void AudioProcessingImpl::MaybeUpdateHistograms() {
(...skipping 82 matching lines...)
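
The InitializeExperimentalAgc(), InitializeTransient(), InitializeBeamformer() and InitializeIntelligibility() helpers above share one shape: allocate the component the first time its feature is enabled, then (re)Initialize it with the current rates and channel counts on every format change. A standalone sketch of that pattern with hypothetical names, standard library only:

#include <memory>

class SuppressorLike {
 public:
  void Initialize(int sample_rate_hz, int num_channels) {
    sample_rate_hz_ = sample_rate_hz;
    num_channels_ = num_channels;
  }

 private:
  int sample_rate_hz_ = 0;
  int num_channels_ = 0;
};

class Host {
 public:
  void set_enabled(bool enabled) { enabled_ = enabled; }

  // Called whenever stream formats change (cf. InitializeLocked()).
  void InitializeComponent(int sample_rate_hz, int num_channels) {
    if (!enabled_) {
      return;  // feature off: nothing to (re)initialize
    }
    if (!component_) {
      component_.reset(new SuppressorLike());  // allocate on first enable
    }
    component_->Initialize(sample_rate_hz, num_channels);  // adopt new format
  }

 private:
  bool enabled_ = false;
  std::unique_ptr<SuppressorLike> component_;
};
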
   if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
     return kFileError;
   }
 
   event_msg_->Clear();
 
   return kNoError;
 }
 
 int AudioProcessingImpl::WriteInitMessage() {
+  // TODO(peah): Refactor to be allowed to verify using thread annotations.
   event_msg_->set_type(audioproc::Event::INIT);
   audioproc::Init* msg = event_msg_->mutable_init();
   msg->set_sample_rate(
       shared_state_.api_format_.input_stream().sample_rate_hz());
   msg->set_num_input_channels(
       shared_state_.api_format_.input_stream().num_channels());
   msg->set_num_output_channels(
       shared_state_.api_format_.output_stream().num_channels());
   msg->set_num_reverse_channels(
       shared_state_.api_format_.reverse_input_stream().num_channels());
(...skipping 47 matching lines...)
 
   event_msg_->set_type(audioproc::Event::CONFIG);
   event_msg_->mutable_config()->CopyFrom(config);
 
   RETURN_ON_ERR(WriteMessageToDebugFile());
   return kNoError;
 }
 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 
 }  // namespace webrtc
