OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 26 matching lines...) |
37 #include "webrtc/modules/audio_processing/low_cut_filter.h" | 37 #include "webrtc/modules/audio_processing/low_cut_filter.h" |
38 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" | 38 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" |
39 #include "webrtc/modules/audio_processing/residual_echo_detector.h" | 39 #include "webrtc/modules/audio_processing/residual_echo_detector.h" |
40 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" | 40 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" |
41 #include "webrtc/modules/audio_processing/voice_detection_impl.h" | 41 #include "webrtc/modules/audio_processing/voice_detection_impl.h" |
42 #include "webrtc/modules/include/module_common_types.h" | 42 #include "webrtc/modules/include/module_common_types.h" |
43 #include "webrtc/system_wrappers/include/file_wrapper.h" | 43 #include "webrtc/system_wrappers/include/file_wrapper.h" |
44 #include "webrtc/system_wrappers/include/logging.h" | 44 #include "webrtc/system_wrappers/include/logging.h" |
45 #include "webrtc/system_wrappers/include/metrics.h" | 45 #include "webrtc/system_wrappers/include/metrics.h" |
46 | 46 |
47 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
48 // Files generated at build-time by the protobuf compiler. | |
49 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD | |
50 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h" | |
51 #else | |
52 #include "webrtc/modules/audio_processing/debug.pb.h" | |
53 #endif | |
54 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
55 | |
56 // Check to verify that the define for the intelligibility enhancer is properly | 47 // Check to verify that the define for the intelligibility enhancer is properly |
57 // set. | 48 // set. |
58 #if !defined(WEBRTC_INTELLIGIBILITY_ENHANCER) || \ | 49 #if !defined(WEBRTC_INTELLIGIBILITY_ENHANCER) || \ |
59 (WEBRTC_INTELLIGIBILITY_ENHANCER != 0 && \ | 50 (WEBRTC_INTELLIGIBILITY_ENHANCER != 0 && \ |
60 WEBRTC_INTELLIGIBILITY_ENHANCER != 1) | 51 WEBRTC_INTELLIGIBILITY_ENHANCER != 1) |
61 #error "Set WEBRTC_INTELLIGIBILITY_ENHANCER to either 0 or 1" | 52 #error "Set WEBRTC_INTELLIGIBILITY_ENHANCER to either 0 or 1" |
62 #endif | 53 #endif |
63 | 54 |
64 #define RETURN_ON_ERR(expr) \ | 55 #define RETURN_ON_ERR(expr) \ |
65 do { \ | 56 do { \ |
(...skipping 232 matching lines...) |
298 | 289 |
299 return apm; | 290 return apm; |
300 } | 291 } |
301 | 292 |
302 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) | 293 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) |
303 : AudioProcessingImpl(config, nullptr) {} | 294 : AudioProcessingImpl(config, nullptr) {} |
304 | 295 |
305 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config, | 296 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config, |
306 NonlinearBeamformer* beamformer) | 297 NonlinearBeamformer* beamformer) |
307 : high_pass_filter_impl_(new HighPassFilterImpl(this)), | 298 : high_pass_filter_impl_(new HighPassFilterImpl(this)), |
| 299 aec_dump_(AecDump::CreateNullDump()), |
308 public_submodules_(new ApmPublicSubmodules()), | 300 public_submodules_(new ApmPublicSubmodules()), |
309 private_submodules_(new ApmPrivateSubmodules(beamformer)), | 301 private_submodules_(new ApmPrivateSubmodules(beamformer)), |
310 constants_(config.Get<ExperimentalAgc>().startup_min_volume, | 302 constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
311 config.Get<ExperimentalAgc>().clipped_level_min, | 303 config.Get<ExperimentalAgc>().clipped_level_min, |
312 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 304 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
313 false), | 305 false), |
314 #else | 306 #else |
315 config.Get<ExperimentalAgc>().enabled), | 307 config.Get<ExperimentalAgc>().enabled), |
316 #endif | 308 #endif |
317 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 309 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
(...skipping 35 matching lines...) |
353 SetExtraOptions(config); | 345 SetExtraOptions(config); |
354 } | 346 } |
355 | 347 |
356 AudioProcessingImpl::~AudioProcessingImpl() { | 348 AudioProcessingImpl::~AudioProcessingImpl() { |
357 // Depends on gain_control_ and | 349 // Depends on gain_control_ and |
358 // public_submodules_->gain_control_for_experimental_agc. | 350 // public_submodules_->gain_control_for_experimental_agc. |
359 private_submodules_->agc_manager.reset(); | 351 private_submodules_->agc_manager.reset(); |
360 // Depends on gain_control_. | 352 // Depends on gain_control_. |
361 public_submodules_->gain_control_for_experimental_agc.reset(); | 353 public_submodules_->gain_control_for_experimental_agc.reset(); |
362 | 354 |
363 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
364 debug_dump_.debug_file->CloseFile(); | |
365 #endif | |
366 } | 355 } |
367 | 356 |
368 int AudioProcessingImpl::Initialize() { | 357 int AudioProcessingImpl::Initialize() { |
369 // Run in a single-threaded manner during initialization. | 358 // Run in a single-threaded manner during initialization. |
370 rtc::CritScope cs_render(&crit_render_); | 359 rtc::CritScope cs_render(&crit_render_); |
371 rtc::CritScope cs_capture(&crit_capture_); | 360 rtc::CritScope cs_capture(&crit_capture_); |
372 return InitializeLocked(); | 361 return InitializeLocked(); |
373 } | 362 } |
374 | 363 |
375 int AudioProcessingImpl::Initialize(int capture_input_sample_rate_hz, | 364 int AudioProcessingImpl::Initialize(int capture_input_sample_rate_hz, |
(...skipping 27 matching lines...) |
403 const ProcessingConfig& processing_config) { | 392 const ProcessingConfig& processing_config) { |
404 return MaybeInitialize(processing_config, false); | 393 return MaybeInitialize(processing_config, false); |
405 } | 394 } |
406 | 395 |
407 int AudioProcessingImpl::MaybeInitializeCapture( | 396 int AudioProcessingImpl::MaybeInitializeCapture( |
408 const ProcessingConfig& processing_config, | 397 const ProcessingConfig& processing_config, |
409 bool force_initialization) { | 398 bool force_initialization) { |
410 return MaybeInitialize(processing_config, force_initialization); | 399 return MaybeInitialize(processing_config, force_initialization); |
411 } | 400 } |
412 | 401 |
413 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
414 | |
415 AudioProcessingImpl::ApmDebugDumpThreadState::ApmDebugDumpThreadState() | |
416 : event_msg(new audioproc::Event()) {} | |
417 | |
418 AudioProcessingImpl::ApmDebugDumpThreadState::~ApmDebugDumpThreadState() {} | |
419 | |
420 AudioProcessingImpl::ApmDebugDumpState::ApmDebugDumpState() | |
421 : debug_file(FileWrapper::Create()) {} | |
422 | |
423 AudioProcessingImpl::ApmDebugDumpState::~ApmDebugDumpState() {} | |
424 | |
425 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
426 | 402 |
427 // Calls InitializeLocked() if any of the audio parameters have changed from | 403 // Calls InitializeLocked() if any of the audio parameters have changed from |
428 // their current values (needs to be called while holding the crit_render_lock). | 404 // their current values (needs to be called while holding the crit_render_lock). |
429 int AudioProcessingImpl::MaybeInitialize( | 405 int AudioProcessingImpl::MaybeInitialize( |
430 const ProcessingConfig& processing_config, | 406 const ProcessingConfig& processing_config, |
431 bool force_initialization) { | 407 bool force_initialization) { |
432 // Called from both threads. Thread check is therefore not possible. | 408 // Called from both threads. Thread check is therefore not possible. |
433 if (processing_config == formats_.api_format && !force_initialization) { | 409 if (processing_config == formats_.api_format && !force_initialization) { |
434 return kNoError; | 410 return kNoError; |
435 } | 411 } |
(...skipping 74 matching lines...) |
510 #endif | 486 #endif |
511 InitializeLowCutFilter(); | 487 InitializeLowCutFilter(); |
512 public_submodules_->noise_suppression->Initialize(num_proc_channels(), | 488 public_submodules_->noise_suppression->Initialize(num_proc_channels(), |
513 proc_sample_rate_hz()); | 489 proc_sample_rate_hz()); |
514 public_submodules_->voice_detection->Initialize(proc_split_sample_rate_hz()); | 490 public_submodules_->voice_detection->Initialize(proc_split_sample_rate_hz()); |
515 public_submodules_->level_estimator->Initialize(); | 491 public_submodules_->level_estimator->Initialize(); |
516 InitializeLevelController(); | 492 InitializeLevelController(); |
517 InitializeResidualEchoDetector(); | 493 InitializeResidualEchoDetector(); |
518 InitializeEchoCanceller3(); | 494 InitializeEchoCanceller3(); |
519 | 495 |
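| // aec_dump_ starts as a null-object dump (AecDump::CreateNullDump()), so this write is a no-op until a real recorder is attached via StartDebugRecording(). |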
520 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 496 aec_dump_->WriteInitMessage(formats_.api_format); |
521 if (debug_dump_.debug_file->is_open()) { | |
522 int err = WriteInitMessage(); | |
523 if (err != kNoError) { | |
524 return err; | |
525 } | |
526 } | |
527 #endif | |
528 | |
529 return kNoError; | 497 return kNoError; |
530 } | 498 } |
531 | 499 |
532 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 500 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
533 for (const auto& stream : config.streams) { | 501 for (const auto& stream : config.streams) { |
534 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 502 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
535 return kBadSampleRateError; | 503 return kBadSampleRateError; |
536 } | 504 } |
537 } | 505 } |
538 | 506 |
(...skipping 255 matching lines...) |
794 { | 762 { |
795 // Do conditional reinitialization. | 763 // Do conditional reinitialization. |
796 rtc::CritScope cs_render(&crit_render_); | 764 rtc::CritScope cs_render(&crit_render_); |
797 RETURN_ON_ERR( | 765 RETURN_ON_ERR( |
798 MaybeInitializeCapture(processing_config, reinitialization_required)); | 766 MaybeInitializeCapture(processing_config, reinitialization_required)); |
799 } | 767 } |
800 rtc::CritScope cs_capture(&crit_capture_); | 768 rtc::CritScope cs_capture(&crit_capture_); |
801 RTC_DCHECK_EQ(processing_config.input_stream().num_frames(), | 769 RTC_DCHECK_EQ(processing_config.input_stream().num_frames(), |
802 formats_.api_format.input_stream().num_frames()); | 770 formats_.api_format.input_stream().num_frames()); |
803 | 771 |
804 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 772 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info = |
805 if (debug_dump_.debug_file->is_open()) { | 773 aec_dump_->GetCaptureStreamInfo(); |
806 RETURN_ON_ERR(WriteConfigMessage(false)); | 774 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 775 const size_t channel_size = |
| 776 formats_.api_format.input_stream().num_frames(); |
807 | 777 |
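| // Add the unprocessed input channels to the AEC dump's capture stream info. |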
808 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 778 { |
809 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 779 std::vector<rtc::ArrayView<const float>> src_view; |
810 const size_t channel_size = | |
811 sizeof(float) * formats_.api_format.input_stream().num_frames(); | |
812 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 780 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
813 ++i) | 781 ++i) { |
814 msg->add_input_channel(src[i], channel_size); | 782 src_view.emplace_back(src[i], channel_size); |
| 783 } |
| 784 stream_info->AddInput(src_view); |
815 } | 785 } |
816 #endif | 786 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
817 | 787 |
818 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 788 // This used to be done in ProcessCaptureStreamLocked(). |
| 789 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && |
| 790 public_submodules_->echo_control_mobile->is_enabled())); |
| 791 |
| 792 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); |
| 793 stream_info->set_drift( |
| 794 public_submodules_->echo_cancellation->stream_drift_samples()); |
| 795 stream_info->set_level(gain_control()->stream_analog_level()); |
| 796 stream_info->set_keypress(capture_.key_pressed); |
| 797 |
819 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 798 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
820 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 799 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
821 | 800 |
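| // Add the processed output channels to the capture stream info before emitting the message. |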
822 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 801 { |
823 if (debug_dump_.debug_file->is_open()) { | |
824 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | |
825 const size_t channel_size = | 802 const size_t channel_size = |
826 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 803 formats_.api_format.output_stream().num_frames(); |
| 804 std::vector<rtc::ArrayView<const float>> dest_view; |
827 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | 805 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
828 ++i) | 806 ++i) { |
829 msg->add_output_channel(dest[i], channel_size); | 807 dest_view.emplace_back(dest[i], channel_size); |
830 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 808 } |
831 &debug_dump_.num_bytes_left_for_log_, | 809 stream_info->AddOutput(dest_view); |
832 &crit_debug_, &debug_dump_.capture)); | |
833 } | 810 } |
834 #endif | 811 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); |
835 | 812 |
836 return kNoError; | 813 return kNoError; |
837 } | 814 } |
838 | 815 |
839 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { | 816 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { |
840 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), | 817 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), |
841 num_reverse_channels(), | 818 num_reverse_channels(), |
842 &aec_render_queue_buffer_); | 819 &aec_render_queue_buffer_); |
843 | 820 |
844 RTC_DCHECK_GE(160, audio->num_frames_per_band()); | 821 RTC_DCHECK_GE(160, audio->num_frames_per_band()); |
(...skipping 217 matching lines...) |
1062 rtc::CritScope cs_render(&crit_render_); | 1039 rtc::CritScope cs_render(&crit_render_); |
1063 RETURN_ON_ERR( | 1040 RETURN_ON_ERR( |
1064 MaybeInitializeCapture(processing_config, reinitialization_required)); | 1041 MaybeInitializeCapture(processing_config, reinitialization_required)); |
1065 } | 1042 } |
1066 rtc::CritScope cs_capture(&crit_capture_); | 1043 rtc::CritScope cs_capture(&crit_capture_); |
1067 if (frame->samples_per_channel_ != | 1044 if (frame->samples_per_channel_ != |
1068 formats_.api_format.input_stream().num_frames()) { | 1045 formats_.api_format.input_stream().num_frames()) { |
1069 return kBadDataLengthError; | 1046 return kBadDataLengthError; |
1070 } | 1047 } |
1071 | 1048 |
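| // Record the current config and the unprocessed capture frame for the AEC dump. |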
1072 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1049 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info = |
1073 if (debug_dump_.debug_file->is_open()) { | 1050 aec_dump_->GetCaptureStreamInfo(); |
1074 RETURN_ON_ERR(WriteConfigMessage(false)); | 1051 RETURN_ON_ERR(WriteConfigMessage(false)); |
1075 | 1052 stream_info->AddInput(*frame); |
1076 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | |
1077 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | |
1078 const size_t data_size = | |
1079 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | |
1080 msg->set_input_data(frame->data_, data_size); | |
1081 } | |
1082 #endif | |
1083 | 1053 |
1084 capture_.capture_audio->DeinterleaveFrom(frame); | 1054 capture_.capture_audio->DeinterleaveFrom(frame); |
| 1055 |
| 1056 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && |
| 1057 public_submodules_->echo_control_mobile->is_enabled())); |
| 1058 |
| 1059 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); |
| 1060 stream_info->set_drift( |
| 1061 public_submodules_->echo_cancellation->stream_drift_samples()); |
| 1062 stream_info->set_level(gain_control()->stream_analog_level()); |
| 1063 stream_info->set_keypress(capture_.key_pressed); |
| 1064 |
1085 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 1065 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
1086 capture_.capture_audio->InterleaveTo( | 1066 capture_.capture_audio->InterleaveTo( |
1087 frame, submodule_states_.CaptureMultiBandProcessingActive()); | 1067 frame, submodule_states_.CaptureMultiBandProcessingActive()); |
1088 | 1068 |
1089 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1069 stream_info->AddOutput(*frame); |
1090 if (debug_dump_.debug_file->is_open()) { | 1070 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); |
1091 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | |
1092 const size_t data_size = | |
1093 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | |
1094 msg->set_output_data(frame->data_, data_size); | |
1095 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | |
1096 &debug_dump_.num_bytes_left_for_log_, | |
1097 &crit_debug_, &debug_dump_.capture)); | |
1098 } | |
1099 #endif | |
1100 | |
1101 return kNoError; | 1071 return kNoError; |
1102 } | 1072 } |
1103 | 1073 |
1104 int AudioProcessingImpl::ProcessCaptureStreamLocked() { | 1074 int AudioProcessingImpl::ProcessCaptureStreamLocked() { |
1105 // Ensure that not both the AEC and AECM are active at the same time. | 1075 // Ensure that not both the AEC and AECM are active at the same time. |
1106 // TODO(peah): Simplify once the public API Enable functions for these | 1076 // TODO(peah): Simplify once the public API Enable functions for these |
1107 // are moved to APM. | 1077 // are moved to APM. |
1108 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && | 1078 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && |
1109 public_submodules_->echo_control_mobile->is_enabled())); | 1079 public_submodules_->echo_control_mobile->is_enabled())); |
1110 | 1080 |
1111 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
1112 if (debug_dump_.debug_file->is_open()) { | |
1113 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | |
1114 msg->set_delay(capture_nonlocked_.stream_delay_ms); | |
1115 msg->set_drift( | |
1116 public_submodules_->echo_cancellation->stream_drift_samples()); | |
1117 msg->set_level(gain_control()->stream_analog_level()); | |
1118 msg->set_keypress(capture_.key_pressed); | |
1119 } | |
1120 #endif | |
1121 | 1081 |
1122 MaybeUpdateHistograms(); | 1082 MaybeUpdateHistograms(); |
1123 | 1083 |
1124 AudioBuffer* capture_buffer = capture_.capture_audio.get(); // For brevity. | 1084 AudioBuffer* capture_buffer = capture_.capture_audio.get(); // For brevity. |
1125 | 1085 |
1126 capture_input_rms_.Analyze(rtc::ArrayView<const int16_t>( | 1086 capture_input_rms_.Analyze(rtc::ArrayView<const int16_t>( |
1127 capture_buffer->channels_const()[0], | 1087 capture_buffer->channels_const()[0], |
1128 capture_nonlocked_.capture_processing_format.num_frames())); | 1088 capture_nonlocked_.capture_processing_format.num_frames())); |
1129 const bool log_rms = ++capture_rms_interval_counter_ >= 1000; | 1089 const bool log_rms = ++capture_rms_interval_counter_ >= 1000; |
1130 if (log_rms) { | 1090 if (log_rms) { |
(...skipping 209 matching lines...) |
1340 } | 1300 } |
1341 | 1301 |
1342 ProcessingConfig processing_config = formats_.api_format; | 1302 ProcessingConfig processing_config = formats_.api_format; |
1343 processing_config.reverse_input_stream() = input_config; | 1303 processing_config.reverse_input_stream() = input_config; |
1344 processing_config.reverse_output_stream() = output_config; | 1304 processing_config.reverse_output_stream() = output_config; |
1345 | 1305 |
1346 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); | 1306 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); |
1347 assert(input_config.num_frames() == | 1307 assert(input_config.num_frames() == |
1348 formats_.api_format.reverse_input_stream().num_frames()); | 1308 formats_.api_format.reverse_input_stream().num_frames()); |
1349 | 1309 |
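| // Forward the render (reverse) input channels to the AEC dump. |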
1350 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1310 std::vector<rtc::ArrayView<const float>> src_view; |
1351 if (debug_dump_.debug_file->is_open()) { | 1311 const size_t channel_size = |
1352 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); | 1312 formats_.api_format.reverse_input_stream().num_frames(); |
1353 audioproc::ReverseStream* msg = | 1313 |
1354 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1314 for (size_t i = 0; |
1355 const size_t channel_size = | 1315 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) { |
1356 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1316 src_view.emplace_back(src[i], channel_size); |
1357 for (size_t i = 0; | |
1358 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | |
1359 msg->add_channel(src[i], channel_size); | |
1360 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | |
1361 &debug_dump_.num_bytes_left_for_log_, | |
1362 &crit_debug_, &debug_dump_.render)); | |
1363 } | 1317 } |
1364 #endif | 1318 aec_dump_->WriteRenderStreamMessage(src_view); |
1365 | 1319 |
1366 render_.render_audio->CopyFrom(src, | 1320 render_.render_audio->CopyFrom(src, |
1367 formats_.api_format.reverse_input_stream()); | 1321 formats_.api_format.reverse_input_stream()); |
1368 return ProcessRenderStreamLocked(); | 1322 return ProcessRenderStreamLocked(); |
1369 } | 1323 } |
1370 | 1324 |
1371 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 1325 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
1372 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 1326 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); |
1373 rtc::CritScope cs(&crit_render_); | 1327 rtc::CritScope cs(&crit_render_); |
1374 if (frame == nullptr) { | 1328 if (frame == nullptr) { |
(...skipping 20 matching lines...) |
1395 frame->sample_rate_hz_); | 1349 frame->sample_rate_hz_); |
1396 processing_config.reverse_output_stream().set_num_channels( | 1350 processing_config.reverse_output_stream().set_num_channels( |
1397 frame->num_channels_); | 1351 frame->num_channels_); |
1398 | 1352 |
1399 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); | 1353 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); |
1400 if (frame->samples_per_channel_ != | 1354 if (frame->samples_per_channel_ != |
1401 formats_.api_format.reverse_input_stream().num_frames()) { | 1355 formats_.api_format.reverse_input_stream().num_frames()) { |
1402 return kBadDataLengthError; | 1356 return kBadDataLengthError; |
1403 } | 1357 } |
1404 | 1358 |
1405 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1359 aec_dump_->WriteRenderStreamMessage(*frame); |
1406 if (debug_dump_.debug_file->is_open()) { | 1360 |
1407 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); | |
1408 audioproc::ReverseStream* msg = | |
1409 debug_dump_.render.event_msg->mutable_reverse_stream(); | |
1410 const size_t data_size = | |
1411 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | |
1412 msg->set_data(frame->data_, data_size); | |
1413 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | |
1414 &debug_dump_.num_bytes_left_for_log_, | |
1415 &crit_debug_, &debug_dump_.render)); | |
1416 } | |
1417 #endif | |
1418 render_.render_audio->DeinterleaveFrom(frame); | 1361 render_.render_audio->DeinterleaveFrom(frame); |
1419 RETURN_ON_ERR(ProcessRenderStreamLocked()); | 1362 RETURN_ON_ERR(ProcessRenderStreamLocked()); |
1420 render_.render_audio->InterleaveTo( | 1363 render_.render_audio->InterleaveTo( |
1421 frame, submodule_states_.RenderMultiBandProcessingActive()); | 1364 frame, submodule_states_.RenderMultiBandProcessingActive()); |
1422 return kNoError; | 1365 return kNoError; |
1423 } | 1366 } |
1424 | 1367 |
1425 int AudioProcessingImpl::ProcessRenderStreamLocked() { | 1368 int AudioProcessingImpl::ProcessRenderStreamLocked() { |
1426 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. | 1369 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. |
1427 if (submodule_states_.RenderMultiBandSubModulesActive() && | 1370 if (submodule_states_.RenderMultiBandSubModulesActive() && |
(...skipping 65 matching lines...) |
1493 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 1436 void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
1494 rtc::CritScope cs(&crit_capture_); | 1437 rtc::CritScope cs(&crit_capture_); |
1495 capture_.delay_offset_ms = offset; | 1438 capture_.delay_offset_ms = offset; |
1496 } | 1439 } |
1497 | 1440 |
1498 int AudioProcessingImpl::delay_offset_ms() const { | 1441 int AudioProcessingImpl::delay_offset_ms() const { |
1499 rtc::CritScope cs(&crit_capture_); | 1442 rtc::CritScope cs(&crit_capture_); |
1500 return capture_.delay_offset_ms; | 1443 return capture_.delay_offset_ms; |
1501 } | 1444 } |
1502 | 1445 |
1503 int AudioProcessingImpl::StartDebugRecording( | 1446 void AudioProcessingImpl::StartDebugRecording( |
1504 const char filename[AudioProcessing::kMaxFilenameSize], | 1447 std::unique_ptr<AecDump> aec_dump) { |
1505 int64_t max_log_size_bytes) { | |
1506 // Run in a single-threaded manner. | |
1507 rtc::CritScope cs_render(&crit_render_); | |
1508 rtc::CritScope cs_capture(&crit_capture_); | 1448 rtc::CritScope cs_capture(&crit_capture_); |
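| // Attach the new recorder, then write the current config and stream format so the dump is self-describing. |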
1509 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1449 aec_dump_ = std::move(aec_dump); |
1510 | 1450 const int error = WriteConfigMessage(true); |
1511 if (filename == nullptr) { | 1451 RTC_DCHECK_EQ(kNoError, error); |
1512 return kNullPointerError; | 1452 aec_dump_->WriteInitMessage(formats_.api_format); |
1513 } | |
1514 | |
1515 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
1516 debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; | |
1517 // Stop any ongoing recording. | |
1518 debug_dump_.debug_file->CloseFile(); | |
1519 | |
1520 if (!debug_dump_.debug_file->OpenFile(filename, false)) { | |
1521 return kFileError; | |
1522 } | |
1523 | |
1524 RETURN_ON_ERR(WriteConfigMessage(true)); | |
1525 RETURN_ON_ERR(WriteInitMessage()); | |
1526 return kNoError; | |
1527 #else | |
1528 return kUnsupportedFunctionError; | |
1529 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
1530 } | 1453 } |
1531 | 1454 |
1532 int AudioProcessingImpl::StartDebugRecording(FILE* handle, | 1455 void AudioProcessingImpl::StopDebugRecording() { |
1533 int64_t max_log_size_bytes) { | 1456 aec_dump_ = AecDump::CreateNullDump(); |
1534 // Run in a single-threaded manner. | |
1535 rtc::CritScope cs_render(&crit_render_); | |
1536 rtc::CritScope cs_capture(&crit_capture_); | |
1537 | |
1538 if (handle == nullptr) { | |
1539 return kNullPointerError; | |
1540 } | |
1541 | |
1542 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
1543 debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; | |
1544 | |
1545 // Stop any ongoing recording. | |
1546 debug_dump_.debug_file->CloseFile(); | |
1547 | |
1548 if (!debug_dump_.debug_file->OpenFromFileHandle(handle)) { | |
1549 return kFileError; | |
1550 } | |
1551 | |
1552 RETURN_ON_ERR(WriteConfigMessage(true)); | |
1553 RETURN_ON_ERR(WriteInitMessage()); | |
1554 return kNoError; | |
1555 #else | |
1556 return kUnsupportedFunctionError; | |
1557 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
1558 } | |
1559 | |
1560 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { | |
1561 return StartDebugRecording(handle, -1); | |
1562 } | |
1563 | |
1564 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( | |
1565 rtc::PlatformFile handle) { | |
1566 // Run in a single-threaded manner. | |
1567 rtc::CritScope cs_render(&crit_render_); | |
1568 rtc::CritScope cs_capture(&crit_capture_); | |
1569 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | |
1570 return StartDebugRecording(stream, -1); | |
1571 } | |
1572 | |
1573 int AudioProcessingImpl::StopDebugRecording() { | |
1574 // Run in a single-threaded manner. | |
1575 rtc::CritScope cs_render(&crit_render_); | |
1576 rtc::CritScope cs_capture(&crit_capture_); | |
1577 | |
1578 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
1579 // We just return if recording hasn't started. | |
1580 debug_dump_.debug_file->CloseFile(); | |
1581 return kNoError; | |
1582 #else | |
1583 return kUnsupportedFunctionError; | |
1584 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
1585 } | 1457 } |
1586 | 1458 |
1587 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics() { | 1459 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics() { |
1588 residual_echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); | 1460 residual_echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); |
1589 echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); | 1461 echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); |
1590 echo_return_loss_enhancement.Set(-100.0f, -100.0f, -100.0f, -100.0f); | 1462 echo_return_loss_enhancement.Set(-100.0f, -100.0f, -100.0f, -100.0f); |
1591 a_nlp.Set(-100.0f, -100.0f, -100.0f, -100.0f); | 1463 a_nlp.Set(-100.0f, -100.0f, -100.0f, -100.0f); |
1592 } | 1464 } |
1593 | 1465 |
1594 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics( | 1466 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics( |
(...skipping 223 matching lines...) |
1818 capture_.last_stream_delay_ms = 0; | 1690 capture_.last_stream_delay_ms = 0; |
1819 | 1691 |
1820 if (capture_.aec_system_delay_jumps > -1) { | 1692 if (capture_.aec_system_delay_jumps > -1) { |
1821 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 1693 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", |
1822 capture_.aec_system_delay_jumps, 51); | 1694 capture_.aec_system_delay_jumps, 51); |
1823 } | 1695 } |
1824 capture_.aec_system_delay_jumps = -1; | 1696 capture_.aec_system_delay_jumps = -1; |
1825 capture_.last_aec_system_delay_ms = 0; | 1697 capture_.last_aec_system_delay_ms = 0; |
1826 } | 1698 } |
1827 | 1699 |
1828 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1700 int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
1829 int AudioProcessingImpl::WriteMessageToDebugFile( | 1701 InternalAPMConfig config; |
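| // Snapshot the current submodule settings; |forced| requests a write even if the config has not changed since the last one. |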
1830 FileWrapper* debug_file, | |
1831 int64_t* filesize_limit_bytes, | |
1832 rtc::CriticalSection* crit_debug, | |
1833 ApmDebugDumpThreadState* debug_state) { | |
1834 int32_t size = debug_state->event_msg->ByteSize(); | |
1835 if (size <= 0) { | |
1836 return kUnspecifiedError; | |
1837 } | |
1838 #if defined(WEBRTC_ARCH_BIG_ENDIAN) | |
1839 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be | |
1840 // pretty safe in assuming little-endian. | |
1841 #endif | |
1842 | 1702 |
1843 if (!debug_state->event_msg->SerializeToString(&debug_state->event_str)) { | 1703 config.aec_enabled = public_submodules_->echo_cancellation->is_enabled(); |
1844 return kUnspecifiedError; | 1704 config.aec_delay_agnostic_enabled = |
1845 } | 1705 public_submodules_->echo_cancellation->is_delay_agnostic_enabled(); |
| 1706 config.aec_drift_compensation_enabled = |
| 1707 public_submodules_->echo_cancellation->is_drift_compensation_enabled(); |
| 1708 config.aec_extended_filter_enabled = |
| 1709 public_submodules_->echo_cancellation->is_extended_filter_enabled(); |
| 1710 config.aec_suppression_level = static_cast<int>( |
| 1711 public_submodules_->echo_cancellation->suppression_level()); |
1846 | 1712 |
1847 { | 1713 config.aecm_enabled = public_submodules_->echo_control_mobile->is_enabled(); |
1848 // Ensure atomic writes of the message. | 1714 config.aecm_comfort_noise_enabled = |
1849 rtc::CritScope cs_debug(crit_debug); | 1715 public_submodules_->echo_control_mobile->is_comfort_noise_enabled(); |
| 1716 config.aecm_routing_mode = |
| 1717 static_cast<int>(public_submodules_->echo_control_mobile->routing_mode()); |
1850 | 1718 |
1851 RTC_DCHECK(debug_file->is_open()); | 1719 config.agc_enabled = public_submodules_->gain_control->is_enabled(); |
1852 // Update the byte counter. | 1720 config.agc_mode = static_cast<int>(public_submodules_->gain_control->mode()); |
1853 if (*filesize_limit_bytes >= 0) { | 1721 config.agc_limiter_enabled = |
1854 *filesize_limit_bytes -= | 1722 public_submodules_->gain_control->is_limiter_enabled(); |
1855 (sizeof(int32_t) + debug_state->event_str.length()); | 1723 config.noise_robust_agc_enabled = constants_.use_experimental_agc; |
1856 if (*filesize_limit_bytes < 0) { | |
1857 // Not enough bytes are left to write this message, so stop logging. | |
1858 debug_file->CloseFile(); | |
1859 return kNoError; | |
1860 } | |
1861 } | |
1862 // Write message preceded by its size. | |
1863 if (!debug_file->Write(&size, sizeof(int32_t))) { | |
1864 return kFileError; | |
1865 } | |
1866 if (!debug_file->Write(debug_state->event_str.data(), | |
1867 debug_state->event_str.length())) { | |
1868 return kFileError; | |
1869 } | |
1870 } | |
1871 | 1724 |
1872 debug_state->event_msg->Clear(); | 1725 config.hpf_enabled = config_.high_pass_filter.enabled; |
1873 | 1726 |
1874 return kNoError; | 1727 config.ns_enabled = public_submodules_->noise_suppression->is_enabled(); |
1875 } | 1728 config.ns_level = |
| 1729 static_cast<int>(public_submodules_->noise_suppression->level()); |
1876 | 1730 |
1877 int AudioProcessingImpl::WriteInitMessage() { | 1731 config.transient_suppression_enabled = capture_.transient_suppressor_enabled; |
1878 debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT); | 1732 config.intelligibility_enhancer_enabled = |
1879 audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init(); | 1733 capture_nonlocked_.intelligibility_enabled; |
1880 msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz()); | |
1881 | |
1882 msg->set_num_input_channels(static_cast<google::protobuf::int32>( | |
1883 formats_.api_format.input_stream().num_channels())); | |
1884 msg->set_num_output_channels(static_cast<google::protobuf::int32>( | |
1885 formats_.api_format.output_stream().num_channels())); | |
1886 msg->set_num_reverse_channels(static_cast<google::protobuf::int32>( | |
1887 formats_.api_format.reverse_input_stream().num_channels())); | |
1888 msg->set_reverse_sample_rate( | |
1889 formats_.api_format.reverse_input_stream().sample_rate_hz()); | |
1890 msg->set_output_sample_rate( | |
1891 formats_.api_format.output_stream().sample_rate_hz()); | |
1892 msg->set_reverse_output_sample_rate( | |
1893 formats_.api_format.reverse_output_stream().sample_rate_hz()); | |
1894 msg->set_num_reverse_output_channels( | |
1895 formats_.api_format.reverse_output_stream().num_channels()); | |
1896 | |
1897 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | |
1898 &debug_dump_.num_bytes_left_for_log_, | |
1899 &crit_debug_, &debug_dump_.capture)); | |
1900 return kNoError; | |
1901 } | |
1902 | |
1903 int AudioProcessingImpl::WriteConfigMessage(bool forced) { | |
1904 audioproc::Config config; | |
1905 | |
1906 config.set_aec_enabled(public_submodules_->echo_cancellation->is_enabled()); | |
1907 config.set_aec_delay_agnostic_enabled( | |
1908 public_submodules_->echo_cancellation->is_delay_agnostic_enabled()); | |
1909 config.set_aec_drift_compensation_enabled( | |
1910 public_submodules_->echo_cancellation->is_drift_compensation_enabled()); | |
1911 config.set_aec_extended_filter_enabled( | |
1912 public_submodules_->echo_cancellation->is_extended_filter_enabled()); | |
1913 config.set_aec_suppression_level(static_cast<int>( | |
1914 public_submodules_->echo_cancellation->suppression_level())); | |
1915 | |
1916 config.set_aecm_enabled( | |
1917 public_submodules_->echo_control_mobile->is_enabled()); | |
1918 config.set_aecm_comfort_noise_enabled( | |
1919 public_submodules_->echo_control_mobile->is_comfort_noise_enabled()); | |
1920 config.set_aecm_routing_mode(static_cast<int>( | |
1921 public_submodules_->echo_control_mobile->routing_mode())); | |
1922 | |
1923 config.set_agc_enabled(public_submodules_->gain_control->is_enabled()); | |
1924 config.set_agc_mode( | |
1925 static_cast<int>(public_submodules_->gain_control->mode())); | |
1926 config.set_agc_limiter_enabled( | |
1927 public_submodules_->gain_control->is_limiter_enabled()); | |
1928 config.set_noise_robust_agc_enabled(constants_.use_experimental_agc); | |
1929 | |
1930 config.set_hpf_enabled(config_.high_pass_filter.enabled); | |
1931 | |
1932 config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled()); | |
1933 config.set_ns_level( | |
1934 static_cast<int>(public_submodules_->noise_suppression->level())); | |
1935 | |
1936 config.set_transient_suppression_enabled( | |
1937 capture_.transient_suppressor_enabled); | |
1938 config.set_intelligibility_enhancer_enabled( | |
1939 capture_nonlocked_.intelligibility_enabled); | |
1940 | 1734 |
1941 std::string experiments_description = | 1735 std::string experiments_description = |
1942 public_submodules_->echo_cancellation->GetExperimentsDescription(); | 1736 public_submodules_->echo_cancellation->GetExperimentsDescription(); |
1943 // TODO(peah): Add semicolon-separated concatenations of experiment | 1737 // TODO(peah): Add semicolon-separated concatenations of experiment |
1944 // descriptions for other submodules. | 1738 // descriptions for other submodules. |
1945 if (capture_nonlocked_.level_controller_enabled) { | 1739 if (capture_nonlocked_.level_controller_enabled) { |
1946 experiments_description += "LevelController;"; | 1740 experiments_description += "LevelController;"; |
1947 } | 1741 } |
1948 if (constants_.agc_clipped_level_min != kClippedLevelMin) { | 1742 if (constants_.agc_clipped_level_min != kClippedLevelMin) { |
1949 experiments_description += "AgcClippingLevelExperiment;"; | 1743 experiments_description += "AgcClippingLevelExperiment;"; |
1950 } | 1744 } |
1951 if (capture_nonlocked_.echo_canceller3_enabled) { | 1745 if (capture_nonlocked_.echo_canceller3_enabled) { |
1952 experiments_description += "EchoCanceller3;"; | 1746 experiments_description += "EchoCanceller3;"; |
1953 } | 1747 } |
1954 config.set_experiments_description(experiments_description); | 1748 config.experiments_description = experiments_description; |
1955 | 1749 |
1956 std::string serialized_config = config.SerializeAsString(); | 1750 aec_dump_->WriteConfig(config, forced); |
1957 if (!forced && | |
1958 debug_dump_.capture.last_serialized_config == serialized_config) { | |
1959 return kNoError; | |
1960 } | |
1961 | |
1962 debug_dump_.capture.last_serialized_config = serialized_config; | |
1963 | |
1964 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); | |
1965 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | |
1966 | |
1967 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | |
1968 &debug_dump_.num_bytes_left_for_log_, | |
1969 &crit_debug_, &debug_dump_.capture)); | |
1970 return kNoError; | 1751 return kNoError; |
1971 } | 1752 } |
1972 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
1973 | 1753 |
1974 AudioProcessingImpl::ApmCaptureState::ApmCaptureState( | 1754 AudioProcessingImpl::ApmCaptureState::ApmCaptureState( |
1975 bool transient_suppressor_enabled, | 1755 bool transient_suppressor_enabled, |
1976 const std::vector<Point>& array_geometry, | 1756 const std::vector<Point>& array_geometry, |
1977 SphericalPointf target_direction) | 1757 SphericalPointf target_direction) |
1978 : aec_system_delay_jumps(-1), | 1758 : aec_system_delay_jumps(-1), |
1979 delay_offset_ms(0), | 1759 delay_offset_ms(0), |
1980 was_stream_delay_set(false), | 1760 was_stream_delay_set(false), |
1981 last_stream_delay_ms(0), | 1761 last_stream_delay_ms(0), |
1982 last_aec_system_delay_ms(0), | 1762 last_aec_system_delay_ms(0), |
1983 stream_delay_jumps(-1), | 1763 stream_delay_jumps(-1), |
1984 output_will_be_muted(false), | 1764 output_will_be_muted(false), |
1985 key_pressed(false), | 1765 key_pressed(false), |
1986 transient_suppressor_enabled(transient_suppressor_enabled), | 1766 transient_suppressor_enabled(transient_suppressor_enabled), |
1987 array_geometry(array_geometry), | 1767 array_geometry(array_geometry), |
1988 target_direction(target_direction), | 1768 target_direction(target_direction), |
1989 capture_processing_format(kSampleRate16kHz), | 1769 capture_processing_format(kSampleRate16kHz), |
1990 split_rate(kSampleRate16kHz) {} | 1770 split_rate(kSampleRate16kHz) {} |
1991 | 1771 |
1992 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 1772 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
1993 | 1773 |
1994 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 1774 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
1995 | 1775 |
1996 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 1776 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
1997 | 1777 |
1998 } // namespace webrtc | 1778 } // namespace webrtc |