| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" | 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" |
| 12 | 12 |
| 13 #include <algorithm> | 13 #include <algorithm> |
| 14 | 14 |
| 15 #include "webrtc/base/checks.h" | 15 #include "webrtc/base/checks.h" |
| 16 #include "webrtc/base/platform_file.h" | 16 #include "webrtc/base/platform_file.h" |
| 17 #include "webrtc/base/trace_event.h" | 17 #include "webrtc/base/trace_event.h" |
| 18 #include "webrtc/common_audio/audio_converter.h" | 18 #include "webrtc/common_audio/audio_converter.h" |
| 19 #include "webrtc/common_audio/channel_buffer.h" | 19 #include "webrtc/common_audio/channel_buffer.h" |
| 20 #include "webrtc/common_audio/include/audio_util.h" | 20 #include "webrtc/common_audio/include/audio_util.h" |
| 21 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" | 21 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" |
| 22 #include "webrtc/modules/audio_processing/aec/aec_core.h" | 22 #include "webrtc/modules/audio_processing/aec/aec_core.h" |
| 23 #include "webrtc/modules/audio_processing/aec3/echo_canceller3.h" | 23 #include "webrtc/modules/audio_processing/aec3/echo_canceller3.h" |
| 24 #include "webrtc/modules/audio_processing/aec_dumper/null_aec_dumper.h" |
| 24 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h" | 25 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h" |
| 25 #include "webrtc/modules/audio_processing/audio_buffer.h" | 26 #include "webrtc/modules/audio_processing/audio_buffer.h" |
| 26 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h" | 27 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h" |
| 27 #include "webrtc/modules/audio_processing/common.h" | 28 #include "webrtc/modules/audio_processing/common.h" |
| 28 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h" | 29 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h" |
| 29 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" | 30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" |
| 30 #include "webrtc/modules/audio_processing/gain_control_for_experimental_agc.h" | 31 #include "webrtc/modules/audio_processing/gain_control_for_experimental_agc.h" |
| 31 #include "webrtc/modules/audio_processing/gain_control_impl.h" | 32 #include "webrtc/modules/audio_processing/gain_control_impl.h" |
| 32 #if WEBRTC_INTELLIGIBILITY_ENHANCER | 33 #if WEBRTC_INTELLIGIBILITY_ENHANCER |
| 33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc
er.h" | 34 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc
er.h" |
| 34 #endif | 35 #endif |
| 35 #include "webrtc/modules/audio_processing/level_controller/level_controller.h" | 36 #include "webrtc/modules/audio_processing/level_controller/level_controller.h" |
| 36 #include "webrtc/modules/audio_processing/level_estimator_impl.h" | 37 #include "webrtc/modules/audio_processing/level_estimator_impl.h" |
| 37 #include "webrtc/modules/audio_processing/low_cut_filter.h" | 38 #include "webrtc/modules/audio_processing/low_cut_filter.h" |
| 38 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" | 39 #include "webrtc/modules/audio_processing/noise_suppression_impl.h" |
| 39 #include "webrtc/modules/audio_processing/residual_echo_detector.h" | 40 #include "webrtc/modules/audio_processing/residual_echo_detector.h" |
| 40 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" | 41 #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" |
| 41 #include "webrtc/modules/audio_processing/voice_detection_impl.h" | 42 #include "webrtc/modules/audio_processing/voice_detection_impl.h" |
| 42 #include "webrtc/modules/include/module_common_types.h" | 43 #include "webrtc/modules/include/module_common_types.h" |
| 43 #include "webrtc/system_wrappers/include/file_wrapper.h" | 44 #include "webrtc/system_wrappers/include/file_wrapper.h" |
| 44 #include "webrtc/system_wrappers/include/logging.h" | 45 #include "webrtc/system_wrappers/include/logging.h" |
| 45 #include "webrtc/system_wrappers/include/metrics.h" | 46 #include "webrtc/system_wrappers/include/metrics.h" |
| 46 | 47 |
| 47 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 48 // Files generated at build-time by the protobuf compiler. | |
| 49 #ifdef WEBRTC_ANDROID_PLATFORM_BUILD | |
| 50 #include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h" | |
| 51 #else | |
| 52 #include "webrtc/modules/audio_processing/debug.pb.h" | |
| 53 #endif | |
| 54 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 55 | |
| 56 // Check to verify that the define for the intelligibility enhancer is properly | 48 // Check to verify that the define for the intelligibility enhancer is properly |
| 57 // set. | 49 // set. |
| 58 #if !defined(WEBRTC_INTELLIGIBILITY_ENHANCER) || \ | 50 #if !defined(WEBRTC_INTELLIGIBILITY_ENHANCER) || \ |
| 59 (WEBRTC_INTELLIGIBILITY_ENHANCER != 0 && \ | 51 (WEBRTC_INTELLIGIBILITY_ENHANCER != 0 && \ |
| 60 WEBRTC_INTELLIGIBILITY_ENHANCER != 1) | 52 WEBRTC_INTELLIGIBILITY_ENHANCER != 1) |
| 61 #error "Set WEBRTC_INTELLIGIBILITY_ENHANCER to either 0 or 1" | 53 #error "Set WEBRTC_INTELLIGIBILITY_ENHANCER to either 0 or 1" |
| 62 #endif | 54 #endif |
| 63 | 55 |
| 64 #define RETURN_ON_ERR(expr) \ | 56 #define RETURN_ON_ERR(expr) \ |
| 65 do { \ | 57 do { \ |
| (...skipping 232 matching lines...) |
| 298 | 290 |
| 299 return apm; | 291 return apm; |
| 300 } | 292 } |
| 301 | 293 |
| 302 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) | 294 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) |
| 303 : AudioProcessingImpl(config, nullptr) {} | 295 : AudioProcessingImpl(config, nullptr) {} |
| 304 | 296 |
| 305 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config, | 297 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config, |
| 306 NonlinearBeamformer* beamformer) | 298 NonlinearBeamformer* beamformer) |
| 307 : high_pass_filter_impl_(new HighPassFilterImpl(this)), | 299 : high_pass_filter_impl_(new HighPassFilterImpl(this)), |
| 300 aec_dumper_(AecDumper::CreateNullDumper()), |
| 308 public_submodules_(new ApmPublicSubmodules()), | 301 public_submodules_(new ApmPublicSubmodules()), |
| 309 private_submodules_(new ApmPrivateSubmodules(beamformer)), | 302 private_submodules_(new ApmPrivateSubmodules(beamformer)), |
| 310 constants_(config.Get<ExperimentalAgc>().startup_min_volume, | 303 constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
| 311 config.Get<ExperimentalAgc>().clipped_level_min, | 304 config.Get<ExperimentalAgc>().clipped_level_min, |
| 312 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 305 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| 313 false), | 306 false), |
| 314 #else | 307 #else |
| 315 config.Get<ExperimentalAgc>().enabled), | 308 config.Get<ExperimentalAgc>().enabled), |
| 316 #endif | 309 #endif |
| 317 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 310 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| (...skipping 35 matching lines...) |
| 353 SetExtraOptions(config); | 346 SetExtraOptions(config); |
| 354 } | 347 } |
| 355 | 348 |
| 356 AudioProcessingImpl::~AudioProcessingImpl() { | 349 AudioProcessingImpl::~AudioProcessingImpl() { |
| 357 // Depends on gain_control_ and | 350 // Depends on gain_control_ and |
| 358 // public_submodules_->gain_control_for_experimental_agc. | 351 // public_submodules_->gain_control_for_experimental_agc. |
| 359 private_submodules_->agc_manager.reset(); | 352 private_submodules_->agc_manager.reset(); |
| 360 // Depends on gain_control_. | 353 // Depends on gain_control_. |
| 361 public_submodules_->gain_control_for_experimental_agc.reset(); | 354 public_submodules_->gain_control_for_experimental_agc.reset(); |
| 362 | 355 |
| 363 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 364 debug_dump_.debug_file->CloseFile(); | |
| 365 #endif | |
| 366 } | 356 } |
| 367 | 357 |
| 368 int AudioProcessingImpl::Initialize() { | 358 int AudioProcessingImpl::Initialize() { |
| 369 // Run in a single-threaded manner during initialization. | 359 // Run in a single-threaded manner during initialization. |
| 370 rtc::CritScope cs_render(&crit_render_); | 360 rtc::CritScope cs_render(&crit_render_); |
| 371 rtc::CritScope cs_capture(&crit_capture_); | 361 rtc::CritScope cs_capture(&crit_capture_); |
| 372 return InitializeLocked(); | 362 return InitializeLocked(); |
| 373 } | 363 } |
| 374 | 364 |
| 375 int AudioProcessingImpl::Initialize(int capture_input_sample_rate_hz, | 365 int AudioProcessingImpl::Initialize(int capture_input_sample_rate_hz, |
| (...skipping 27 matching lines...) |
| 403 const ProcessingConfig& processing_config) { | 393 const ProcessingConfig& processing_config) { |
| 404 return MaybeInitialize(processing_config, false); | 394 return MaybeInitialize(processing_config, false); |
| 405 } | 395 } |
| 406 | 396 |
| 407 int AudioProcessingImpl::MaybeInitializeCapture( | 397 int AudioProcessingImpl::MaybeInitializeCapture( |
| 408 const ProcessingConfig& processing_config, | 398 const ProcessingConfig& processing_config, |
| 409 bool force_initialization) { | 399 bool force_initialization) { |
| 410 return MaybeInitialize(processing_config, force_initialization); | 400 return MaybeInitialize(processing_config, force_initialization); |
| 411 } | 401 } |
| 412 | 402 |
| 413 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 414 | |
| 415 AudioProcessingImpl::ApmDebugDumpThreadState::ApmDebugDumpThreadState() | |
| 416 : event_msg(new audioproc::Event()) {} | |
| 417 | |
| 418 AudioProcessingImpl::ApmDebugDumpThreadState::~ApmDebugDumpThreadState() {} | |
| 419 | |
| 420 AudioProcessingImpl::ApmDebugDumpState::ApmDebugDumpState() | |
| 421 : debug_file(FileWrapper::Create()) {} | |
| 422 | |
| 423 AudioProcessingImpl::ApmDebugDumpState::~ApmDebugDumpState() {} | |
| 424 | |
| 425 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 426 | 403 |
| 427 // Calls InitializeLocked() if any of the audio parameters have changed from | 404 // Calls InitializeLocked() if any of the audio parameters have changed from |
| 428 // their current values (needs to be called while holding the crit_render_lock). | 405 // their current values (needs to be called while holding the crit_render_lock). |
| 429 int AudioProcessingImpl::MaybeInitialize( | 406 int AudioProcessingImpl::MaybeInitialize( |
| 430 const ProcessingConfig& processing_config, | 407 const ProcessingConfig& processing_config, |
| 431 bool force_initialization) { | 408 bool force_initialization) { |
| 432 // Called from both threads. Thread check is therefore not possible. | 409 // Called from both threads. Thread check is therefore not possible. |
| 433 if (processing_config == formats_.api_format && !force_initialization) { | 410 if (processing_config == formats_.api_format && !force_initialization) { |
| 434 return kNoError; | 411 return kNoError; |
| 435 } | 412 } |
| (...skipping 74 matching lines...) |
| 510 #endif | 487 #endif |
| 511 InitializeLowCutFilter(); | 488 InitializeLowCutFilter(); |
| 512 public_submodules_->noise_suppression->Initialize(num_proc_channels(), | 489 public_submodules_->noise_suppression->Initialize(num_proc_channels(), |
| 513 proc_sample_rate_hz()); | 490 proc_sample_rate_hz()); |
| 514 public_submodules_->voice_detection->Initialize(proc_split_sample_rate_hz()); | 491 public_submodules_->voice_detection->Initialize(proc_split_sample_rate_hz()); |
| 515 public_submodules_->level_estimator->Initialize(); | 492 public_submodules_->level_estimator->Initialize(); |
| 516 InitializeLevelController(); | 493 InitializeLevelController(); |
| 517 InitializeResidualEchoDetector(); | 494 InitializeResidualEchoDetector(); |
| 518 InitializeEchoCanceller3(); | 495 InitializeEchoCanceller3(); |
| 519 | 496 |
| 520 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 497 aec_dumper_->WriteInitMessage(formats_.api_format); |
| 521 if (debug_dump_.debug_file->is_open()) { | |
| 522 int err = WriteInitMessage(); | |
| 523 if (err != kNoError) { | |
| 524 return err; | |
| 525 } | |
| 526 } | |
| 527 #endif | |
| 528 | |
| 529 return kNoError; | 498 return kNoError; |
| 530 } | 499 } |
| 531 | 500 |
| 532 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 501 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| 533 for (const auto& stream : config.streams) { | 502 for (const auto& stream : config.streams) { |
| 534 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 503 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
| 535 return kBadSampleRateError; | 504 return kBadSampleRateError; |
| 536 } | 505 } |
| 537 } | 506 } |
| 538 | 507 |
| (...skipping 255 matching lines...) |
| 794 { | 763 { |
| 795 // Do conditional reinitialization. | 764 // Do conditional reinitialization. |
| 796 rtc::CritScope cs_render(&crit_render_); | 765 rtc::CritScope cs_render(&crit_render_); |
| 797 RETURN_ON_ERR( | 766 RETURN_ON_ERR( |
| 798 MaybeInitializeCapture(processing_config, reinitialization_required)); | 767 MaybeInitializeCapture(processing_config, reinitialization_required)); |
| 799 } | 768 } |
| 800 rtc::CritScope cs_capture(&crit_capture_); | 769 rtc::CritScope cs_capture(&crit_capture_); |
| 801 RTC_DCHECK_EQ(processing_config.input_stream().num_frames(), | 770 RTC_DCHECK_EQ(processing_config.input_stream().num_frames(), |
| 802 formats_.api_format.input_stream().num_frames()); | 771 formats_.api_format.input_stream().num_frames()); |
| 803 | 772 |
| 804 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 773 std::unique_ptr<AecDumper::CaptureStreamInfo> stream_info = |
| 805 if (debug_dump_.debug_file->is_open()) { | 774 aec_dumper_->GetCaptureStreamInfo(); |
| 806 RETURN_ON_ERR(WriteConfigMessage(false)); | 775 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 776 const size_t channel_size = |
| 777 formats_.api_format.input_stream().num_frames(); |
| 807 | 778 |
| 808 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 779 { |
| 809 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 780 std::vector<rtc::ArrayView<const float>> src_view; |
| 810 const size_t channel_size = | |
| 811 sizeof(float) * formats_.api_format.input_stream().num_frames(); | |
| 812 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 781 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
| 813 ++i) | 782 ++i) { |
| 814 msg->add_input_channel(src[i], channel_size); | 783 src_view.emplace_back(src[i], channel_size); |
| 784 } |
| 785 stream_info->AddInput(src_view); |
| 815 } | 786 } |
| 816 #endif | 787 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
| 817 | 788 |
| 818 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 789 // This check was previously done in ProcessCaptureStreamLocked(). |
| 790 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && |
| 791 public_submodules_->echo_control_mobile->is_enabled())); |
| 792 |
| 793 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); |
| 794 stream_info->set_drift( |
| 795 public_submodules_->echo_cancellation->stream_drift_samples()); |
| 796 stream_info->set_level(gain_control()->stream_analog_level()); |
| 797 stream_info->set_keypress(capture_.key_pressed); |
| 798 |
| 819 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 799 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 820 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 800 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
| 821 | 801 |
| 822 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 802 { |
| 823 if (debug_dump_.debug_file->is_open()) { | |
| 824 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | |
| 825 const size_t channel_size = | 803 const size_t channel_size = |
| 826 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 804 formats_.api_format.output_stream().num_frames(); |
| 805 std::vector<rtc::ArrayView<const float>> dest_view; |
| 827 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | 806 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
| 828 ++i) | 807 ++i) { |
| 829 msg->add_output_channel(dest[i], channel_size); | 808 dest_view.emplace_back(dest[i], channel_size); |
| 830 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 809 } |
| 831 &debug_dump_.num_bytes_left_for_log_, | 810 stream_info->AddOutput(dest_view); |
| 832 &crit_debug_, &debug_dump_.capture)); | |
| 833 } | 811 } |
| 834 #endif | 812 aec_dumper_->WriteCaptureStreamMessage(std::move(stream_info)); |
| 835 | 813 |
| 836 return kNoError; | 814 return kNoError; |
| 837 } | 815 } |
| 838 | 816 |
| 839 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { | 817 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { |
| 840 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), | 818 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), |
| 841 num_reverse_channels(), | 819 num_reverse_channels(), |
| 842 &aec_render_queue_buffer_); | 820 &aec_render_queue_buffer_); |
| 843 | 821 |
| 844 RTC_DCHECK_GE(160, audio->num_frames_per_band()); | 822 RTC_DCHECK_GE(160, audio->num_frames_per_band()); |
| (...skipping 217 matching lines...) |
| 1062 rtc::CritScope cs_render(&crit_render_); | 1040 rtc::CritScope cs_render(&crit_render_); |
| 1063 RETURN_ON_ERR( | 1041 RETURN_ON_ERR( |
| 1064 MaybeInitializeCapture(processing_config, reinitialization_required)); | 1042 MaybeInitializeCapture(processing_config, reinitialization_required)); |
| 1065 } | 1043 } |
| 1066 rtc::CritScope cs_capture(&crit_capture_); | 1044 rtc::CritScope cs_capture(&crit_capture_); |
| 1067 if (frame->samples_per_channel_ != | 1045 if (frame->samples_per_channel_ != |
| 1068 formats_.api_format.input_stream().num_frames()) { | 1046 formats_.api_format.input_stream().num_frames()) { |
| 1069 return kBadDataLengthError; | 1047 return kBadDataLengthError; |
| 1070 } | 1048 } |
| 1071 | 1049 |
| 1072 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1050 std::unique_ptr<AecDumper::CaptureStreamInfo> stream_info = |
| 1073 if (debug_dump_.debug_file->is_open()) { | 1051 aec_dumper_->GetCaptureStreamInfo(); |
| 1074 RETURN_ON_ERR(WriteConfigMessage(false)); | 1052 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 1075 | 1053 stream_info->AddInput(*frame); |
| 1076 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | |
| 1077 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | |
| 1078 const size_t data_size = | |
| 1079 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | |
| 1080 msg->set_input_data(frame->data_, data_size); | |
| 1081 } | |
| 1082 #endif | |
| 1083 | 1054 |
| 1084 capture_.capture_audio->DeinterleaveFrom(frame); | 1055 capture_.capture_audio->DeinterleaveFrom(frame); |
| 1056 |
| 1057 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && |
| 1058 public_submodules_->echo_control_mobile->is_enabled())); |
| 1059 |
| 1060 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); |
| 1061 stream_info->set_drift( |
| 1062 public_submodules_->echo_cancellation->stream_drift_samples()); |
| 1063 stream_info->set_level(gain_control()->stream_analog_level()); |
| 1064 stream_info->set_keypress(capture_.key_pressed); |
| 1065 |
| 1085 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 1066 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 1086 capture_.capture_audio->InterleaveTo( | 1067 capture_.capture_audio->InterleaveTo( |
| 1087 frame, submodule_states_.CaptureMultiBandProcessingActive()); | 1068 frame, submodule_states_.CaptureMultiBandProcessingActive()); |
| 1088 | 1069 |
| 1089 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1070 stream_info->AddOutput(*frame); |
| 1090 if (debug_dump_.debug_file->is_open()) { | 1071 aec_dumper_->WriteCaptureStreamMessage(std::move(stream_info)); |
| 1091 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | |
| 1092 const size_t data_size = | |
| 1093 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | |
| 1094 msg->set_output_data(frame->data_, data_size); | |
| 1095 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | |
| 1096 &debug_dump_.num_bytes_left_for_log_, | |
| 1097 &crit_debug_, &debug_dump_.capture)); | |
| 1098 } | |
| 1099 #endif | |
| 1100 | |
| 1101 return kNoError; | 1072 return kNoError; |
| 1102 } | 1073 } |
| 1103 | 1074 |
| 1104 int AudioProcessingImpl::ProcessCaptureStreamLocked() { | 1075 int AudioProcessingImpl::ProcessCaptureStreamLocked() { |
| 1105 // Ensure that not both the AEC and AECM are active at the same time. | 1076 // Ensure that not both the AEC and AECM are active at the same time. |
| 1106 // TODO(peah): Simplify once the public API Enable functions for these | 1077 // TODO(peah): Simplify once the public API Enable functions for these |
| 1107 // are moved to APM. | 1078 // are moved to APM. |
| 1108 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && | 1079 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && |
| 1109 public_submodules_->echo_control_mobile->is_enabled())); | 1080 public_submodules_->echo_control_mobile->is_enabled())); |
| 1110 | 1081 |
| 1111 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1112 if (debug_dump_.debug_file->is_open()) { | |
| 1113 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | |
| 1114 msg->set_delay(capture_nonlocked_.stream_delay_ms); | |
| 1115 msg->set_drift( | |
| 1116 public_submodules_->echo_cancellation->stream_drift_samples()); | |
| 1117 msg->set_level(gain_control()->stream_analog_level()); | |
| 1118 msg->set_keypress(capture_.key_pressed); | |
| 1119 } | |
| 1120 #endif | |
| 1121 | 1082 |
| 1122 MaybeUpdateHistograms(); | 1083 MaybeUpdateHistograms(); |
| 1123 | 1084 |
| 1124 AudioBuffer* capture_buffer = capture_.capture_audio.get(); // For brevity. | 1085 AudioBuffer* capture_buffer = capture_.capture_audio.get(); // For brevity. |
| 1125 | 1086 |
| 1126 capture_input_rms_.Analyze(rtc::ArrayView<const int16_t>( | 1087 capture_input_rms_.Analyze(rtc::ArrayView<const int16_t>( |
| 1127 capture_buffer->channels_const()[0], | 1088 capture_buffer->channels_const()[0], |
| 1128 capture_nonlocked_.capture_processing_format.num_frames())); | 1089 capture_nonlocked_.capture_processing_format.num_frames())); |
| 1129 const bool log_rms = ++capture_rms_interval_counter_ >= 1000; | 1090 const bool log_rms = ++capture_rms_interval_counter_ >= 1000; |
| 1130 if (log_rms) { | 1091 if (log_rms) { |
| (...skipping 209 matching lines...) |
| 1340 } | 1301 } |
| 1341 | 1302 |
| 1342 ProcessingConfig processing_config = formats_.api_format; | 1303 ProcessingConfig processing_config = formats_.api_format; |
| 1343 processing_config.reverse_input_stream() = input_config; | 1304 processing_config.reverse_input_stream() = input_config; |
| 1344 processing_config.reverse_output_stream() = output_config; | 1305 processing_config.reverse_output_stream() = output_config; |
| 1345 | 1306 |
| 1346 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); | 1307 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); |
| 1347 assert(input_config.num_frames() == | 1308 assert(input_config.num_frames() == |
| 1348 formats_.api_format.reverse_input_stream().num_frames()); | 1309 formats_.api_format.reverse_input_stream().num_frames()); |
| 1349 | 1310 |
| 1350 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1311 std::vector<rtc::ArrayView<const float>> src_view; |
| 1351 if (debug_dump_.debug_file->is_open()) { | 1312 const size_t channel_size = |
| 1352 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); | 1313 formats_.api_format.reverse_input_stream().num_frames(); |
| 1353 audioproc::ReverseStream* msg = | 1314 |
| 1354 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1315 for (size_t i = 0; |
| 1355 const size_t channel_size = | 1316 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) { |
| 1356 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1317 src_view.emplace_back(src[i], channel_size); |
| 1357 for (size_t i = 0; | |
| 1358 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | |
| 1359 msg->add_channel(src[i], channel_size); | |
| 1360 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | |
| 1361 &debug_dump_.num_bytes_left_for_log_, | |
| 1362 &crit_debug_, &debug_dump_.render)); | |
| 1363 } | 1318 } |
| 1364 #endif | 1319 aec_dumper_->WriteReverseStreamMessage(src_view); |
| 1365 | 1320 |
| 1366 render_.render_audio->CopyFrom(src, | 1321 render_.render_audio->CopyFrom(src, |
| 1367 formats_.api_format.reverse_input_stream()); | 1322 formats_.api_format.reverse_input_stream()); |
| 1368 return ProcessRenderStreamLocked(); | 1323 return ProcessRenderStreamLocked(); |
| 1369 } | 1324 } |
| 1370 | 1325 |
| 1371 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 1326 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| 1372 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 1327 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); |
| 1373 rtc::CritScope cs(&crit_render_); | 1328 rtc::CritScope cs(&crit_render_); |
| 1374 if (frame == nullptr) { | 1329 if (frame == nullptr) { |
| (...skipping 20 matching lines...) |
| 1395 frame->sample_rate_hz_); | 1350 frame->sample_rate_hz_); |
| 1396 processing_config.reverse_output_stream().set_num_channels( | 1351 processing_config.reverse_output_stream().set_num_channels( |
| 1397 frame->num_channels_); | 1352 frame->num_channels_); |
| 1398 | 1353 |
| 1399 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); | 1354 RETURN_ON_ERR(MaybeInitializeRender(processing_config)); |
| 1400 if (frame->samples_per_channel_ != | 1355 if (frame->samples_per_channel_ != |
| 1401 formats_.api_format.reverse_input_stream().num_frames()) { | 1356 formats_.api_format.reverse_input_stream().num_frames()) { |
| 1402 return kBadDataLengthError; | 1357 return kBadDataLengthError; |
| 1403 } | 1358 } |
| 1404 | 1359 |
| 1405 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1360 aec_dumper_->WriteReverseStreamMessage(*frame); |
| 1406 if (debug_dump_.debug_file->is_open()) { | 1361 |
| 1407 debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); | |
| 1408 audioproc::ReverseStream* msg = | |
| 1409 debug_dump_.render.event_msg->mutable_reverse_stream(); | |
| 1410 const size_t data_size = | |
| 1411 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | |
| 1412 msg->set_data(frame->data_, data_size); | |
| 1413 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | |
| 1414 &debug_dump_.num_bytes_left_for_log_, | |
| 1415 &crit_debug_, &debug_dump_.render)); | |
| 1416 } | |
| 1417 #endif | |
| 1418 render_.render_audio->DeinterleaveFrom(frame); | 1362 render_.render_audio->DeinterleaveFrom(frame); |
| 1419 RETURN_ON_ERR(ProcessRenderStreamLocked()); | 1363 RETURN_ON_ERR(ProcessRenderStreamLocked()); |
| 1420 render_.render_audio->InterleaveTo( | 1364 render_.render_audio->InterleaveTo( |
| 1421 frame, submodule_states_.RenderMultiBandProcessingActive()); | 1365 frame, submodule_states_.RenderMultiBandProcessingActive()); |
| 1422 return kNoError; | 1366 return kNoError; |
| 1423 } | 1367 } |
| 1424 | 1368 |
| 1425 int AudioProcessingImpl::ProcessRenderStreamLocked() { | 1369 int AudioProcessingImpl::ProcessRenderStreamLocked() { |
| 1426 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. | 1370 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. |
| 1427 if (submodule_states_.RenderMultiBandSubModulesActive() && | 1371 if (submodule_states_.RenderMultiBandSubModulesActive() && |
| (...skipping 67 matching lines...) |
| 1495 capture_.delay_offset_ms = offset; | 1439 capture_.delay_offset_ms = offset; |
| 1496 } | 1440 } |
| 1497 | 1441 |
| 1498 int AudioProcessingImpl::delay_offset_ms() const { | 1442 int AudioProcessingImpl::delay_offset_ms() const { |
| 1499 rtc::CritScope cs(&crit_capture_); | 1443 rtc::CritScope cs(&crit_capture_); |
| 1500 return capture_.delay_offset_ms; | 1444 return capture_.delay_offset_ms; |
| 1501 } | 1445 } |
| 1502 | 1446 |
| 1503 int AudioProcessingImpl::StartDebugRecording( | 1447 int AudioProcessingImpl::StartDebugRecording( |
| 1504 const char filename[AudioProcessing::kMaxFilenameSize], | 1448 const char filename[AudioProcessing::kMaxFilenameSize], |
| 1505 int64_t max_log_size_bytes) { | 1449 int64_t max_log_size_bytes, |
| 1506 // Run in a single-threaded manner. | 1450 rtc::TaskQueue* worker_queue) { |
| 1507 rtc::CritScope cs_render(&crit_render_); | |
| 1508 rtc::CritScope cs_capture(&crit_capture_); | 1451 rtc::CritScope cs_capture(&crit_capture_); |
| 1509 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1452 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
| 1510 | 1453 aec_dumper_ = AecDumper::Create(filename, max_log_size_bytes, worker_queue); |
| 1511 if (filename == nullptr) { | |
| 1512 return kNullPointerError; | |
| 1513 } | |
| 1514 | |
| 1515 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1516 debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; | |
| 1517 // Stop any ongoing recording. | |
| 1518 debug_dump_.debug_file->CloseFile(); | |
| 1519 | |
| 1520 if (!debug_dump_.debug_file->OpenFile(filename, false)) { | |
| 1521 return kFileError; | |
| 1522 } | |
| 1523 | |
| 1524 RETURN_ON_ERR(WriteConfigMessage(true)); | 1454 RETURN_ON_ERR(WriteConfigMessage(true)); |
| 1525 RETURN_ON_ERR(WriteInitMessage()); | 1455 aec_dumper_->WriteInitMessage(formats_.api_format); |
| 1526 return kNoError; | 1456 return kNoError; |
| 1527 #else | |
| 1528 return kUnsupportedFunctionError; | |
| 1529 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1530 } | 1457 } |
| 1531 | 1458 |
| 1532 int AudioProcessingImpl::StartDebugRecording(FILE* handle, | 1459 int AudioProcessingImpl::StartDebugRecording(FILE* handle, |
| 1533 int64_t max_log_size_bytes) { | 1460 int64_t max_log_size_bytes, |
| 1534 // Run in a single-threaded manner. | 1461 rtc::TaskQueue* worker_queue) { |
| 1535 rtc::CritScope cs_render(&crit_render_); | |
| 1536 rtc::CritScope cs_capture(&crit_capture_); | 1462 rtc::CritScope cs_capture(&crit_capture_); |
| 1537 | 1463 aec_dumper_ = AecDumper::Create(handle, max_log_size_bytes, worker_queue); |
| 1538 if (handle == nullptr) { | |
| 1539 return kNullPointerError; | |
| 1540 } | |
| 1541 | |
| 1542 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1543 debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; | |
| 1544 | |
| 1545 // Stop any ongoing recording. | |
| 1546 debug_dump_.debug_file->CloseFile(); | |
| 1547 | |
| 1548 if (!debug_dump_.debug_file->OpenFromFileHandle(handle)) { | |
| 1549 return kFileError; | |
| 1550 } | |
| 1551 | |
| 1552 RETURN_ON_ERR(WriteConfigMessage(true)); | 1464 RETURN_ON_ERR(WriteConfigMessage(true)); |
| 1553 RETURN_ON_ERR(WriteInitMessage()); | 1465 aec_dumper_->WriteInitMessage(formats_.api_format); |
| 1554 return kNoError; | 1466 return kNoError; |
| 1555 #else | |
| 1556 return kUnsupportedFunctionError; | |
| 1557 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1558 } | 1467 } |
| 1559 | 1468 |
| 1560 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { | 1469 int AudioProcessingImpl::StartDebugRecording(FILE* handle, |
| 1561 return StartDebugRecording(handle, -1); | 1470 rtc::TaskQueue* worker_queue) { |
| 1471 return StartDebugRecording(handle, -1, worker_queue); |
| 1562 } | 1472 } |
| 1563 | 1473 |
| 1564 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( | 1474 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
| 1565 rtc::PlatformFile handle) { | 1475 rtc::PlatformFile handle, |
| 1476 rtc::TaskQueue* worker_queue) { |
| 1566 // Run in a single-threaded manner. | 1477 // Run in a single-threaded manner. |
| 1567 rtc::CritScope cs_render(&crit_render_); | 1478 rtc::CritScope cs_render(&crit_render_); |
| 1568 rtc::CritScope cs_capture(&crit_capture_); | 1479 rtc::CritScope cs_capture(&crit_capture_); |
| 1569 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1480 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
| 1570 return StartDebugRecording(stream, -1); | 1481 return StartDebugRecording(stream, -1, worker_queue); |
| 1571 } | 1482 } |
| 1572 | 1483 |
| 1573 int AudioProcessingImpl::StopDebugRecording() { | 1484 int AudioProcessingImpl::StopDebugRecording() { |
| 1574 // Run in a single-threaded manner. | 1485 aec_dumper_ = AecDumper::CreateNullDumper(); |
| 1575 rtc::CritScope cs_render(&crit_render_); | |
| 1576 rtc::CritScope cs_capture(&crit_capture_); | |
| 1577 | |
| 1578 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1579 // We just return if recording hasn't started. | |
| 1580 debug_dump_.debug_file->CloseFile(); | |
| 1581 return kNoError; | 1486 return kNoError; |
| 1582 #else | |
| 1583 return kUnsupportedFunctionError; | |
| 1584 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1585 } | 1487 } |
| 1586 | 1488 |
| 1587 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics() { | 1489 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics() { |
| 1588 residual_echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); | 1490 residual_echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); |
| 1589 echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); | 1491 echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); |
| 1590 echo_return_loss_enhancement.Set(-100.0f, -100.0f, -100.0f, -100.0f); | 1492 echo_return_loss_enhancement.Set(-100.0f, -100.0f, -100.0f, -100.0f); |
| 1591 a_nlp.Set(-100.0f, -100.0f, -100.0f, -100.0f); | 1493 a_nlp.Set(-100.0f, -100.0f, -100.0f, -100.0f); |
| 1592 } | 1494 } |
| 1593 | 1495 |
| 1594 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics( | 1496 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics( |
| (...skipping 223 matching lines...) |
| 1818 capture_.last_stream_delay_ms = 0; | 1720 capture_.last_stream_delay_ms = 0; |
| 1819 | 1721 |
| 1820 if (capture_.aec_system_delay_jumps > -1) { | 1722 if (capture_.aec_system_delay_jumps > -1) { |
| 1821 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 1723 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", |
| 1822 capture_.aec_system_delay_jumps, 51); | 1724 capture_.aec_system_delay_jumps, 51); |
| 1823 } | 1725 } |
| 1824 capture_.aec_system_delay_jumps = -1; | 1726 capture_.aec_system_delay_jumps = -1; |
| 1825 capture_.last_aec_system_delay_ms = 0; | 1727 capture_.last_aec_system_delay_ms = 0; |
| 1826 } | 1728 } |
| 1827 | 1729 |
| 1730 <<<<<<< HEAD |
| 1828 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1731 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1829 int AudioProcessingImpl::WriteMessageToDebugFile( | 1732 int AudioProcessingImpl::WriteMessageToDebugFile( |
| 1830 FileWrapper* debug_file, | 1733 FileWrapper* debug_file, |
| 1831 int64_t* filesize_limit_bytes, | 1734 int64_t* filesize_limit_bytes, |
| 1832 rtc::CriticalSection* crit_debug, | 1735 rtc::CriticalSection* crit_debug, |
| 1833 ApmDebugDumpThreadState* debug_state) { | 1736 ApmDebugDumpThreadState* debug_state) { |
| 1834 int32_t size = debug_state->event_msg->ByteSize(); | 1737 int32_t size = debug_state->event_msg->ByteSize(); |
| 1835 if (size <= 0) { | 1738 if (size <= 0) { |
| 1836 return kUnspecifiedError; | 1739 return kUnspecifiedError; |
| 1837 } | 1740 } |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1893 formats_.api_format.reverse_output_stream().sample_rate_hz()); | 1796 formats_.api_format.reverse_output_stream().sample_rate_hz()); |
| 1894 msg->set_num_reverse_output_channels( | 1797 msg->set_num_reverse_output_channels( |
| 1895 formats_.api_format.reverse_output_stream().num_channels()); | 1798 formats_.api_format.reverse_output_stream().num_channels()); |
| 1896 | 1799 |
| 1897 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1800 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1898 &debug_dump_.num_bytes_left_for_log_, | 1801 &debug_dump_.num_bytes_left_for_log_, |
| 1899 &crit_debug_, &debug_dump_.capture)); | 1802 &crit_debug_, &debug_dump_.capture)); |
| 1900 return kNoError; | 1803 return kNoError; |
| 1901 } | 1804 } |
| 1902 | 1805 |
| 1806 ||||||| parent of 6d4e36d05... aec-dumper and null-aec-dumper. Most parts are im
plemented. |
| 1807 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1808 int AudioProcessingImpl::WriteMessageToDebugFile( |
| 1809 FileWrapper* debug_file, |
| 1810 int64_t* filesize_limit_bytes, |
| 1811 rtc::CriticalSection* crit_debug, |
| 1812 ApmDebugDumpThreadState* debug_state) { |
| 1813 int32_t size = debug_state->event_msg->ByteSize(); |
| 1814 if (size <= 0) { |
| 1815 return kUnspecifiedError; |
| 1816 } |
| 1817 #if defined(WEBRTC_ARCH_BIG_ENDIAN) |
| 1818 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be |
| 1819 // pretty safe in assuming little-endian. |
| 1820 #endif |
| 1821 |
| 1822 if (!debug_state->event_msg->SerializeToString(&debug_state->event_str)) { |
| 1823 return kUnspecifiedError; |
| 1824 } |
| 1825 |
| 1826 { |
| 1827 // Ensure atomic writes of the message. |
| 1828 rtc::CritScope cs_debug(crit_debug); |
| 1829 |
| 1830 RTC_DCHECK(debug_file->is_open()); |
| 1831 // Update the byte counter. |
| 1832 if (*filesize_limit_bytes >= 0) { |
| 1833 *filesize_limit_bytes -= |
| 1834 (sizeof(int32_t) + debug_state->event_str.length()); |
| 1835 if (*filesize_limit_bytes < 0) { |
| 1836 // Not enough bytes are left to write this message, so stop logging. |
| 1837 debug_file->CloseFile(); |
| 1838 return kNoError; |
| 1839 } |
| 1840 } |
| 1841 // Write message preceded by its size. |
| 1842 if (!debug_file->Write(&size, sizeof(int32_t))) { |
| 1843 return kFileError; |
| 1844 } |
| 1845 if (!debug_file->Write(debug_state->event_str.data(), |
| 1846 debug_state->event_str.length())) { |
| 1847 return kFileError; |
| 1848 } |
| 1849 } |
| 1850 |
| 1851 debug_state->event_msg->Clear(); |
| 1852 |
| 1853 return kNoError; |
| 1854 } |
| 1855 |
| 1856 int AudioProcessingImpl::WriteInitMessage() { |
| 1857 debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT); |
| 1858 audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init(); |
| 1859 msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz()); |
| 1860 |
| 1861 msg->set_num_input_channels(static_cast<int32_t>( |
| 1862 formats_.api_format.input_stream().num_channels())); |
| 1863 msg->set_num_output_channels(static_cast<int32_t>( |
| 1864 formats_.api_format.output_stream().num_channels())); |
| 1865 msg->set_num_reverse_channels(static_cast<int32_t>( |
| 1866 formats_.api_format.reverse_input_stream().num_channels())); |
| 1867 msg->set_reverse_sample_rate( |
| 1868 formats_.api_format.reverse_input_stream().sample_rate_hz()); |
| 1869 msg->set_output_sample_rate( |
| 1870 formats_.api_format.output_stream().sample_rate_hz()); |
| 1871 msg->set_reverse_output_sample_rate( |
| 1872 formats_.api_format.reverse_output_stream().sample_rate_hz()); |
| 1873 msg->set_num_reverse_output_channels( |
| 1874 formats_.api_format.reverse_output_stream().num_channels()); |
| 1875 |
| 1876 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1877 &debug_dump_.num_bytes_left_for_log_, |
| 1878 &crit_debug_, &debug_dump_.capture)); |
| 1879 return kNoError; |
| 1880 } |
| 1881 |
| 1882 ======= |
| 1883 >>>>>>> 6d4e36d05... aec-dumper and null-aec-dumper. Most parts are implemented. |
| 1903 int AudioProcessingImpl::WriteConfigMessage(bool forced) { | 1884 int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
| 1904 audioproc::Config config; | 1885 InternalAPMConfig config; |
| 1905 | 1886 |
| 1906 config.set_aec_enabled(public_submodules_->echo_cancellation->is_enabled()); | 1887 config.aec_enabled = public_submodules_->echo_cancellation->is_enabled(); |
| 1907 config.set_aec_delay_agnostic_enabled( | 1888 config.aec_delay_agnostic_enabled = |
| 1908 public_submodules_->echo_cancellation->is_delay_agnostic_enabled()); | 1889 public_submodules_->echo_cancellation->is_delay_agnostic_enabled(); |
| 1909 config.set_aec_drift_compensation_enabled( | 1890 config.aec_drift_compensation_enabled = |
| 1910 public_submodules_->echo_cancellation->is_drift_compensation_enabled()); | 1891 public_submodules_->echo_cancellation->is_drift_compensation_enabled(); |
| 1911 config.set_aec_extended_filter_enabled( | 1892 config.aec_extended_filter_enabled = |
| 1912 public_submodules_->echo_cancellation->is_extended_filter_enabled()); | 1893 public_submodules_->echo_cancellation->is_extended_filter_enabled(); |
| 1913 config.set_aec_suppression_level(static_cast<int>( | 1894 config.aec_suppression_level = static_cast<int>( |
| 1914 public_submodules_->echo_cancellation->suppression_level())); | 1895 public_submodules_->echo_cancellation->suppression_level()); |
| 1915 | 1896 |
| 1916 config.set_aecm_enabled( | 1897 config.aecm_enabled = public_submodules_->echo_control_mobile->is_enabled(); |
| 1917 public_submodules_->echo_control_mobile->is_enabled()); | 1898 config.aecm_comfort_noise_enabled = |
| 1918 config.set_aecm_comfort_noise_enabled( | 1899 public_submodules_->echo_control_mobile->is_comfort_noise_enabled(); |
| 1919 public_submodules_->echo_control_mobile->is_comfort_noise_enabled()); | 1900 config.aecm_routing_mode = |
| 1920 config.set_aecm_routing_mode(static_cast<int>( | 1901 static_cast<int>(public_submodules_->echo_control_mobile->routing_mode()); |
| 1921 public_submodules_->echo_control_mobile->routing_mode())); | |
| 1922 | 1902 |
| 1923 config.set_agc_enabled(public_submodules_->gain_control->is_enabled()); | 1903 config.agc_enabled = public_submodules_->gain_control->is_enabled(); |
| 1924 config.set_agc_mode( | 1904 config.agc_mode = static_cast<int>(public_submodules_->gain_control->mode()); |
| 1925 static_cast<int>(public_submodules_->gain_control->mode())); | 1905 config.agc_limiter_enabled = |
| 1926 config.set_agc_limiter_enabled( | 1906 public_submodules_->gain_control->is_limiter_enabled(); |
| 1927 public_submodules_->gain_control->is_limiter_enabled()); | 1907 config.noise_robust_agc_enabled = constants_.use_experimental_agc; |
| 1928 config.set_noise_robust_agc_enabled(constants_.use_experimental_agc); | |
| 1929 | 1908 |
| 1930 config.set_hpf_enabled(config_.high_pass_filter.enabled); | 1909 config.hpf_enabled = config_.high_pass_filter.enabled; |
| 1931 | 1910 |
| 1932 config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled()); | 1911 config.ns_enabled = public_submodules_->noise_suppression->is_enabled(); |
| 1933 config.set_ns_level( | 1912 config.ns_level = |
| 1934 static_cast<int>(public_submodules_->noise_suppression->level())); | 1913 static_cast<int>(public_submodules_->noise_suppression->level()); |
| 1935 | 1914 |
| 1936 config.set_transient_suppression_enabled( | 1915 config.transient_suppression_enabled = capture_.transient_suppressor_enabled; |
| 1937 capture_.transient_suppressor_enabled); | 1916 config.intelligibility_enhancer_enabled = |
| 1938 config.set_intelligibility_enhancer_enabled( | 1917 capture_nonlocked_.intelligibility_enabled; |
| 1939 capture_nonlocked_.intelligibility_enabled); | |
| 1940 | 1918 |
| 1941 std::string experiments_description = | 1919 std::string experiments_description = |
| 1942 public_submodules_->echo_cancellation->GetExperimentsDescription(); | 1920 public_submodules_->echo_cancellation->GetExperimentsDescription(); |
| 1943 // TODO(peah): Add semicolon-separated concatenations of experiment | 1921 // TODO(peah): Add semicolon-separated concatenations of experiment |
| 1944 // descriptions for other submodules. | 1922 // descriptions for other submodules. |
| 1945 if (capture_nonlocked_.level_controller_enabled) { | 1923 if (capture_nonlocked_.level_controller_enabled) { |
| 1946 experiments_description += "LevelController;"; | 1924 experiments_description += "LevelController;"; |
| 1947 } | 1925 } |
| 1948 if (constants_.agc_clipped_level_min != kClippedLevelMin) { | 1926 if (constants_.agc_clipped_level_min != kClippedLevelMin) { |
| 1949 experiments_description += "AgcClippingLevelExperiment;"; | 1927 experiments_description += "AgcClippingLevelExperiment;"; |
| 1950 } | 1928 } |
| 1951 if (capture_nonlocked_.echo_canceller3_enabled) { | 1929 if (capture_nonlocked_.echo_canceller3_enabled) { |
| 1952 experiments_description += "EchoCanceller3;"; | 1930 experiments_description += "EchoCanceller3;"; |
| 1953 } | 1931 } |
| 1932 <<<<<<< HEAD |
| 1954 config.set_experiments_description(experiments_description); | 1933 config.set_experiments_description(experiments_description); |
| 1955 | 1934 |
| 1956 std::string serialized_config = config.SerializeAsString(); | 1935 std::string serialized_config = config.SerializeAsString(); |
| 1957 if (!forced && | 1936 if (!forced && |
| 1958 debug_dump_.capture.last_serialized_config == serialized_config) { | 1937 debug_dump_.capture.last_serialized_config == serialized_config) { |
| 1959 return kNoError; | 1938 return kNoError; |
| 1960 } | 1939 } |
| 1961 | 1940 |
| 1962 debug_dump_.capture.last_serialized_config = serialized_config; | 1941 debug_dump_.capture.last_serialized_config = serialized_config; |
| 1963 | 1942 |
| 1964 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); | 1943 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
| 1965 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | 1944 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| 1945 ||||||| parent of 6d4e36d05... aec-dumper and null-aec-dumper. Most parts are im
plemented. |
| 1946 config.set_experiments_description(experiments_description); |
| 1966 | 1947 |
| 1967 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1948 ProtoString serialized_config = config.SerializeAsString(); |
| 1968 &debug_dump_.num_bytes_left_for_log_, | 1949 if (!forced && |
| 1969 &crit_debug_, &debug_dump_.capture)); | 1950 debug_dump_.capture.last_serialized_config == serialized_config) { |
| 1951 return kNoError; |
| 1952 } |
| 1953 |
| 1954 debug_dump_.capture.last_serialized_config = serialized_config; |
| 1955 |
| 1956 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
| 1957 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| 1958 ======= |
| 1959 config.experiments_description = experiments_description; |
| 1960 >>>>>>> 6d4e36d05... aec-dumper and null-aec-dumper. Most parts are implemented. |
| 1961 |
| 1962 aec_dumper_->WriteConfig(config, forced); |
| 1970 return kNoError; | 1963 return kNoError; |
| 1971 } | 1964 } |
| 1972 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1973 | 1965 |
| 1974 AudioProcessingImpl::ApmCaptureState::ApmCaptureState( | 1966 AudioProcessingImpl::ApmCaptureState::ApmCaptureState( |
| 1975 bool transient_suppressor_enabled, | 1967 bool transient_suppressor_enabled, |
| 1976 const std::vector<Point>& array_geometry, | 1968 const std::vector<Point>& array_geometry, |
| 1977 SphericalPointf target_direction) | 1969 SphericalPointf target_direction) |
| 1978 : aec_system_delay_jumps(-1), | 1970 : aec_system_delay_jumps(-1), |
| 1979 delay_offset_ms(0), | 1971 delay_offset_ms(0), |
| 1980 was_stream_delay_set(false), | 1972 was_stream_delay_set(false), |
| 1981 last_stream_delay_ms(0), | 1973 last_stream_delay_ms(0), |
| 1982 last_aec_system_delay_ms(0), | 1974 last_aec_system_delay_ms(0), |
| 1983 stream_delay_jumps(-1), | 1975 stream_delay_jumps(-1), |
| 1984 output_will_be_muted(false), | 1976 output_will_be_muted(false), |
| 1985 key_pressed(false), | 1977 key_pressed(false), |
| 1986 transient_suppressor_enabled(transient_suppressor_enabled), | 1978 transient_suppressor_enabled(transient_suppressor_enabled), |
| 1987 array_geometry(array_geometry), | 1979 array_geometry(array_geometry), |
| 1988 target_direction(target_direction), | 1980 target_direction(target_direction), |
| 1989 capture_processing_format(kSampleRate16kHz), | 1981 capture_processing_format(kSampleRate16kHz), |
| 1990 split_rate(kSampleRate16kHz) {} | 1982 split_rate(kSampleRate16kHz) {} |
| 1991 | 1983 |
| 1992 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 1984 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
| 1993 | 1985 |
| 1994 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 1986 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
| 1995 | 1987 |
| 1996 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 1988 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
| 1997 | 1989 |
| 1998 } // namespace webrtc | 1990 } // namespace webrtc |
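For context, a minimal sketch of how a client would drive the debug-recording API after this change. It assumes the public AudioProcessing interface gains the same worker_queue parameter that AudioProcessingImpl takes above; the file path, size limit, and helper names are illustrative and not part of the CL.

#include "webrtc/base/task_queue.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"

// Starts dumping APM debug data. The worker queue must outlive the recording
// session; the dumper is expected to do its file I/O on that queue rather than
// on the real-time audio threads. Error checking of the return code omitted.
void StartApmDebugDump(webrtc::AudioProcessing* apm,
                       rtc::TaskQueue* worker_queue) {
  apm->StartDebugRecording("/tmp/apm_debug.dat",  // Illustrative path.
                           10 * 1024 * 1024,      // Max log size in bytes.
                           worker_queue);
}

// Stops dumping; per this CL, AudioProcessingImpl swaps back to the null
// dumper internally, so no explicit file handling is needed by the caller.
void StopApmDebugDump(webrtc::AudioProcessing* apm) {
  apm->StopDebugRecording();
}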