Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" | 11 #include "webrtc/modules/audio_processing/audio_processing_impl.h" |
| 12 | 12 |
| 13 #include <algorithm> | 13 #include <algorithm> |
| 14 | 14 |
| 15 #include "webrtc/base/checks.h" | 15 #include "webrtc/base/checks.h" |
| 16 #include "webrtc/base/platform_file.h" | 16 #include "webrtc/base/platform_file.h" |
| 17 #include "webrtc/base/trace_event.h" | 17 #include "webrtc/base/trace_event.h" |
| 18 #include "webrtc/common_audio/audio_converter.h" | 18 #include "webrtc/common_audio/audio_converter.h" |
| 19 #include "webrtc/common_audio/channel_buffer.h" | 19 #include "webrtc/common_audio/channel_buffer.h" |
| 20 #include "webrtc/common_audio/include/audio_util.h" | 20 #include "webrtc/common_audio/include/audio_util.h" |
| 21 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar y.h" | 21 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar y.h" |
| 22 #include "webrtc/modules/audio_processing/aec/aec_core.h" | 22 #include "webrtc/modules/audio_processing/aec/aec_core.h" |
| 23 #include "webrtc/modules/audio_processing/aec3/echo_canceller3.h" | 23 #include "webrtc/modules/audio_processing/aec3/echo_canceller3.h" |
| 24 #include "webrtc/modules/audio_processing/aec_dumper/null_aec_dumper.h" | |
| 24 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h" | 25 #include "webrtc/modules/audio_processing/agc/agc_manager_direct.h" |
| 25 #include "webrtc/modules/audio_processing/audio_buffer.h" | 26 #include "webrtc/modules/audio_processing/audio_buffer.h" |
| 26 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h" | 27 #include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h" |
| 27 #include "webrtc/modules/audio_processing/common.h" | 28 #include "webrtc/modules/audio_processing/common.h" |
| 28 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h" | 29 #include "webrtc/modules/audio_processing/echo_cancellation_impl.h" |
| 29 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" | 30 #include "webrtc/modules/audio_processing/echo_control_mobile_impl.h" |
| 30 #include "webrtc/modules/audio_processing/gain_control_for_experimental_agc.h" | 31 #include "webrtc/modules/audio_processing/gain_control_for_experimental_agc.h" |
| 31 #include "webrtc/modules/audio_processing/gain_control_impl.h" | 32 #include "webrtc/modules/audio_processing/gain_control_impl.h" |
| 32 #if WEBRTC_INTELLIGIBILITY_ENHANCER | 33 #if WEBRTC_INTELLIGIBILITY_ENHANCER |
| 33 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc er.h" | 34 #include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhanc er.h" |
| (...skipping 264 matching lines...) | |
| 298 | 299 |
| 299 return apm; | 300 return apm; |
| 300 } | 301 } |
| 301 | 302 |
| 302 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) | 303 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) |
| 303 : AudioProcessingImpl(config, nullptr) {} | 304 : AudioProcessingImpl(config, nullptr) {} |
| 304 | 305 |
| 305 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config, | 306 AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config, |
| 306 NonlinearBeamformer* beamformer) | 307 NonlinearBeamformer* beamformer) |
| 307 : high_pass_filter_impl_(new HighPassFilterImpl(this)), | 308 : high_pass_filter_impl_(new HighPassFilterImpl(this)), |
| 309 aec_dumper_(AecDumper::CreateNullDumper()), | |
peah-webrtc (2017/03/31 07:24:43): Why cannot we use an empty aecdumper?
aleloi (2017/04/06 15:46:11): It's possible. Then we would have to add 'if (aec_…
peah-webrtc (2017/04/07 12:57:15): Absolutely, I definitely like the simpler code. Bu…
aleloi (2017/04/12 11:05:29): I checked: virtual calls are always generated in r…
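The thread above weighs a null-object dumper against guarding every call site with a pointer check. A minimal sketch of the trade-off under discussion, with made-up names (the real AecDumper interface is defined in a dependent CL and may differ):

```cpp
// Hypothetical illustration of the null-object pattern discussed above.
#include <memory>

class DumperSketch {
 public:
  virtual ~DumperSketch() = default;
  virtual void WriteInitMessage(int sample_rate_hz) = 0;
};

// Null implementation: every method is a no-op, so members like aec_dumper_
// are never nullptr and call sites need no "if (aec_dumper_)" guards.
class NullDumperSketch : public DumperSketch {
 public:
  void WriteInitMessage(int /*sample_rate_hz*/) override {}
};

std::unique_ptr<DumperSketch> CreateNullDumperSketch() {
  return std::unique_ptr<DumperSketch>(new NullDumperSketch());
}

// The cost peah-webrtc asks about: with the null object, disabled dumping
// still pays one virtual call per invocation; with a nullable pointer it
// pays a branch instead:  if (dumper_) dumper_->WriteInitMessage(rate);
```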
| 308 public_submodules_(new ApmPublicSubmodules()), | 310 public_submodules_(new ApmPublicSubmodules()), |
| 309 private_submodules_(new ApmPrivateSubmodules(beamformer)), | 311 private_submodules_(new ApmPrivateSubmodules(beamformer)), |
| 310 constants_(config.Get<ExperimentalAgc>().startup_min_volume, | 312 constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
| 311 config.Get<ExperimentalAgc>().clipped_level_min, | 313 config.Get<ExperimentalAgc>().clipped_level_min, |
| 312 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 314 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| 313 false), | 315 false), |
| 314 #else | 316 #else |
| 315 config.Get<ExperimentalAgc>().enabled), | 317 config.Get<ExperimentalAgc>().enabled), |
| 316 #endif | 318 #endif |
| 317 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) | 319 #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| (...skipping 200 matching lines...) | |
| 518 InitializeEchoCanceller3(); | 520 InitializeEchoCanceller3(); |
| 519 | 521 |
| 520 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 522 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 521 if (debug_dump_.debug_file->is_open()) { | 523 if (debug_dump_.debug_file->is_open()) { |
| 522 int err = WriteInitMessage(); | 524 int err = WriteInitMessage(); |
| 523 if (err != kNoError) { | 525 if (err != kNoError) { |
| 524 return err; | 526 return err; |
| 525 } | 527 } |
| 526 } | 528 } |
| 527 #endif | 529 #endif |
| 530 aec_dumper_->WriteInitMessage(formats_.api_format); | |
| 528 | 531 |
| 529 return kNoError; | 532 return kNoError; |
| 530 } | 533 } |
| 531 | 534 |
| 532 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 535 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| 533 for (const auto& stream : config.streams) { | 536 for (const auto& stream : config.streams) { |
| 534 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 537 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
| 535 return kBadSampleRateError; | 538 return kBadSampleRateError; |
| 536 } | 539 } |
| 537 } | 540 } |
| (...skipping 270 matching lines...) | |
| 808 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 811 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 809 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 812 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 810 const size_t channel_size = | 813 const size_t channel_size = |
| 811 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 814 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| 812 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 815 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
| 813 ++i) | 816 ++i) |
| 814 msg->add_input_channel(src[i], channel_size); | 817 msg->add_input_channel(src[i], channel_size); |
| 815 } | 818 } |
| 816 #endif | 819 #endif |
| 817 | 820 |
| 821 std::unique_ptr<AecDumper::CaptureStreamInfo> stream_info = | |
| 822 aec_dumper_->GetCaptureStreamInfo(); | |
| 823 const size_t channel_size = | |
| 824 sizeof(float) * formats_.api_format.input_stream().num_frames(); | |
| 825 | |
| 826 { | |
| 827 std::vector<rtc::ArrayView<const float>> src_view; | |
peah-webrtc (2017/03/31 07:24:43): Afaics, the emplace_back call will cause arrayview…
aleloi (2017/04/06 15:46:11): We could use a static array of large enough size.
peah-webrtc (2017/04/07 12:57:15): Yes, the impact of this is definitely not noticeab…
| 828 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | |
| 829 ++i) { | |
| 830 src_view.emplace_back(src[i], channel_size); | |
| 831 } | |
| 832 stream_info->AddInput(src_view); | |
| 833 } | |
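As the thread above notes, `src_view` is rebuilt for every 10 ms capture frame. The views themselves are cheap (an `rtc::ArrayView` is a non-owning pointer/length pair and copies no samples), but a `reserve()` keeps the vector from reallocating as it grows. An illustrative sketch under that assumption; note that `rtc::ArrayView` takes an element count rather than a byte count:

```cpp
// Illustrative helper, not part of the CL: build per-channel views over
// planar float audio without reallocation churn in the per-frame loop.
#include <cstddef>
#include <vector>

#include "webrtc/base/array_view.h"

std::vector<rtc::ArrayView<const float>> MakeChannelViews(
    const float* const* channels, size_t num_channels, size_t num_frames) {
  std::vector<rtc::ArrayView<const float>> views;
  views.reserve(num_channels);  // one allocation, no regrowth in the loop
  for (size_t i = 0; i < num_channels; ++i) {
    views.emplace_back(channels[i], num_frames);  // length in samples
  }
  return views;
}
```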
| 818 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 834 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
| 835 | |
| 836 // This earlier happened in ProcessCaptureStreamLocked(). | |
peah-webrtc (2017/03/31 07:24:43): Please remove this comment. This is more of a comm…
aleloi (2017/04/06 15:46:11): Done.
| 837 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && | |
| 838 public_submodules_->echo_control_mobile->is_enabled())); | |
peah-webrtc (2017/03/31 07:24:43): Why were there DCHECKs moved to here?
aleloi (2017/04/06 15:46:11): The conditions were DCHECKed before the protobuf s…
peah-webrtc (2017/04/07 12:57:15): I agree, it should be safe to leave them there. In…
| 839 | |
| 840 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); | |
| 841 stream_info->set_drift( | |
| 842 public_submodules_->echo_cancellation->stream_drift_samples()); | |
| 843 stream_info->set_level(gain_control()->stream_analog_level()); | |
| 844 stream_info->set_keypress(capture_.key_pressed); | |
| 845 | |
| 819 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 846 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 820 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 847 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
| 821 | 848 |
| 822 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 849 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 823 if (debug_dump_.debug_file->is_open()) { | 850 if (debug_dump_.debug_file->is_open()) { |
| 824 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 851 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 825 const size_t channel_size = | 852 const size_t channel_size = |
| 826 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 853 sizeof(float) * formats_.api_format.output_stream().num_frames(); |
| 827 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | 854 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
| 828 ++i) | 855 ++i) |
| 829 msg->add_output_channel(dest[i], channel_size); | 856 msg->add_output_channel(dest[i], channel_size); |
| 830 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 857 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 831 &debug_dump_.num_bytes_left_for_log_, | 858 &debug_dump_.num_bytes_left_for_log_, |
| 832 &crit_debug_, &debug_dump_.capture)); | 859 &crit_debug_, &debug_dump_.capture)); |
| 833 } | 860 } |
| 834 #endif | 861 #endif |
| 862 { | |
| 863 const size_t channel_size = | |
| 864 sizeof(float) * formats_.api_format.output_stream().num_frames(); | |
| 865 std::vector<rtc::ArrayView<const float>> dest_view; | |
| 866 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | |
| 867 ++i) { | |
| 868 dest_view.emplace_back(dest[i], channel_size); | |
peah-webrtc (2017/03/31 07:24:43): Afaics, the emplace_back call will cause arrayview…
aleloi (2017/04/06 15:46:11): Acknowledged.
| 869 } | |
| 870 stream_info->AddOutput(dest_view); | |
| 871 } | |
| 872 aec_dumper_->WriteCaptureStreamMessage(std::move(stream_info)); | |
| 835 | 873 |
| 836 return kNoError; | 874 return kNoError; |
| 837 } | 875 } |
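For readers following the new dumping flow in the float path above: a `CaptureStreamInfo` is obtained at the top of the call, filled in as the stream is processed (input views, delay, drift, analog level, keypress, output views), and finally handed off via `std::move` of the owning `unique_ptr`, so the dumper owns it outright and can serialize it later (e.g. on the worker task queue introduced further down) without the capture thread waiting. A toy sketch of that ownership handoff, with hypothetical names:

```cpp
#include <memory>
#include <utility>

// Stand-in for AecDumper::CaptureStreamInfo; the real type lives in the
// dependent CL and also carries the audio views.
struct StreamInfoSketch {
  int delay_ms = 0;
  int drift_samples = 0;
  int analog_level = 0;
  bool keypress = false;
};

// Taking the unique_ptr by value documents that ownership moves into the
// dumper; after the call the capture path must not touch the object again.
void WriteCaptureStreamMessageSketch(std::unique_ptr<StreamInfoSketch> info) {
  // ... enqueue *info for serialization off the audio thread ...
}

void ExampleUsage() {
  std::unique_ptr<StreamInfoSketch> info(new StreamInfoSketch());
  info->delay_ms = 30;
  info->keypress = true;
  WriteCaptureStreamMessageSketch(std::move(info));  // info is now null
}
```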
| 838 | 876 |
| 839 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { | 877 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { |
| 840 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), | 878 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), |
| 841 num_reverse_channels(), | 879 num_reverse_channels(), |
| 842 &aec_render_queue_buffer_); | 880 &aec_render_queue_buffer_); |
| 843 | 881 |
| 844 RTC_DCHECK_GE(160, audio->num_frames_per_band()); | 882 RTC_DCHECK_GE(160, audio->num_frames_per_band()); |
| (...skipping 217 matching lines...) | |
| 1062 rtc::CritScope cs_render(&crit_render_); | 1100 rtc::CritScope cs_render(&crit_render_); |
| 1063 RETURN_ON_ERR( | 1101 RETURN_ON_ERR( |
| 1064 MaybeInitializeCapture(processing_config, reinitialization_required)); | 1102 MaybeInitializeCapture(processing_config, reinitialization_required)); |
| 1065 } | 1103 } |
| 1066 rtc::CritScope cs_capture(&crit_capture_); | 1104 rtc::CritScope cs_capture(&crit_capture_); |
| 1067 if (frame->samples_per_channel_ != | 1105 if (frame->samples_per_channel_ != |
| 1068 formats_.api_format.input_stream().num_frames()) { | 1106 formats_.api_format.input_stream().num_frames()) { |
| 1069 return kBadDataLengthError; | 1107 return kBadDataLengthError; |
| 1070 } | 1108 } |
| 1071 | 1109 |
| 1110 std::unique_ptr<AecDumper::CaptureStreamInfo> stream_info = | |
| 1111 aec_dumper_->GetCaptureStreamInfo(); | |
| 1112 stream_info->AddInput(*frame); | |
| 1072 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1113 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1073 if (debug_dump_.debug_file->is_open()) { | 1114 if (debug_dump_.debug_file->is_open()) { |
| 1074 RETURN_ON_ERR(WriteConfigMessage(false)); | 1115 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 1075 | 1116 |
| 1076 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 1117 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 1077 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1118 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 1078 const size_t data_size = | 1119 const size_t data_size = |
| 1079 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1120 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1080 msg->set_input_data(frame->data_, data_size); | 1121 msg->set_input_data(frame->data_, data_size); |
| 1081 } | 1122 } |
| 1082 #endif | 1123 #endif |
| 1083 | 1124 |
| 1084 capture_.capture_audio->DeinterleaveFrom(frame); | 1125 capture_.capture_audio->DeinterleaveFrom(frame); |
| 1126 | |
| 1127 RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && | |
peah-webrtc (2017/03/31 07:24:43): Why did you move these DCHECKs here?
aleloi (2017/04/06 15:46:11): Same as the DCHECKs above.
| 1128 public_submodules_->echo_control_mobile->is_enabled())); | |
| 1129 | |
| 1130 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); | |
| 1131 stream_info->set_drift( | |
| 1132 public_submodules_->echo_cancellation->stream_drift_samples()); | |
| 1133 stream_info->set_level(gain_control()->stream_analog_level()); | |
| 1134 stream_info->set_keypress(capture_.key_pressed); | |
| 1135 | |
| 1085 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 1136 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 1086 capture_.capture_audio->InterleaveTo( | 1137 capture_.capture_audio->InterleaveTo( |
| 1087 frame, submodule_states_.CaptureMultiBandProcessingActive()); | 1138 frame, submodule_states_.CaptureMultiBandProcessingActive()); |
| 1088 | 1139 |
| 1140 stream_info->AddOutput(*frame); | |
| 1141 aec_dumper_->WriteCaptureStreamMessage(std::move(stream_info)); | |
| 1089 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1142 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1090 if (debug_dump_.debug_file->is_open()) { | 1143 if (debug_dump_.debug_file->is_open()) { |
| 1091 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1144 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 1092 const size_t data_size = | 1145 const size_t data_size = |
| 1093 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1146 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1094 msg->set_output_data(frame->data_, data_size); | 1147 msg->set_output_data(frame->data_, data_size); |
| 1095 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1148 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1096 &debug_dump_.num_bytes_left_for_log_, | 1149 &debug_dump_.num_bytes_left_for_log_, |
| 1097 &crit_debug_, &debug_dump_.capture)); | 1150 &crit_debug_, &debug_dump_.capture)); |
| 1098 } | 1151 } |
| (...skipping 256 matching lines...) | |
| 1355 const size_t channel_size = | 1408 const size_t channel_size = |
| 1356 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1409 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| 1357 for (size_t i = 0; | 1410 for (size_t i = 0; |
| 1358 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 1411 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| 1359 msg->add_channel(src[i], channel_size); | 1412 msg->add_channel(src[i], channel_size); |
| 1360 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1413 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1361 &debug_dump_.num_bytes_left_for_log_, | 1414 &debug_dump_.num_bytes_left_for_log_, |
| 1362 &crit_debug_, &debug_dump_.render)); | 1415 &crit_debug_, &debug_dump_.render)); |
| 1363 } | 1416 } |
| 1364 #endif | 1417 #endif |
| 1418 std::vector<rtc::ArrayView<const float>> src_view; | |
| 1419 const size_t channel_size = | |
| 1420 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | |
| 1421 | |
| 1422 for (size_t i = 0; | |
| 1423 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) { | |
| 1424 src_view.emplace_back(src[i], channel_size); | |
peah-webrtc (2017/03/31 07:24:43): Afaics, the emplace_back call will cause arrayview…
aleloi (2017/04/06 15:46:11): Acknowledged.
| 1425 } | |
| 1426 aec_dumper_->WriteReverseStreamMessage(src_view); | |
| 1365 | 1427 |
| 1366 render_.render_audio->CopyFrom(src, | 1428 render_.render_audio->CopyFrom(src, |
| 1367 formats_.api_format.reverse_input_stream()); | 1429 formats_.api_format.reverse_input_stream()); |
| 1368 return ProcessRenderStreamLocked(); | 1430 return ProcessRenderStreamLocked(); |
| 1369 } | 1431 } |
| 1370 | 1432 |
| 1371 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 1433 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| 1372 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 1434 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); |
| 1373 rtc::CritScope cs(&crit_render_); | 1435 rtc::CritScope cs(&crit_render_); |
| 1374 if (frame == nullptr) { | 1436 if (frame == nullptr) { |
| (...skipping 33 matching lines...) | |
| 1408 audioproc::ReverseStream* msg = | 1470 audioproc::ReverseStream* msg = |
| 1409 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1471 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 1410 const size_t data_size = | 1472 const size_t data_size = |
| 1411 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1473 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1412 msg->set_data(frame->data_, data_size); | 1474 msg->set_data(frame->data_, data_size); |
| 1413 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1475 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1414 &debug_dump_.num_bytes_left_for_log_, | 1476 &debug_dump_.num_bytes_left_for_log_, |
| 1415 &crit_debug_, &debug_dump_.render)); | 1477 &crit_debug_, &debug_dump_.render)); |
| 1416 } | 1478 } |
| 1417 #endif | 1479 #endif |
| 1480 aec_dumper_->WriteReverseStreamMessage(*frame); | |
peah-webrtc (2017/03/31 07:24:43): Does this work? This accesses aec_dumper_ holding…
aleloi (2017/04/06 15:46:11): Good point. Accesses to AecDump are thread safe, b…
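On the locking question raised above: the dumper pointer is only swapped while both the render and the capture lock are held (StartDebugRecording and StopDebugRecording below take both), so a reader that holds either single lock cannot observe a half-finished swap; whether the dumper's own methods are additionally thread-safe is handled inside the AecDumper implementation. A minimal sketch of that discipline, with illustrative names only:

```cpp
#include <memory>
#include <utility>

#include "webrtc/base/criticalsection.h"

// Illustrative toy class; member names echo AudioProcessingImpl but this is
// not the actual implementation.
class DumperHolderSketch {
 public:
  // Writer: replacing the dumper takes both locks, mirroring
  // StartDebugRecording()/StopDebugRecording().
  void SetDumper(std::unique_ptr<int> dumper) {
    rtc::CritScope cs_render(&crit_render_);
    rtc::CritScope cs_capture(&crit_capture_);
    dumper_ = std::move(dumper);
  }

  // Render-path reader: holding only crit_render_ suffices, because no
  // writer can run without also acquiring it.
  void UseOnRenderThread() {
    rtc::CritScope cs(&crit_render_);
    if (dumper_) { /* write the reverse-stream message */ }
  }

 private:
  rtc::CriticalSection crit_render_;
  rtc::CriticalSection crit_capture_;
  std::unique_ptr<int> dumper_;  // stand-in for the real dumper type
};
```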
| 1481 | |
| 1418 render_.render_audio->DeinterleaveFrom(frame); | 1482 render_.render_audio->DeinterleaveFrom(frame); |
| 1419 RETURN_ON_ERR(ProcessRenderStreamLocked()); | 1483 RETURN_ON_ERR(ProcessRenderStreamLocked()); |
| 1420 render_.render_audio->InterleaveTo( | 1484 render_.render_audio->InterleaveTo( |
| 1421 frame, submodule_states_.RenderMultiBandProcessingActive()); | 1485 frame, submodule_states_.RenderMultiBandProcessingActive()); |
| 1422 return kNoError; | 1486 return kNoError; |
| 1423 } | 1487 } |
| 1424 | 1488 |
| 1425 int AudioProcessingImpl::ProcessRenderStreamLocked() { | 1489 int AudioProcessingImpl::ProcessRenderStreamLocked() { |
| 1426 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. | 1490 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. |
| 1427 if (submodule_states_.RenderMultiBandSubModulesActive() && | 1491 if (submodule_states_.RenderMultiBandSubModulesActive() && |
| (...skipping 67 matching lines...) | |
| 1495 capture_.delay_offset_ms = offset; | 1559 capture_.delay_offset_ms = offset; |
| 1496 } | 1560 } |
| 1497 | 1561 |
| 1498 int AudioProcessingImpl::delay_offset_ms() const { | 1562 int AudioProcessingImpl::delay_offset_ms() const { |
| 1499 rtc::CritScope cs(&crit_capture_); | 1563 rtc::CritScope cs(&crit_capture_); |
| 1500 return capture_.delay_offset_ms; | 1564 return capture_.delay_offset_ms; |
| 1501 } | 1565 } |
| 1502 | 1566 |
| 1503 int AudioProcessingImpl::StartDebugRecording( | 1567 int AudioProcessingImpl::StartDebugRecording( |
| 1504 const char filename[AudioProcessing::kMaxFilenameSize], | 1568 const char filename[AudioProcessing::kMaxFilenameSize], |
| 1505 int64_t max_log_size_bytes) { | 1569 int64_t max_log_size_bytes, |
| 1570 rtc::TaskQueue* worker_queue) { | |
| 1506 // Run in a single-threaded manner. | 1571 // Run in a single-threaded manner. |
| 1507 rtc::CritScope cs_render(&crit_render_); | 1572 rtc::CritScope cs_render(&crit_render_); |
| 1508 rtc::CritScope cs_capture(&crit_capture_); | 1573 rtc::CritScope cs_capture(&crit_capture_); |
| 1509 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1574 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
| 1510 | 1575 |
| 1511 if (filename == nullptr) { | 1576 if (filename == nullptr) { |
| 1512 return kNullPointerError; | 1577 return kNullPointerError; |
| 1513 } | 1578 } |
| 1514 | 1579 |
| 1580 aec_dumper_ = AecDumper::Create(filename, max_log_size_bytes, worker_queue); | |
| 1581 aec_dumper_->WriteInitMessage(formats_.api_format); | |
| 1515 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1582 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1516 debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; | 1583 debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; |
| 1517 // Stop any ongoing recording. | 1584 // Stop any ongoing recording. |
| 1518 debug_dump_.debug_file->CloseFile(); | 1585 debug_dump_.debug_file->CloseFile(); |
| 1519 | 1586 |
| 1520 if (!debug_dump_.debug_file->OpenFile(filename, false)) { | 1587 if (!debug_dump_.debug_file->OpenFile(filename, false)) { |
| 1521 return kFileError; | 1588 return kFileError; |
| 1522 } | 1589 } |
| 1523 | 1590 |
| 1524 RETURN_ON_ERR(WriteConfigMessage(true)); | 1591 RETURN_ON_ERR(WriteConfigMessage(true)); |
| 1525 RETURN_ON_ERR(WriteInitMessage()); | 1592 RETURN_ON_ERR(WriteInitMessage()); |
| 1526 return kNoError; | 1593 return kNoError; |
| 1527 #else | 1594 #else |
| 1528 return kUnsupportedFunctionError; | 1595 return kUnsupportedFunctionError; |
| 1529 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1596 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1530 } | 1597 } |
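The new `rtc::TaskQueue*` parameter is threaded through to `AecDumper::Create()` so the file I/O can run off the real-time audio threads. A hypothetical caller-side sketch, assuming the matching `AudioProcessing` interface overload from this CL; the queue, path, and size cap are made up for illustration:

```cpp
#include "webrtc/base/task_queue.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"

// The worker queue must outlive the recording session, so it is passed in
// rather than created here.
int StartAecDump(webrtc::AudioProcessing* apm, rtc::TaskQueue* worker_queue) {
  return apm->StartDebugRecording("/tmp/aec_dump.pb",
                                  10 * 1024 * 1024,  // ~10 MB log cap
                                  worker_queue);
}
```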
| 1531 | 1598 |
| 1532 int AudioProcessingImpl::StartDebugRecording(FILE* handle, | 1599 int AudioProcessingImpl::StartDebugRecording(FILE* handle, |
| 1533 int64_t max_log_size_bytes) { | 1600 int64_t max_log_size_bytes, |
| 1601 rtc::TaskQueue* worker_queue) { | |
| 1534 // Run in a single-threaded manner. | 1602 // Run in a single-threaded manner. |
| 1535 rtc::CritScope cs_render(&crit_render_); | 1603 rtc::CritScope cs_render(&crit_render_); |
| 1536 rtc::CritScope cs_capture(&crit_capture_); | 1604 rtc::CritScope cs_capture(&crit_capture_); |
| 1537 | 1605 |
| 1538 if (handle == nullptr) { | 1606 if (handle == nullptr) { |
| 1539 return kNullPointerError; | 1607 return kNullPointerError; |
| 1540 } | 1608 } |
| 1541 | 1609 |
| 1610 aec_dumper_ = AecDumper::Create(handle, max_log_size_bytes, worker_queue); | |
| 1611 aec_dumper_->WriteInitMessage(formats_.api_format); | |
| 1542 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1612 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1543 debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; | 1613 debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; |
| 1544 | 1614 |
| 1545 // Stop any ongoing recording. | 1615 // Stop any ongoing recording. |
| 1546 debug_dump_.debug_file->CloseFile(); | 1616 debug_dump_.debug_file->CloseFile(); |
| 1547 | 1617 |
| 1548 if (!debug_dump_.debug_file->OpenFromFileHandle(handle)) { | 1618 if (!debug_dump_.debug_file->OpenFromFileHandle(handle)) { |
| 1549 return kFileError; | 1619 return kFileError; |
| 1550 } | 1620 } |
| 1551 | 1621 |
| 1552 RETURN_ON_ERR(WriteConfigMessage(true)); | 1622 RETURN_ON_ERR(WriteConfigMessage(true)); |
| 1553 RETURN_ON_ERR(WriteInitMessage()); | 1623 RETURN_ON_ERR(WriteInitMessage()); |
| 1554 return kNoError; | 1624 return kNoError; |
| 1555 #else | 1625 #else |
| 1556 return kUnsupportedFunctionError; | 1626 return kUnsupportedFunctionError; |
| 1557 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1627 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1558 } | 1628 } |
| 1559 | 1629 |
| 1560 int AudioProcessingImpl::StartDebugRecording(FILE* handle) { | 1630 int AudioProcessingImpl::StartDebugRecording(FILE* handle, |
| 1561 return StartDebugRecording(handle, -1); | 1631 rtc::TaskQueue* worker_queue) { |
| 1632 return StartDebugRecording(handle, -1, worker_queue); | |
| 1562 } | 1633 } |
| 1563 | 1634 |
| 1564 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( | 1635 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
| 1565 rtc::PlatformFile handle) { | 1636 rtc::PlatformFile handle, |
| 1637 rtc::TaskQueue* worker_queue) { | |
| 1566 // Run in a single-threaded manner. | 1638 // Run in a single-threaded manner. |
| 1567 rtc::CritScope cs_render(&crit_render_); | 1639 rtc::CritScope cs_render(&crit_render_); |
| 1568 rtc::CritScope cs_capture(&crit_capture_); | 1640 rtc::CritScope cs_capture(&crit_capture_); |
| 1569 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1641 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
| 1570 return StartDebugRecording(stream, -1); | 1642 return StartDebugRecording(stream, -1, worker_queue); |
| 1571 } | 1643 } |
| 1572 | 1644 |
| 1573 int AudioProcessingImpl::StopDebugRecording() { | 1645 int AudioProcessingImpl::StopDebugRecording() { |
| 1646 aec_dumper_ = AecDumper::CreateNullDumper(); | |
peah-webrtc (2017/03/31 07:24:43): Why do we need to have a null dumper? Is it not su…
aleloi (2017/04/06 15:46:11): It is. I can remove the null dumper if there are s…
| 1574 // Run in a single-threaded manner. | 1647 // Run in a single-threaded manner. |
| 1575 rtc::CritScope cs_render(&crit_render_); | 1648 rtc::CritScope cs_render(&crit_render_); |
| 1576 rtc::CritScope cs_capture(&crit_capture_); | 1649 rtc::CritScope cs_capture(&crit_capture_); |
| 1577 | 1650 |
| 1578 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1651 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1579 // We just return if recording hasn't started. | 1652 // We just return if recording hasn't started. |
| 1580 debug_dump_.debug_file->CloseFile(); | 1653 debug_dump_.debug_file->CloseFile(); |
| 1581 return kNoError; | 1654 return kNoError; |
| 1582 #else | 1655 #else |
| 1583 return kUnsupportedFunctionError; | 1656 return kUnsupportedFunctionError; |
| (...skipping 347 matching lines...) | |
| 1931 | 2004 |
| 1932 config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled()); | 2005 config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled()); |
| 1933 config.set_ns_level( | 2006 config.set_ns_level( |
| 1934 static_cast<int>(public_submodules_->noise_suppression->level())); | 2007 static_cast<int>(public_submodules_->noise_suppression->level())); |
| 1935 | 2008 |
| 1936 config.set_transient_suppression_enabled( | 2009 config.set_transient_suppression_enabled( |
| 1937 capture_.transient_suppressor_enabled); | 2010 capture_.transient_suppressor_enabled); |
| 1938 config.set_intelligibility_enhancer_enabled( | 2011 config.set_intelligibility_enhancer_enabled( |
| 1939 capture_nonlocked_.intelligibility_enabled); | 2012 capture_nonlocked_.intelligibility_enabled); |
| 1940 | 2013 |
| 2014 InternalAPMConfig apm_config; | |
| 2015 | |
| 2016 apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled(); | |
| 2017 apm_config.aec_delay_agnostic_enabled = | |
| 2018 public_submodules_->echo_cancellation->is_delay_agnostic_enabled(); | |
| 2019 apm_config.aec_drift_compensation_enabled = | |
| 2020 public_submodules_->echo_cancellation->is_drift_compensation_enabled(); | |
| 2021 apm_config.aec_extended_filter_enabled = | |
| 2022 public_submodules_->echo_cancellation->is_extended_filter_enabled(); | |
| 2023 apm_config.aec_suppression_level = static_cast<int>( | |
| 2024 public_submodules_->echo_cancellation->suppression_level()); | |
| 2025 | |
| 2026 apm_config.aecm_enabled = | |
| 2027 public_submodules_->echo_control_mobile->is_enabled(); | |
| 2028 apm_config.aecm_comfort_noise_enabled = | |
| 2029 public_submodules_->echo_control_mobile->is_comfort_noise_enabled(); | |
| 2030 apm_config.aecm_routing_mode = | |
| 2031 static_cast<int>(public_submodules_->echo_control_mobile->routing_mode()); | |
| 2032 | |
| 2033 apm_config.agc_enabled = public_submodules_->gain_control->is_enabled(); | |
| 2034 apm_config.agc_mode = | |
| 2035 static_cast<int>(public_submodules_->gain_control->mode()); | |
| 2036 apm_config.agc_limiter_enabled = | |
| 2037 public_submodules_->gain_control->is_limiter_enabled(); | |
| 2038 apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc; | |
| 2039 | |
| 2040 apm_config.hpf_enabled = config_.high_pass_filter.enabled; | |
| 2041 | |
| 2042 apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled(); | |
| 2043 apm_config.ns_level = | |
| 2044 static_cast<int>(public_submodules_->noise_suppression->level()); | |
| 2045 | |
| 2046 apm_config.transient_suppression_enabled = | |
| 2047 capture_.transient_suppressor_enabled; | |
| 2048 apm_config.intelligibility_enhancer_enabled = | |
| 2049 capture_nonlocked_.intelligibility_enabled; | |
| 2050 | |
| 1941 std::string experiments_description = | 2051 std::string experiments_description = |
| 1942 public_submodules_->echo_cancellation->GetExperimentsDescription(); | 2052 public_submodules_->echo_cancellation->GetExperimentsDescription(); |
| 1943 // TODO(peah): Add semicolon-separated concatenations of experiment | 2053 // TODO(peah): Add semicolon-separated concatenations of experiment |
| 1944 // descriptions for other submodules. | 2054 // descriptions for other submodules. |
| 1945 if (capture_nonlocked_.level_controller_enabled) { | 2055 if (capture_nonlocked_.level_controller_enabled) { |
| 1946 experiments_description += "LevelController;"; | 2056 experiments_description += "LevelController;"; |
| 1947 } | 2057 } |
| 1948 if (constants_.agc_clipped_level_min != kClippedLevelMin) { | 2058 if (constants_.agc_clipped_level_min != kClippedLevelMin) { |
| 1949 experiments_description += "AgcClippingLevelExperiment;"; | 2059 experiments_description += "AgcClippingLevelExperiment;"; |
| 1950 } | 2060 } |
| 1951 if (capture_nonlocked_.echo_canceller3_enabled) { | 2061 if (capture_nonlocked_.echo_canceller3_enabled) { |
| 1952 experiments_description += "EchoCanceller3;"; | 2062 experiments_description += "EchoCanceller3;"; |
| 1953 } | 2063 } |
| 1954 config.set_experiments_description(experiments_description); | 2064 config.set_experiments_description(experiments_description); |
| 1955 | 2065 |
| 2066 apm_config.experiments_description = experiments_description; | |
| 2067 aec_dumper_->WriteConfig(apm_config, forced); | |
| 2068 | |
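The block above mirrors the protobuf `audioproc::Config` fields into a plain struct so the dumper call site carries no protobuf dependency. Inferred from the assignments, the struct is shaped roughly like the sketch below; the authoritative `InternalAPMConfig` definition ships with the AecDumper interface and may use different names or types:

```cpp
#include <string>

// Hypothetical reconstruction of InternalAPMConfig from the fields set above.
struct InternalAPMConfigSketch {
  bool aec_enabled = false;
  bool aec_delay_agnostic_enabled = false;
  bool aec_drift_compensation_enabled = false;
  bool aec_extended_filter_enabled = false;
  int aec_suppression_level = 0;

  bool aecm_enabled = false;
  bool aecm_comfort_noise_enabled = false;
  int aecm_routing_mode = 0;

  bool agc_enabled = false;
  int agc_mode = 0;
  bool agc_limiter_enabled = false;
  bool noise_robust_agc_enabled = false;

  bool hpf_enabled = false;

  bool ns_enabled = false;
  int ns_level = 0;

  bool transient_suppression_enabled = false;
  bool intelligibility_enhancer_enabled = false;

  std::string experiments_description;
};
```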
| 1956 std::string serialized_config = config.SerializeAsString(); | 2069 std::string serialized_config = config.SerializeAsString(); |
| 1957 if (!forced && | 2070 if (!forced && |
| 1958 debug_dump_.capture.last_serialized_config == serialized_config) { | 2071 debug_dump_.capture.last_serialized_config == serialized_config) { |
| 1959 return kNoError; | 2072 return kNoError; |
| 1960 } | 2073 } |
| 1961 | 2074 |
| 1962 debug_dump_.capture.last_serialized_config = serialized_config; | 2075 debug_dump_.capture.last_serialized_config = serialized_config; |
| 1963 | 2076 |
| 1964 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); | 2077 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
| 1965 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | 2078 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| (...skipping 23 matching lines...) | |
| 1989 capture_processing_format(kSampleRate16kHz), | 2102 capture_processing_format(kSampleRate16kHz), |
| 1990 split_rate(kSampleRate16kHz) {} | 2103 split_rate(kSampleRate16kHz) {} |
| 1991 | 2104 |
| 1992 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 2105 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
| 1993 | 2106 |
| 1994 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 2107 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
| 1995 | 2108 |
| 1996 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 2109 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
| 1997 | 2110 |
| 1998 } // namespace webrtc | 2111 } // namespace webrtc |