OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 111 matching lines...) | |
122 return uppermost_native_rate; | 122 return uppermost_native_rate; |
123 } | 123 } |
124 | 124 |
125 // Maximum length that a frame of samples can have. | 125 // Maximum length that a frame of samples can have. |
126 static const size_t kMaxAllowedValuesOfSamplesPerFrame = 160; | 126 static const size_t kMaxAllowedValuesOfSamplesPerFrame = 160; |
127 // Maximum number of frames to buffer in the render queue. | 127 // Maximum number of frames to buffer in the render queue. |
128 // TODO(peah): Decrease this once we properly handle hugely unbalanced | 128 // TODO(peah): Decrease this once we properly handle hugely unbalanced |
129 // reverse and forward call numbers. | 129 // reverse and forward call numbers. |
130 static const size_t kMaxNumFramesToBuffer = 100; | 130 static const size_t kMaxNumFramesToBuffer = 100; |
131 | 131 |
132 // Maximum number of audio channels in the input and output streams. | |
133 constexpr size_t kMaxNumChannels = 2; | |
peah-webrtc 2017/04/21 05:15:05: To be on the safe side, could we set this to 4 (I'…
aleloi 2017/04/21 13:47:55: Sure, done.
134 | |
132 class HighPassFilterImpl : public HighPassFilter { | 135 class HighPassFilterImpl : public HighPassFilter { |
133 public: | 136 public: |
134 explicit HighPassFilterImpl(AudioProcessingImpl* apm) : apm_(apm) {} | 137 explicit HighPassFilterImpl(AudioProcessingImpl* apm) : apm_(apm) {} |
135 ~HighPassFilterImpl() override = default; | 138 ~HighPassFilterImpl() override = default; |
136 | 139 |
137 // HighPassFilter implementation. | 140 // HighPassFilter implementation. |
138 int Enable(bool enable) override { | 141 int Enable(bool enable) override { |
139 apm_->MutateConfig([enable](AudioProcessing::Config* config) { | 142 apm_->MutateConfig([enable](AudioProcessing::Config* config) { |
140 config->high_pass_filter.enabled = enable; | 143 config->high_pass_filter.enabled = enable; |
141 }); | 144 }); |
142 | 145 |
143 return AudioProcessing::kNoError; | 146 return AudioProcessing::kNoError; |
144 } | 147 } |
145 | 148 |
146 bool is_enabled() const override { | 149 bool is_enabled() const override { |
147 return apm_->GetConfig().high_pass_filter.enabled; | 150 return apm_->GetConfig().high_pass_filter.enabled; |
148 } | 151 } |
149 | 152 |
150 private: | 153 private: |
151 AudioProcessingImpl* apm_; | 154 AudioProcessingImpl* apm_; |
152 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); | 155 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); |
153 }; | 156 }; |
154 | 157 |
158 webrtc::InternalAPMStreamsConfig ToStreamsConfig( | |
159 const ProcessingConfig& api_format) { | |
160 webrtc::InternalAPMStreamsConfig result; | |
161 result.input_sample_rate = api_format.input_stream().sample_rate_hz(); | |
162 result.input_num_channels = api_format.input_stream().num_channels(); | |
163 result.output_num_channels = api_format.output_stream().num_channels(); | |
164 result.render_input_num_channels = | |
165 api_format.reverse_input_stream().num_channels(); | |
166 result.render_input_sample_rate = | |
167 api_format.reverse_input_stream().sample_rate_hz(); | |
168 result.output_sample_rate = api_format.output_stream().sample_rate_hz(); | |
169 result.render_output_sample_rate = | |
170 api_format.reverse_output_stream().sample_rate_hz(); | |
171 result.render_output_num_channels = | |
172 api_format.reverse_output_stream().num_channels(); | |
173 return result; | |
174 } | |
175 | |
155 } // namespace | 176 } // namespace |
156 | 177 |
157 // Throughout webrtc, it's assumed that success is represented by zero. | 178 // Throughout webrtc, it's assumed that success is represented by zero. |
158 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 179 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
159 | 180 |
160 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} | 181 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} |
161 | 182 |
162 bool AudioProcessingImpl::ApmSubmoduleStates::Update( | 183 bool AudioProcessingImpl::ApmSubmoduleStates::Update( |
163 bool low_cut_filter_enabled, | 184 bool low_cut_filter_enabled, |
164 bool echo_canceller_enabled, | 185 bool echo_canceller_enabled, |
(...skipping 354 matching lines...) | |
519 InitializeEchoCanceller3(); | 540 InitializeEchoCanceller3(); |
520 | 541 |
521 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 542 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
522 if (debug_dump_.debug_file->is_open()) { | 543 if (debug_dump_.debug_file->is_open()) { |
523 int err = WriteInitMessage(); | 544 int err = WriteInitMessage(); |
524 if (err != kNoError) { | 545 if (err != kNoError) { |
525 return err; | 546 return err; |
526 } | 547 } |
527 } | 548 } |
528 #endif | 549 #endif |
529 | 550 if (aec_dump_) { |
551 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
552 } | |
530 return kNoError; | 553 return kNoError; |
531 } | 554 } |
532 | 555 |
533 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 556 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
534 for (const auto& stream : config.streams) { | 557 for (const auto& stream : config.streams) { |
535 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 558 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
536 return kBadSampleRateError; | 559 return kBadSampleRateError; |
537 } | 560 } |
538 } | 561 } |
539 | 562 |
(...skipping 277 matching lines...) | |
817 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 840 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
818 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 841 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
819 const size_t channel_size = | 842 const size_t channel_size = |
820 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 843 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
821 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 844 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
822 ++i) | 845 ++i) |
823 msg->add_input_channel(src[i], channel_size); | 846 msg->add_input_channel(src[i], channel_size); |
824 } | 847 } |
825 #endif | 848 #endif |
826 | 849 |
850 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info; | |
851 if (aec_dump_) { | |
852 aec_dump_->WriteConfig(CollectApmConfig(), false); | |
peah-webrtc 2017/04/21 05:15:05: Would it be ok to move this code block, and the ot…
aleloi 2017/04/21 13:47:55: Good idea, I didn't think of that. I've tried, see…
853 | |
854 stream_info = aec_dump_->GetCaptureStreamInfo(); | |
855 RTC_DCHECK(stream_info); | |
856 const size_t channel_size = | |
857 sizeof(float) * formats_.api_format.input_stream().num_frames(); | |
858 std::array<rtc::ArrayView<const float>, kMaxNumChannels> src_view; | |
859 const size_t num_channels = | |
860 formats_.api_format.input_stream().num_channels(); | |
861 RTC_DCHECK_LE(num_channels, kMaxNumChannels); | |
peah-webrtc 2017/04/21 05:15:05: An alternative here is also to do the for-loop to…
aleloi 2017/04/21 13:47:55: Done.
862 for (size_t i = 0; i < num_channels; ++i) { | |
863 src_view[i] = rtc::ArrayView<const float>(src[i], channel_size); | |
864 } | |
865 stream_info->AddInput(rtc::ArrayView<rtc::ArrayView<const float>>( | |
866 &src_view[0], num_channels)); | |
867 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); | |
868 stream_info->set_drift( | |
869 public_submodules_->echo_cancellation->stream_drift_samples()); | |
870 stream_info->set_level(gain_control()->stream_analog_level()); | |
871 stream_info->set_keypress(capture_.key_pressed); | |
872 } | |
873 | |
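
The two review suggestions in this hunk (hoisting the view-building code into a shared helper, and looping over min(num_channels, kMaxNumChannels) instead of relying only on the RTC_DCHECK_LE) could look roughly like the sketch below. This is an editorial illustration, not part of the patch: the ArrayView include path is assumed, kMaxNumChannels is the constant added at line 133, and the sketch passes num_frames directly because rtc::ArrayView's second argument is an element count (the patch's channel_size, which folds in sizeof(float), matches the byte-oriented protobuf path but appears to over-count the length of a float view).

```cpp
// Editorial sketch only, not part of this CL.
#include <algorithm>
#include <array>
#include <cstddef>

#include "webrtc/base/array_view.h"  // Include path assumed.

namespace {

// Builds per-channel views over |data|, clamping to kMaxNumChannels
// (the constant introduced at line 133 of this patch) so release builds
// never index past the fixed-size array.
std::array<rtc::ArrayView<const float>, kMaxNumChannels> MakeChannelViews(
    const float* const* data,
    size_t num_channels,
    size_t num_frames) {
  std::array<rtc::ArrayView<const float>, kMaxNumChannels> views;
  const size_t n = std::min(num_channels, kMaxNumChannels);
  for (size_t i = 0; i < n; ++i) {
    views[i] = rtc::ArrayView<const float>(data[i], num_frames);
  }
  return views;
}

}  // namespace
```

The AddInput/AddOutput call sites in ProcessStream(), as well as the render path further down, could then pass rtc::ArrayView<rtc::ArrayView<const float>>(views.data(), std::min(num_channels, kMaxNumChannels)) built from the returned array.
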
827 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 874 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
828 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 875 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
829 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 876 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
830 | 877 |
831 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 878 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
832 if (debug_dump_.debug_file->is_open()) { | 879 if (debug_dump_.debug_file->is_open()) { |
833 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 880 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
834 const size_t channel_size = | 881 const size_t channel_size = |
835 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 882 sizeof(float) * formats_.api_format.output_stream().num_frames(); |
836 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | 883 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
837 ++i) | 884 ++i) |
838 msg->add_output_channel(dest[i], channel_size); | 885 msg->add_output_channel(dest[i], channel_size); |
839 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 886 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
840 &debug_dump_.num_bytes_left_for_log_, | 887 &debug_dump_.num_bytes_left_for_log_, |
841 &crit_debug_, &debug_dump_.capture)); | 888 &crit_debug_, &debug_dump_.capture)); |
842 } | 889 } |
843 #endif | 890 #endif |
891 if (aec_dump_) { | |
892 const size_t channel_size = | |
893 sizeof(float) * formats_.api_format.output_stream().num_frames(); | |
peah-webrtc 2017/04/21 05:15:05: This code is quite similar to the code in 1458. Ha…
aleloi 2017/04/21 13:47:55: Done.
894 std::array<rtc::ArrayView<const float>, kMaxNumChannels> dest_view; | |
895 const size_t num_channels = | |
896 formats_.api_format.output_stream().num_channels(); | |
897 RTC_DCHECK_LE(num_channels, kMaxNumChannels); | |
898 for (size_t i = 0; i < num_channels; ++i) { | |
899 dest_view[i] = rtc::ArrayView<const float>(dest[i], channel_size); | |
900 } | |
901 stream_info->AddOutput(rtc::ArrayView<rtc::ArrayView<const float>>( | |
902 &dest_view[0], num_channels)); | |
903 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); | |
904 } | |
844 | 905 |
845 return kNoError; | 906 return kNoError; |
846 } | 907 } |
847 | 908 |
848 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { | 909 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { |
849 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), | 910 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), |
850 num_reverse_channels(), | 911 num_reverse_channels(), |
851 &aec_render_queue_buffer_); | 912 &aec_render_queue_buffer_); |
852 | 913 |
853 RTC_DCHECK_GE(160, audio->num_frames_per_band()); | 914 RTC_DCHECK_GE(160, audio->num_frames_per_band()); |
(...skipping 217 matching lines...) | |
1071 rtc::CritScope cs_render(&crit_render_); | 1132 rtc::CritScope cs_render(&crit_render_); |
1072 RETURN_ON_ERR( | 1133 RETURN_ON_ERR( |
1073 MaybeInitializeCapture(processing_config, reinitialization_required)); | 1134 MaybeInitializeCapture(processing_config, reinitialization_required)); |
1074 } | 1135 } |
1075 rtc::CritScope cs_capture(&crit_capture_); | 1136 rtc::CritScope cs_capture(&crit_capture_); |
1076 if (frame->samples_per_channel_ != | 1137 if (frame->samples_per_channel_ != |
1077 formats_.api_format.input_stream().num_frames()) { | 1138 formats_.api_format.input_stream().num_frames()) { |
1078 return kBadDataLengthError; | 1139 return kBadDataLengthError; |
1079 } | 1140 } |
1080 | 1141 |
1142 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info; | |
1143 if (aec_dump_) { | |
1144 stream_info = aec_dump_->GetCaptureStreamInfo(); | |
1145 RTC_DCHECK(stream_info); | |
1146 stream_info->AddInput(*frame); | |
1147 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); | |
1148 stream_info->set_drift( | |
1149 public_submodules_->echo_cancellation->stream_drift_samples()); | |
1150 stream_info->set_level(gain_control()->stream_analog_level()); | |
1151 stream_info->set_keypress(capture_.key_pressed); | |
1152 | |
1153 aec_dump_->WriteConfig(CollectApmConfig(), false); | |
1154 } | |
1155 | |
1081 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1156 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
1082 if (debug_dump_.debug_file->is_open()) { | 1157 if (debug_dump_.debug_file->is_open()) { |
1083 RETURN_ON_ERR(WriteConfigMessage(false)); | 1158 RETURN_ON_ERR(WriteConfigMessage(false)); |
1084 | 1159 |
1085 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 1160 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
1086 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1161 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
1087 const size_t data_size = | 1162 const size_t data_size = |
1088 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1163 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
1089 msg->set_input_data(frame->data_, data_size); | 1164 msg->set_input_data(frame->data_, data_size); |
1090 } | 1165 } |
1091 #endif | 1166 #endif |
1092 | 1167 |
1093 capture_.capture_audio->DeinterleaveFrom(frame); | 1168 capture_.capture_audio->DeinterleaveFrom(frame); |
1094 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 1169 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
1095 capture_.capture_audio->InterleaveTo( | 1170 capture_.capture_audio->InterleaveTo( |
1096 frame, submodule_states_.CaptureMultiBandProcessingActive()); | 1171 frame, submodule_states_.CaptureMultiBandProcessingActive()); |
1097 | 1172 |
1173 if (aec_dump_) { | |
1174 stream_info->AddOutput(*frame); | |
1175 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); | |
1176 } | |
1098 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1177 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
1099 if (debug_dump_.debug_file->is_open()) { | 1178 if (debug_dump_.debug_file->is_open()) { |
1100 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1179 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
1101 const size_t data_size = | 1180 const size_t data_size = |
1102 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1181 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
1103 msg->set_output_data(frame->data_, data_size); | 1182 msg->set_output_data(frame->data_, data_size); |
1104 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1183 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
1105 &debug_dump_.num_bytes_left_for_log_, | 1184 &debug_dump_.num_bytes_left_for_log_, |
1106 &crit_debug_, &debug_dump_.capture)); | 1185 &crit_debug_, &debug_dump_.capture)); |
1107 } | 1186 } |
(...skipping 261 matching lines...) | |
1369 const size_t channel_size = | 1448 const size_t channel_size = |
1370 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1449 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
1371 for (size_t i = 0; | 1450 for (size_t i = 0; |
1372 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 1451 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
1373 msg->add_channel(src[i], channel_size); | 1452 msg->add_channel(src[i], channel_size); |
1374 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1453 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
1375 &debug_dump_.num_bytes_left_for_log_, | 1454 &debug_dump_.num_bytes_left_for_log_, |
1376 &crit_debug_, &debug_dump_.render)); | 1455 &crit_debug_, &debug_dump_.render)); |
1377 } | 1456 } |
1378 #endif | 1457 #endif |
1379 | 1458 if (aec_dump_) { |
1459 std::array<rtc::ArrayView<const float>, kMaxNumChannels> src_view; | |
1460 const size_t channel_size = | |
1461 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | |
1462 const size_t num_channels = | |
1463 formats_.api_format.reverse_input_stream().num_channels(); | |
1464 RTC_DCHECK_LE(num_channels, kMaxNumChannels); | |
1465 for (size_t i = 0; i < num_channels; ++i) { | |
peah-webrtc 2017/04/21 05:15:05: Same thing here, what about looping against min(nu…
aleloi 2017/04/21 13:47:55: Done.
1466 src_view[i] = rtc::ArrayView<const float>(src[i], channel_size); | |
1467 } | |
1468 aec_dump_->WriteRenderStreamMessage( | |
1469 rtc::ArrayView<rtc::ArrayView<const float>>(&src_view[0], | |
1470 num_channels)); | |
1471 } | |
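
With a helper like the MakeChannelViews() sketch above, the render-side block the reviewer flags here could shrink to a few lines. Again an editorial sketch, not part of the CL; the helper is hypothetical, the other names are taken from the patch, and std::min assumes <algorithm> is included.

```cpp
// Editorial sketch: render path using the hypothetical MakeChannelViews().
if (aec_dump_) {
  const size_t num_channels =
      formats_.api_format.reverse_input_stream().num_channels();
  const size_t num_frames =
      formats_.api_format.reverse_input_stream().num_frames();
  auto views = MakeChannelViews(src, num_channels, num_frames);
  aec_dump_->WriteRenderStreamMessage(
      rtc::ArrayView<rtc::ArrayView<const float>>(
          views.data(), std::min(num_channels, kMaxNumChannels)));
}
```
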
1380 render_.render_audio->CopyFrom(src, | 1472 render_.render_audio->CopyFrom(src, |
1381 formats_.api_format.reverse_input_stream()); | 1473 formats_.api_format.reverse_input_stream()); |
1382 return ProcessRenderStreamLocked(); | 1474 return ProcessRenderStreamLocked(); |
1383 } | 1475 } |
1384 | 1476 |
1385 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 1477 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
1386 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 1478 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); |
1387 rtc::CritScope cs(&crit_render_); | 1479 rtc::CritScope cs(&crit_render_); |
1388 if (frame == nullptr) { | 1480 if (frame == nullptr) { |
1389 return kNullPointerError; | 1481 return kNullPointerError; |
(...skipping 32 matching lines...) | |
1422 audioproc::ReverseStream* msg = | 1514 audioproc::ReverseStream* msg = |
1423 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1515 debug_dump_.render.event_msg->mutable_reverse_stream(); |
1424 const size_t data_size = | 1516 const size_t data_size = |
1425 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1517 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
1426 msg->set_data(frame->data_, data_size); | 1518 msg->set_data(frame->data_, data_size); |
1427 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1519 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
1428 &debug_dump_.num_bytes_left_for_log_, | 1520 &debug_dump_.num_bytes_left_for_log_, |
1429 &crit_debug_, &debug_dump_.render)); | 1521 &crit_debug_, &debug_dump_.render)); |
1430 } | 1522 } |
1431 #endif | 1523 #endif |
1524 if (aec_dump_) { | |
1525 aec_dump_->WriteRenderStreamMessage(*frame); | |
1526 } | |
1527 | |
1432 render_.render_audio->DeinterleaveFrom(frame); | 1528 render_.render_audio->DeinterleaveFrom(frame); |
1433 RETURN_ON_ERR(ProcessRenderStreamLocked()); | 1529 RETURN_ON_ERR(ProcessRenderStreamLocked()); |
1434 render_.render_audio->InterleaveTo( | 1530 render_.render_audio->InterleaveTo( |
1435 frame, submodule_states_.RenderMultiBandProcessingActive()); | 1531 frame, submodule_states_.RenderMultiBandProcessingActive()); |
1436 return kNoError; | 1532 return kNoError; |
1437 } | 1533 } |
1438 | 1534 |
1439 int AudioProcessingImpl::ProcessRenderStreamLocked() { | 1535 int AudioProcessingImpl::ProcessRenderStreamLocked() { |
1440 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. | 1536 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. |
1441 if (submodule_states_.RenderMultiBandSubModulesActive() && | 1537 if (submodule_states_.RenderMultiBandSubModulesActive() && |
(...skipping 63 matching lines...) | |
1505 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 1601 void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
1506 rtc::CritScope cs(&crit_capture_); | 1602 rtc::CritScope cs(&crit_capture_); |
1507 capture_.delay_offset_ms = offset; | 1603 capture_.delay_offset_ms = offset; |
1508 } | 1604 } |
1509 | 1605 |
1510 int AudioProcessingImpl::delay_offset_ms() const { | 1606 int AudioProcessingImpl::delay_offset_ms() const { |
1511 rtc::CritScope cs(&crit_capture_); | 1607 rtc::CritScope cs(&crit_capture_); |
1512 return capture_.delay_offset_ms; | 1608 return capture_.delay_offset_ms; |
1513 } | 1609 } |
1514 | 1610 |
1611 void AudioProcessingImpl::StartDebugRecording( | |
1612 std::unique_ptr<AecDump> aec_dump) { | |
1613 rtc::CritScope cs_render(&crit_render_); | |
1614 rtc::CritScope cs_capture(&crit_capture_); | |
1615 RTC_DCHECK(aec_dump); | |
1616 aec_dump_ = std::move(aec_dump); | |
1617 | |
1618 aec_dump_->WriteConfig(CollectApmConfig(), true); | |
1619 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
1620 } | |
1621 | |
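
For reference, the intended call pattern for the new overload might look like the sketch below. This is an editorial sketch, not from the CL: it assumes the unique_ptr overload is also exposed on the public AudioProcessing interface, and the caller supplies whatever AecDump implementation the rest of this CL series provides (header paths assumed).

```cpp
// Editorial sketch only.
#include <memory>
#include <utility>

#include "webrtc/modules/audio_processing/include/aec_dump.h"          // Path assumed.
#include "webrtc/modules/audio_processing/include/audio_processing.h"  // Path assumed.

void RunWithAecDump(webrtc::AudioProcessing* apm,
                    std::unique_ptr<webrtc::AecDump> dump) {
  // Takes ownership and immediately records a config message plus an init
  // message describing the current stream formats.
  apm->StartDebugRecording(std::move(dump));

  // ... ProcessStream() / ProcessReverseStream() calls made while the dump
  // is active are recorded as capture and render stream messages ...

  // Drops the AecDump again (aec_dump_.reset() under both locks).
  apm->StopDebugRecording();
}
```
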
1515 int AudioProcessingImpl::StartDebugRecording( | 1622 int AudioProcessingImpl::StartDebugRecording( |
1516 const char filename[AudioProcessing::kMaxFilenameSize], | 1623 const char filename[AudioProcessing::kMaxFilenameSize], |
1517 int64_t max_log_size_bytes) { | 1624 int64_t max_log_size_bytes) { |
1518 // Run in a single-threaded manner. | 1625 // Run in a single-threaded manner. |
1519 rtc::CritScope cs_render(&crit_render_); | 1626 rtc::CritScope cs_render(&crit_render_); |
1520 rtc::CritScope cs_capture(&crit_capture_); | 1627 rtc::CritScope cs_capture(&crit_capture_); |
1521 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1628 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
1522 | 1629 |
1523 if (filename == nullptr) { | 1630 if (filename == nullptr) { |
1524 return kNullPointerError; | 1631 return kNullPointerError; |
(...skipping 54 matching lines...) | |
1579 rtc::CritScope cs_render(&crit_render_); | 1686 rtc::CritScope cs_render(&crit_render_); |
1580 rtc::CritScope cs_capture(&crit_capture_); | 1687 rtc::CritScope cs_capture(&crit_capture_); |
1581 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1688 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
1582 return StartDebugRecording(stream, -1); | 1689 return StartDebugRecording(stream, -1); |
1583 } | 1690 } |
1584 | 1691 |
1585 int AudioProcessingImpl::StopDebugRecording() { | 1692 int AudioProcessingImpl::StopDebugRecording() { |
1586 // Run in a single-threaded manner. | 1693 // Run in a single-threaded manner. |
1587 rtc::CritScope cs_render(&crit_render_); | 1694 rtc::CritScope cs_render(&crit_render_); |
1588 rtc::CritScope cs_capture(&crit_capture_); | 1695 rtc::CritScope cs_capture(&crit_capture_); |
1696 aec_dump_.reset(); | |
1589 | 1697 |
1590 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1698 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
1591 // We just return if recording hasn't started. | 1699 // We just return if recording hasn't started. |
1592 debug_dump_.debug_file->CloseFile(); | 1700 debug_dump_.debug_file->CloseFile(); |
1593 return kNoError; | 1701 return kNoError; |
1594 #else | 1702 #else |
1595 return kUnsupportedFunctionError; | 1703 return kUnsupportedFunctionError; |
1596 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1704 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
1597 } | 1705 } |
1598 | 1706 |
(...skipping 231 matching lines...) | |
1830 capture_.last_stream_delay_ms = 0; | 1938 capture_.last_stream_delay_ms = 0; |
1831 | 1939 |
1832 if (capture_.aec_system_delay_jumps > -1) { | 1940 if (capture_.aec_system_delay_jumps > -1) { |
1833 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 1941 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", |
1834 capture_.aec_system_delay_jumps, 51); | 1942 capture_.aec_system_delay_jumps, 51); |
1835 } | 1943 } |
1836 capture_.aec_system_delay_jumps = -1; | 1944 capture_.aec_system_delay_jumps = -1; |
1837 capture_.last_aec_system_delay_ms = 0; | 1945 capture_.last_aec_system_delay_ms = 0; |
1838 } | 1946 } |
1839 | 1947 |
1948 InternalAPMConfig AudioProcessingImpl::CollectApmConfig() { | |
1949 std::string experiments_description = | |
1950 public_submodules_->echo_cancellation->GetExperimentsDescription(); | |
1951 // TODO(peah): Add semicolon-separated concatenations of experiment | |
1952 // descriptions for other submodules. | |
1953 if (capture_nonlocked_.level_controller_enabled) { | |
1954 experiments_description += "LevelController;"; | |
1955 } | |
1956 if (constants_.agc_clipped_level_min != kClippedLevelMin) { | |
1957 experiments_description += "AgcClippingLevelExperiment;"; | |
1958 } | |
1959 if (capture_nonlocked_.echo_canceller3_enabled) { | |
1960 experiments_description += "EchoCanceller3;"; | |
1961 } | |
1962 | |
1963 InternalAPMConfig apm_config; | |
1964 | |
1965 apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled(); | |
1966 apm_config.aec_delay_agnostic_enabled = | |
1967 public_submodules_->echo_cancellation->is_delay_agnostic_enabled(); | |
1968 apm_config.aec_drift_compensation_enabled = | |
1969 public_submodules_->echo_cancellation->is_drift_compensation_enabled(); | |
1970 apm_config.aec_extended_filter_enabled = | |
1971 public_submodules_->echo_cancellation->is_extended_filter_enabled(); | |
1972 apm_config.aec_suppression_level = static_cast<int>( | |
1973 public_submodules_->echo_cancellation->suppression_level()); | |
1974 | |
1975 apm_config.aecm_enabled = | |
1976 public_submodules_->echo_control_mobile->is_enabled(); | |
1977 apm_config.aecm_comfort_noise_enabled = | |
1978 public_submodules_->echo_control_mobile->is_comfort_noise_enabled(); | |
1979 apm_config.aecm_routing_mode = | |
1980 static_cast<int>(public_submodules_->echo_control_mobile->routing_mode()); | |
1981 | |
1982 apm_config.agc_enabled = public_submodules_->gain_control->is_enabled(); | |
1983 apm_config.agc_mode = | |
1984 static_cast<int>(public_submodules_->gain_control->mode()); | |
1985 apm_config.agc_limiter_enabled = | |
1986 public_submodules_->gain_control->is_limiter_enabled(); | |
1987 apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc; | |
1988 | |
1989 apm_config.hpf_enabled = config_.high_pass_filter.enabled; | |
1990 | |
1991 apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled(); | |
1992 apm_config.ns_level = | |
1993 static_cast<int>(public_submodules_->noise_suppression->level()); | |
1994 | |
1995 apm_config.transient_suppression_enabled = | |
1996 capture_.transient_suppressor_enabled; | |
1997 apm_config.intelligibility_enhancer_enabled = | |
1998 capture_nonlocked_.intelligibility_enabled; | |
1999 apm_config.experiments_description = experiments_description; | |
2000 return apm_config; | |
2001 } | |
2002 | |
1840 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 2003 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
1841 int AudioProcessingImpl::WriteMessageToDebugFile( | 2004 int AudioProcessingImpl::WriteMessageToDebugFile( |
1842 FileWrapper* debug_file, | 2005 FileWrapper* debug_file, |
1843 int64_t* filesize_limit_bytes, | 2006 int64_t* filesize_limit_bytes, |
1844 rtc::CriticalSection* crit_debug, | 2007 rtc::CriticalSection* crit_debug, |
1845 ApmDebugDumpThreadState* debug_state) { | 2008 ApmDebugDumpThreadState* debug_state) { |
1846 int32_t size = debug_state->event_msg->ByteSize(); | 2009 int32_t size = debug_state->event_msg->ByteSize(); |
1847 if (size <= 0) { | 2010 if (size <= 0) { |
1848 return kUnspecifiedError; | 2011 return kUnspecifiedError; |
1849 } | 2012 } |
(...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2003 previous_agc_level(0), | 2166 previous_agc_level(0), |
2004 echo_path_gain_change(false) {} | 2167 echo_path_gain_change(false) {} |
2005 | 2168 |
2006 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 2169 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
2007 | 2170 |
2008 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 2171 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
2009 | 2172 |
2010 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 2173 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
2011 | 2174 |
2012 } // namespace webrtc | 2175 } // namespace webrtc |