Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_processing/audio_processing_impl.cc

Issue 2778783002: AecDump interface (Closed)
Patch Set: Reorder, reduce code duplication, improve readability. Created 3 years, 8 months ago
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 111 matching lines...)
122 return uppermost_native_rate; 122 return uppermost_native_rate;
123 } 123 }
124 124
125 // Maximum length that a frame of samples can have. 125 // Maximum length that a frame of samples can have.
126 static const size_t kMaxAllowedValuesOfSamplesPerFrame = 160; 126 static const size_t kMaxAllowedValuesOfSamplesPerFrame = 160;
127 // Maximum number of frames to buffer in the render queue. 127 // Maximum number of frames to buffer in the render queue.
128 // TODO(peah): Decrease this once we properly handle hugely unbalanced 128 // TODO(peah): Decrease this once we properly handle hugely unbalanced
129 // reverse and forward call numbers. 129 // reverse and forward call numbers.
130 static const size_t kMaxNumFramesToBuffer = 100; 130 static const size_t kMaxNumFramesToBuffer = 100;
131 131
132 // Maximum number of audio channels in the input and output streams.
133 constexpr size_t kMaxNumChannels = 4;
peah-webrtc 2017/04/21 14:05:15 I think you commented that this would be changed.
aleloi 2017/04/24 11:10:09 The name or the number? I've changed the name now
134
132 class HighPassFilterImpl : public HighPassFilter { 135 class HighPassFilterImpl : public HighPassFilter {
133 public: 136 public:
134 explicit HighPassFilterImpl(AudioProcessingImpl* apm) : apm_(apm) {} 137 explicit HighPassFilterImpl(AudioProcessingImpl* apm) : apm_(apm) {}
135 ~HighPassFilterImpl() override = default; 138 ~HighPassFilterImpl() override = default;
136 139
137 // HighPassFilter implementation. 140 // HighPassFilter implementation.
138 int Enable(bool enable) override { 141 int Enable(bool enable) override {
139 apm_->MutateConfig([enable](AudioProcessing::Config* config) { 142 apm_->MutateConfig([enable](AudioProcessing::Config* config) {
140 config->high_pass_filter.enabled = enable; 143 config->high_pass_filter.enabled = enable;
141 }); 144 });
142 145
143 return AudioProcessing::kNoError; 146 return AudioProcessing::kNoError;
144 } 147 }
145 148
146 bool is_enabled() const override { 149 bool is_enabled() const override {
147 return apm_->GetConfig().high_pass_filter.enabled; 150 return apm_->GetConfig().high_pass_filter.enabled;
148 } 151 }
149 152
150 private: 153 private:
151 AudioProcessingImpl* apm_; 154 AudioProcessingImpl* apm_;
152 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); 155 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl);
153 }; 156 };
154 157
158 webrtc::InternalAPMStreamsConfig ToStreamsConfig(
159 const ProcessingConfig& api_format) {
160 webrtc::InternalAPMStreamsConfig result;
161 result.input_sample_rate = api_format.input_stream().sample_rate_hz();
162 result.input_num_channels = api_format.input_stream().num_channels();
163 result.output_num_channels = api_format.output_stream().num_channels();
164 result.render_input_num_channels =
165 api_format.reverse_input_stream().num_channels();
166 result.render_input_sample_rate =
167 api_format.reverse_input_stream().sample_rate_hz();
168 result.output_sample_rate = api_format.output_stream().sample_rate_hz();
169 result.render_output_sample_rate =
170 api_format.reverse_output_stream().sample_rate_hz();
171 result.render_output_num_channels =
172 api_format.reverse_output_stream().num_channels();
173 return result;
174 }
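ToStreamsConfig flattens the currently active ProcessingConfig into the plain InternalAPMStreamsConfig struct that the dump consumes; a minimal sketch of the call, mirroring how it is used elsewhere in this patch:

    aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));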
175
176 rtc::ArrayView<rtc::ArrayView<const float>> stream_view(
peah-webrtc 2017/04/21 14:05:15 Does this name follow the style guideline? What ab
aleloi 2017/04/24 11:10:09 I thought the style allowed names_with_underscores
177 const float* const* stream,
178 size_t channel_size,
179 size_t num_channels) {
180 RTC_DCHECK_LE(num_channels, kMaxNumChannels);
181
182 std::array<rtc::ArrayView<const float>, kMaxNumChannels> array_stream_view;
183 for (size_t i = 0; i < std::min(num_channels, kMaxNumChannels); ++i) {
184 array_stream_view[i] = rtc::ArrayView<const float>(stream[i], channel_size);
185 }
186 return rtc::ArrayView<rtc::ArrayView<const float>>(&array_stream_view[0],
187 num_channels);
188 }
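In this patch, stream_view is always called with a channel size and channel count derived from the relevant StreamConfig; a condensed sketch of the capture-input call site further down in this file:

    const size_t channel_size =
        sizeof(float) * formats_.api_format.input_stream().num_frames();
    const size_t num_channels =
        formats_.api_format.input_stream().num_channels();
    stream_info->AddInput(stream_view(src, channel_size, num_channels));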
155 } // namespace 189 } // namespace
156 190
157 // Throughout webrtc, it's assumed that success is represented by zero. 191 // Throughout webrtc, it's assumed that success is represented by zero.
158 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); 192 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");
159 193
160 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} 194 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {}
161 195
162 bool AudioProcessingImpl::ApmSubmoduleStates::Update( 196 bool AudioProcessingImpl::ApmSubmoduleStates::Update(
163 bool low_cut_filter_enabled, 197 bool low_cut_filter_enabled,
164 bool echo_canceller_enabled, 198 bool echo_canceller_enabled,
(...skipping 354 matching lines...)
519 InitializeEchoCanceller3(); 553 InitializeEchoCanceller3();
520 554
521 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 555 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
522 if (debug_dump_.debug_file->is_open()) { 556 if (debug_dump_.debug_file->is_open()) {
523 int err = WriteInitMessage(); 557 int err = WriteInitMessage();
524 if (err != kNoError) { 558 if (err != kNoError) {
525 return err; 559 return err;
526 } 560 }
527 } 561 }
528 #endif 562 #endif
529 563 if (aec_dump_) {
564 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
565 }
530 return kNoError; 566 return kNoError;
531 } 567 }
532 568
533 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { 569 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
534 for (const auto& stream : config.streams) { 570 for (const auto& stream : config.streams) {
535 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { 571 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
536 return kBadSampleRateError; 572 return kBadSampleRateError;
537 } 573 }
538 } 574 }
539 575
(...skipping 277 matching lines...)
817 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); 853 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM);
818 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); 854 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
819 const size_t channel_size = 855 const size_t channel_size =
820 sizeof(float) * formats_.api_format.input_stream().num_frames(); 856 sizeof(float) * formats_.api_format.input_stream().num_frames();
821 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); 857 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels();
822 ++i) 858 ++i)
823 msg->add_input_channel(src[i], channel_size); 859 msg->add_input_channel(src[i], channel_size);
824 } 860 }
825 #endif 861 #endif
826 862
863 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info;
864 if (aec_dump_) {
865 aec_dump_->WriteConfig(CollectApmConfig(), false);
peah-webrtc 2017/04/21 14:05:15 Same thing here, would it be possible to move the
aleloi 2017/04/24 11:10:09 WDYT about the latest change? I don't think it's p
866
867 stream_info = aec_dump_->GetCaptureStreamInfo();
868 RTC_DCHECK(stream_info);
869 const size_t channel_size =
870 sizeof(float) * formats_.api_format.input_stream().num_frames();
871 const size_t num_channels =
872 formats_.api_format.input_stream().num_channels();
873 stream_info->AddInput(stream_view(src, channel_size, num_channels));
874 PopulateStreamInfoWithConfig(stream_info.get());
875 }
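The channel_size / num_channels pair above is recomputed at each dump call site in this patch; if that were factored out as the review comments hint, a hypothetical helper (not part of this CL) might look like:

    // Hypothetical, not part of this CL: per-channel byte count of a float
    // stream, computed the same way as at the dump call sites in this file.
    size_t ChannelSizeBytes(const StreamConfig& stream) {
      return sizeof(float) * stream.num_frames();
    }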
876
827 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); 877 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
828 RETURN_ON_ERR(ProcessCaptureStreamLocked()); 878 RETURN_ON_ERR(ProcessCaptureStreamLocked());
829 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); 879 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
830 880
831 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 881 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
832 if (debug_dump_.debug_file->is_open()) { 882 if (debug_dump_.debug_file->is_open()) {
833 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); 883 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
834 const size_t channel_size = 884 const size_t channel_size =
835 sizeof(float) * formats_.api_format.output_stream().num_frames(); 885 sizeof(float) * formats_.api_format.output_stream().num_frames();
836 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); 886 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels();
837 ++i) 887 ++i)
838 msg->add_output_channel(dest[i], channel_size); 888 msg->add_output_channel(dest[i], channel_size);
839 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), 889 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
840 &debug_dump_.num_bytes_left_for_log_, 890 &debug_dump_.num_bytes_left_for_log_,
841 &crit_debug_, &debug_dump_.capture)); 891 &crit_debug_, &debug_dump_.capture));
842 } 892 }
843 #endif 893 #endif
894 if (aec_dump_) {
895 const size_t channel_size =
896 sizeof(float) * formats_.api_format.output_stream().num_frames();
peah-webrtc 2017/04/21 14:05:15 Same thing here, would it be possible to move the
aleloi 2017/04/24 11:10:09 Acknowledged.
897 const size_t num_channels =
898 formats_.api_format.output_stream().num_channels();
899 stream_info->AddOutput(stream_view(dest, channel_size, num_channels));
900 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info));
901 }
844 902
845 return kNoError; 903 return kNoError;
846 } 904 }
847 905
848 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { 906 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) {
849 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), 907 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(),
850 num_reverse_channels(), 908 num_reverse_channels(),
851 &aec_render_queue_buffer_); 909 &aec_render_queue_buffer_);
852 910
853 RTC_DCHECK_GE(160, audio->num_frames_per_band()); 911 RTC_DCHECK_GE(160, audio->num_frames_per_band());
(...skipping 217 matching lines...)
1071 rtc::CritScope cs_render(&crit_render_); 1129 rtc::CritScope cs_render(&crit_render_);
1072 RETURN_ON_ERR( 1130 RETURN_ON_ERR(
1073 MaybeInitializeCapture(processing_config, reinitialization_required)); 1131 MaybeInitializeCapture(processing_config, reinitialization_required));
1074 } 1132 }
1075 rtc::CritScope cs_capture(&crit_capture_); 1133 rtc::CritScope cs_capture(&crit_capture_);
1076 if (frame->samples_per_channel_ != 1134 if (frame->samples_per_channel_ !=
1077 formats_.api_format.input_stream().num_frames()) { 1135 formats_.api_format.input_stream().num_frames()) {
1078 return kBadDataLengthError; 1136 return kBadDataLengthError;
1079 } 1137 }
1080 1138
1139 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info;
1140 if (aec_dump_) {
1141 stream_info = aec_dump_->GetCaptureStreamInfo();
peah-webrtc 2017/04/21 14:05:15 Same thing here, would it be possible to move the
aleloi 2017/04/24 11:10:09 Acknowledged.
1142 RTC_DCHECK(stream_info);
1143 stream_info->AddInput(*frame);
1144 PopulateStreamInfoWithConfig(stream_info.get());
1145 aec_dump_->WriteConfig(CollectApmConfig(), false);
1146 }
1147
1081 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 1148 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
1082 if (debug_dump_.debug_file->is_open()) { 1149 if (debug_dump_.debug_file->is_open()) {
1083 RETURN_ON_ERR(WriteConfigMessage(false)); 1150 RETURN_ON_ERR(WriteConfigMessage(false));
1084 1151
1085 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); 1152 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM);
1086 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); 1153 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
1087 const size_t data_size = 1154 const size_t data_size =
1088 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 1155 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
1089 msg->set_input_data(frame->data_, data_size); 1156 msg->set_input_data(frame->data_, data_size);
1090 } 1157 }
1091 #endif 1158 #endif
1092 1159
1093 capture_.capture_audio->DeinterleaveFrom(frame); 1160 capture_.capture_audio->DeinterleaveFrom(frame);
1094 RETURN_ON_ERR(ProcessCaptureStreamLocked()); 1161 RETURN_ON_ERR(ProcessCaptureStreamLocked());
1095 capture_.capture_audio->InterleaveTo( 1162 capture_.capture_audio->InterleaveTo(
1096 frame, submodule_states_.CaptureMultiBandProcessingActive()); 1163 frame, submodule_states_.CaptureMultiBandProcessingActive());
1097 1164
1165 if (aec_dump_) {
1166 stream_info->AddOutput(*frame);
1167 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info));
1168 }
1098 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 1169 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
1099 if (debug_dump_.debug_file->is_open()) { 1170 if (debug_dump_.debug_file->is_open()) {
1100 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); 1171 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
1101 const size_t data_size = 1172 const size_t data_size =
1102 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 1173 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
1103 msg->set_output_data(frame->data_, data_size); 1174 msg->set_output_data(frame->data_, data_size);
1104 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), 1175 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
1105 &debug_dump_.num_bytes_left_for_log_, 1176 &debug_dump_.num_bytes_left_for_log_,
1106 &crit_debug_, &debug_dump_.capture)); 1177 &crit_debug_, &debug_dump_.capture));
1107 } 1178 }
(...skipping 261 matching lines...)
1369 const size_t channel_size = 1440 const size_t channel_size =
1370 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); 1441 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames();
1371 for (size_t i = 0; 1442 for (size_t i = 0;
1372 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) 1443 i < formats_.api_format.reverse_input_stream().num_channels(); ++i)
1373 msg->add_channel(src[i], channel_size); 1444 msg->add_channel(src[i], channel_size);
1374 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), 1445 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
1375 &debug_dump_.num_bytes_left_for_log_, 1446 &debug_dump_.num_bytes_left_for_log_,
1376 &crit_debug_, &debug_dump_.render)); 1447 &crit_debug_, &debug_dump_.render));
1377 } 1448 }
1378 #endif 1449 #endif
1379 1450 if (aec_dump_) {
1451 const size_t channel_size =
1452 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames();
1453 const size_t num_channels =
peah-webrtc 2017/04/21 14:05:15 Same thing here, would it be possible to move the
aleloi 2017/04/24 11:10:09 Acknowledged.
1454 formats_.api_format.reverse_input_stream().num_channels();
1455 aec_dump_->WriteRenderStreamMessage(
1456 stream_view(src, channel_size, num_channels));
1457 }
1380 render_.render_audio->CopyFrom(src, 1458 render_.render_audio->CopyFrom(src,
1381 formats_.api_format.reverse_input_stream()); 1459 formats_.api_format.reverse_input_stream());
1382 return ProcessRenderStreamLocked(); 1460 return ProcessRenderStreamLocked();
1383 } 1461 }
1384 1462
1385 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { 1463 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
1386 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); 1464 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame");
1387 rtc::CritScope cs(&crit_render_); 1465 rtc::CritScope cs(&crit_render_);
1388 if (frame == nullptr) { 1466 if (frame == nullptr) {
1389 return kNullPointerError; 1467 return kNullPointerError;
(...skipping 32 matching lines...)
1422 audioproc::ReverseStream* msg = 1500 audioproc::ReverseStream* msg =
1423 debug_dump_.render.event_msg->mutable_reverse_stream(); 1501 debug_dump_.render.event_msg->mutable_reverse_stream();
1424 const size_t data_size = 1502 const size_t data_size =
1425 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; 1503 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
1426 msg->set_data(frame->data_, data_size); 1504 msg->set_data(frame->data_, data_size);
1427 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), 1505 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
1428 &debug_dump_.num_bytes_left_for_log_, 1506 &debug_dump_.num_bytes_left_for_log_,
1429 &crit_debug_, &debug_dump_.render)); 1507 &crit_debug_, &debug_dump_.render));
1430 } 1508 }
1431 #endif 1509 #endif
1510 if (aec_dump_) {
1511 aec_dump_->WriteRenderStreamMessage(*frame);
1512 }
1513
1432 render_.render_audio->DeinterleaveFrom(frame); 1514 render_.render_audio->DeinterleaveFrom(frame);
1433 RETURN_ON_ERR(ProcessRenderStreamLocked()); 1515 RETURN_ON_ERR(ProcessRenderStreamLocked());
1434 render_.render_audio->InterleaveTo( 1516 render_.render_audio->InterleaveTo(
1435 frame, submodule_states_.RenderMultiBandProcessingActive()); 1517 frame, submodule_states_.RenderMultiBandProcessingActive());
1436 return kNoError; 1518 return kNoError;
1437 } 1519 }
1438 1520
1439 int AudioProcessingImpl::ProcessRenderStreamLocked() { 1521 int AudioProcessingImpl::ProcessRenderStreamLocked() {
1440 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. 1522 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity.
1441 if (submodule_states_.RenderMultiBandSubModulesActive() && 1523 if (submodule_states_.RenderMultiBandSubModulesActive() &&
(...skipping 63 matching lines...)
1505 void AudioProcessingImpl::set_delay_offset_ms(int offset) { 1587 void AudioProcessingImpl::set_delay_offset_ms(int offset) {
1506 rtc::CritScope cs(&crit_capture_); 1588 rtc::CritScope cs(&crit_capture_);
1507 capture_.delay_offset_ms = offset; 1589 capture_.delay_offset_ms = offset;
1508 } 1590 }
1509 1591
1510 int AudioProcessingImpl::delay_offset_ms() const { 1592 int AudioProcessingImpl::delay_offset_ms() const {
1511 rtc::CritScope cs(&crit_capture_); 1593 rtc::CritScope cs(&crit_capture_);
1512 return capture_.delay_offset_ms; 1594 return capture_.delay_offset_ms;
1513 } 1595 }
1514 1596
1597 void AudioProcessingImpl::StartDebugRecording(
1598 std::unique_ptr<AecDump> aec_dump) {
1599 rtc::CritScope cs_render(&crit_render_);
1600 rtc::CritScope cs_capture(&crit_capture_);
1601 RTC_DCHECK(aec_dump);
1602 aec_dump_ = std::move(aec_dump);
1603
1604 aec_dump_->WriteConfig(CollectApmConfig(), true);
1605 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
1606 }
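A minimal client-side sketch of the new overload, assuming a concrete AecDump implementation named MyAecDump (hypothetical) and an AudioProcessing instance apm that exposes this overload:

    // Hypothetical usage sketch of the overload added in this patch.
    std::unique_ptr<AecDump> dump(new MyAecDump(/* sink, options, ... */));
    apm->StartDebugRecording(std::move(dump));  // writes config + init message up front
    // ... subsequent ProcessStream() / ProcessReverseStream() calls are recorded ...
    apm->StopDebugRecording();                  // resets aec_dump_ and stops recording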
1607
1515 int AudioProcessingImpl::StartDebugRecording( 1608 int AudioProcessingImpl::StartDebugRecording(
1516 const char filename[AudioProcessing::kMaxFilenameSize], 1609 const char filename[AudioProcessing::kMaxFilenameSize],
1517 int64_t max_log_size_bytes) { 1610 int64_t max_log_size_bytes) {
1518 // Run in a single-threaded manner. 1611 // Run in a single-threaded manner.
1519 rtc::CritScope cs_render(&crit_render_); 1612 rtc::CritScope cs_render(&crit_render_);
1520 rtc::CritScope cs_capture(&crit_capture_); 1613 rtc::CritScope cs_capture(&crit_capture_);
1521 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); 1614 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, "");
1522 1615
1523 if (filename == nullptr) { 1616 if (filename == nullptr) {
1524 return kNullPointerError; 1617 return kNullPointerError;
(...skipping 54 matching lines...)
1579 rtc::CritScope cs_render(&crit_render_); 1672 rtc::CritScope cs_render(&crit_render_);
1580 rtc::CritScope cs_capture(&crit_capture_); 1673 rtc::CritScope cs_capture(&crit_capture_);
1581 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); 1674 FILE* stream = rtc::FdopenPlatformFileForWriting(handle);
1582 return StartDebugRecording(stream, -1); 1675 return StartDebugRecording(stream, -1);
1583 } 1676 }
1584 1677
1585 int AudioProcessingImpl::StopDebugRecording() { 1678 int AudioProcessingImpl::StopDebugRecording() {
1586 // Run in a single-threaded manner. 1679 // Run in a single-threaded manner.
1587 rtc::CritScope cs_render(&crit_render_); 1680 rtc::CritScope cs_render(&crit_render_);
1588 rtc::CritScope cs_capture(&crit_capture_); 1681 rtc::CritScope cs_capture(&crit_capture_);
1682 aec_dump_.reset();
1589 1683
1590 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 1684 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
1591 // We just return if recording hasn't started. 1685 // We just return if recording hasn't started.
1592 debug_dump_.debug_file->CloseFile(); 1686 debug_dump_.debug_file->CloseFile();
1593 return kNoError; 1687 return kNoError;
1594 #else 1688 #else
1595 return kUnsupportedFunctionError; 1689 return kUnsupportedFunctionError;
1596 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP 1690 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
1597 } 1691 }
1598 1692
(...skipping 231 matching lines...)
1830 capture_.last_stream_delay_ms = 0; 1924 capture_.last_stream_delay_ms = 0;
1831 1925
1832 if (capture_.aec_system_delay_jumps > -1) { 1926 if (capture_.aec_system_delay_jumps > -1) {
1833 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", 1927 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps",
1834 capture_.aec_system_delay_jumps, 51); 1928 capture_.aec_system_delay_jumps, 51);
1835 } 1929 }
1836 capture_.aec_system_delay_jumps = -1; 1930 capture_.aec_system_delay_jumps = -1;
1837 capture_.last_aec_system_delay_ms = 0; 1931 capture_.last_aec_system_delay_ms = 0;
1838 } 1932 }
1839 1933
1934 InternalAPMConfig AudioProcessingImpl::CollectApmConfig() {
1935 std::string experiments_description =
1936 public_submodules_->echo_cancellation->GetExperimentsDescription();
1937 // TODO(peah): Add semicolon-separated concatenations of experiment
1938 // descriptions for other submodules.
1939 if (capture_nonlocked_.level_controller_enabled) {
1940 experiments_description += "LevelController;";
1941 }
1942 if (constants_.agc_clipped_level_min != kClippedLevelMin) {
1943 experiments_description += "AgcClippingLevelExperiment;";
1944 }
1945 if (capture_nonlocked_.echo_canceller3_enabled) {
1946 experiments_description += "EchoCanceller3;";
1947 }
1948
1949 InternalAPMConfig apm_config;
1950
1951 apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled();
1952 apm_config.aec_delay_agnostic_enabled =
1953 public_submodules_->echo_cancellation->is_delay_agnostic_enabled();
1954 apm_config.aec_drift_compensation_enabled =
1955 public_submodules_->echo_cancellation->is_drift_compensation_enabled();
1956 apm_config.aec_extended_filter_enabled =
1957 public_submodules_->echo_cancellation->is_extended_filter_enabled();
1958 apm_config.aec_suppression_level = static_cast<int>(
1959 public_submodules_->echo_cancellation->suppression_level());
1960
1961 apm_config.aecm_enabled =
1962 public_submodules_->echo_control_mobile->is_enabled();
1963 apm_config.aecm_comfort_noise_enabled =
1964 public_submodules_->echo_control_mobile->is_comfort_noise_enabled();
1965 apm_config.aecm_routing_mode =
1966 static_cast<int>(public_submodules_->echo_control_mobile->routing_mode());
1967
1968 apm_config.agc_enabled = public_submodules_->gain_control->is_enabled();
1969 apm_config.agc_mode =
1970 static_cast<int>(public_submodules_->gain_control->mode());
1971 apm_config.agc_limiter_enabled =
1972 public_submodules_->gain_control->is_limiter_enabled();
1973 apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc;
1974
1975 apm_config.hpf_enabled = config_.high_pass_filter.enabled;
1976
1977 apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled();
1978 apm_config.ns_level =
1979 static_cast<int>(public_submodules_->noise_suppression->level());
1980
1981 apm_config.transient_suppression_enabled =
1982 capture_.transient_suppressor_enabled;
1983 apm_config.intelligibility_enhancer_enabled =
1984 capture_nonlocked_.intelligibility_enabled;
1985 apm_config.experiments_description = experiments_description;
1986 return apm_config;
1987 }
1988
1989 void AudioProcessingImpl::PopulateStreamInfoWithConfig(
1990 AecDump::CaptureStreamInfo* stream_info) const {
1991 RTC_DCHECK(stream_info);
1992 stream_info->set_delay(capture_nonlocked_.stream_delay_ms);
1993 stream_info->set_drift(
1994 public_submodules_->echo_cancellation->stream_drift_samples());
1995 stream_info->set_level(gain_control()->stream_analog_level());
1996 stream_info->set_keypress(capture_.key_pressed);
1997 }
1998
1840 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP 1999 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
1841 int AudioProcessingImpl::WriteMessageToDebugFile( 2000 int AudioProcessingImpl::WriteMessageToDebugFile(
1842 FileWrapper* debug_file, 2001 FileWrapper* debug_file,
1843 int64_t* filesize_limit_bytes, 2002 int64_t* filesize_limit_bytes,
1844 rtc::CriticalSection* crit_debug, 2003 rtc::CriticalSection* crit_debug,
1845 ApmDebugDumpThreadState* debug_state) { 2004 ApmDebugDumpThreadState* debug_state) {
1846 int32_t size = debug_state->event_msg->ByteSize(); 2005 int32_t size = debug_state->event_msg->ByteSize();
1847 if (size <= 0) { 2006 if (size <= 0) {
1848 return kUnspecifiedError; 2007 return kUnspecifiedError;
1849 } 2008 }
(...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after
2003 previous_agc_level(0), 2162 previous_agc_level(0),
2004 echo_path_gain_change(false) {} 2163 echo_path_gain_change(false) {}
2005 2164
2006 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; 2165 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default;
2007 2166
2008 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; 2167 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default;
2009 2168
2010 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; 2169 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default;
2011 2170
2012 } // namespace webrtc 2171 } // namespace webrtc