Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 133 matching lines...) | |
| 144 | 144 |
| 145 bool is_enabled() const override { | 145 bool is_enabled() const override { |
| 146 return apm_->GetConfig().high_pass_filter.enabled; | 146 return apm_->GetConfig().high_pass_filter.enabled; |
| 147 } | 147 } |
| 148 | 148 |
| 149 private: | 149 private: |
| 150 AudioProcessingImpl* apm_; | 150 AudioProcessingImpl* apm_; |
| 151 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); | 151 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); |
| 152 }; | 152 }; |
| 153 | 153 |
| 154 webrtc::InternalAPMStreamsConfig ToStreamsConfig( | |
| 155 const ProcessingConfig& api_format) { | |
| 156 webrtc::InternalAPMStreamsConfig result; | |
| 157 result.input_sample_rate = api_format.input_stream().sample_rate_hz(); | |
| 158 result.input_num_channels = api_format.input_stream().num_channels(); | |
| 159 result.output_num_channels = api_format.output_stream().num_channels(); | |
| 160 result.render_input_num_channels = | |
| 161 api_format.reverse_input_stream().num_channels(); | |
| 162 result.render_input_sample_rate = | |
| 163 api_format.reverse_input_stream().sample_rate_hz(); | |
| 164 result.output_sample_rate = api_format.output_stream().sample_rate_hz(); | |
| 165 result.render_output_sample_rate = | |
| 166 api_format.reverse_output_stream().sample_rate_hz(); | |
| 167 result.render_output_num_channels = | |
| 168 api_format.reverse_output_stream().num_channels(); | |
| 169 return result; | |
| 170 } | |
| 171 | |
| 154 } // namespace | 172 } // namespace |
| 155 | 173 |
| 156 // Throughout webrtc, it's assumed that success is represented by zero. | 174 // Throughout webrtc, it's assumed that success is represented by zero. |
| 157 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 175 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
| 158 | 176 |
| 159 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} | 177 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} |
| 160 | 178 |
| 161 bool AudioProcessingImpl::ApmSubmoduleStates::Update( | 179 bool AudioProcessingImpl::ApmSubmoduleStates::Update( |
| 162 bool low_cut_filter_enabled, | 180 bool low_cut_filter_enabled, |
| 163 bool echo_canceller_enabled, | 181 bool echo_canceller_enabled, |
| (...skipping 354 matching lines...) | |
| 518 InitializeEchoCanceller3(); | 536 InitializeEchoCanceller3(); |
| 519 | 537 |
| 520 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 538 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 521 if (debug_dump_.debug_file->is_open()) { | 539 if (debug_dump_.debug_file->is_open()) { |
| 522 int err = WriteInitMessage(); | 540 int err = WriteInitMessage(); |
| 523 if (err != kNoError) { | 541 if (err != kNoError) { |
| 524 return err; | 542 return err; |
| 525 } | 543 } |
| 526 } | 544 } |
| 527 #endif | 545 #endif |
| 528 | 546 if (aec_dump_) { |
| 547 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
| 548 } | |
| 529 return kNoError; | 549 return kNoError; |
| 530 } | 550 } |
| 531 | 551 |
| 532 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 552 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| 533 for (const auto& stream : config.streams) { | 553 for (const auto& stream : config.streams) { |
| 534 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 554 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
| 535 return kBadSampleRateError; | 555 return kBadSampleRateError; |
| 536 } | 556 } |
| 537 } | 557 } |
| 538 | 558 |
| (...skipping 277 matching lines...) | |
| 816 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 836 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 817 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 837 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 818 const size_t channel_size = | 838 const size_t channel_size = |
| 819 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 839 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| 820 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 840 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
| 821 ++i) | 841 ++i) |
| 822 msg->add_input_channel(src[i], channel_size); | 842 msg->add_input_channel(src[i], channel_size); |
| 823 } | 843 } |
| 824 #endif | 844 #endif |
| 825 | 845 |
| 846 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info; | |
| 847 if (aec_dump_) { | |
| 848 stream_info = aec_dump_->GetCaptureStreamInfo(); | |
| 849 } | |
| 850 | |
| 851 if (aec_dump_) { | |
| 852 const size_t channel_size = | |
| 853 sizeof(float) * formats_.api_format.input_stream().num_frames(); | |
| 854 std::vector<rtc::ArrayView<const float>> src_view; | |
| 855 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | |
| 856 ++i) { | |
| 857 src_view.emplace_back(src[i], channel_size); | |
| 858 } | |
| 859 stream_info->AddInput(src_view); | |
| 860 } | |
| 826 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 861 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
| 862 | |
| 863 if (aec_dump_) { | |
> **the sun** (2017/04/18 08:46:12): Looks like you can fold this conditional section w…
> **aleloi** (2017/04/18 14:08:15): Done. Thanks for spotting this!
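For context, a rough sketch of the fold being discussed: merge the two `if (aec_dump_)` conditionals in the float-pointer `ProcessStream()` path into one. The statements are taken verbatim from this CL; the only assumption added here is that the delay/drift/level/keypress setters do not need to run after `CopyFrom()`.

```cpp
// Sketch only, not the CL's final code: one aec_dump_ conditional
// that builds the CaptureStreamInfo and fills in all of its fields.
std::unique_ptr<AecDump::CaptureStreamInfo> stream_info;
if (aec_dump_) {
  stream_info = aec_dump_->GetCaptureStreamInfo();
  const size_t channel_size =
      sizeof(float) * formats_.api_format.input_stream().num_frames();
  std::vector<rtc::ArrayView<const float>> src_view;
  for (size_t i = 0; i < formats_.api_format.input_stream().num_channels();
       ++i) {
    src_view.emplace_back(src[i], channel_size);
  }
  stream_info->AddInput(src_view);
  stream_info->set_delay(capture_nonlocked_.stream_delay_ms);
  stream_info->set_drift(
      public_submodules_->echo_cancellation->stream_drift_samples());
  stream_info->set_level(gain_control()->stream_analog_level());
  stream_info->set_keypress(capture_.key_pressed);
}

capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
```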
| 864 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); | |
| 865 stream_info->set_drift( | |
| 866 public_submodules_->echo_cancellation->stream_drift_samples()); | |
| 867 stream_info->set_level(gain_control()->stream_analog_level()); | |
| 868 stream_info->set_keypress(capture_.key_pressed); | |
| 869 } | |
| 870 | |
| 827 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 871 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 828 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 872 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
| 829 | 873 |
| 830 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 874 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 831 if (debug_dump_.debug_file->is_open()) { | 875 if (debug_dump_.debug_file->is_open()) { |
| 832 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 876 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 833 const size_t channel_size = | 877 const size_t channel_size = |
| 834 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 878 sizeof(float) * formats_.api_format.output_stream().num_frames(); |
| 835 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | 879 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
| 836 ++i) | 880 ++i) |
| 837 msg->add_output_channel(dest[i], channel_size); | 881 msg->add_output_channel(dest[i], channel_size); |
| 838 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 882 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 839 &debug_dump_.num_bytes_left_for_log_, | 883 &debug_dump_.num_bytes_left_for_log_, |
| 840 &crit_debug_, &debug_dump_.capture)); | 884 &crit_debug_, &debug_dump_.capture)); |
| 841 } | 885 } |
| 842 #endif | 886 #endif |
| 887 if (aec_dump_) { | |
> **the sun** (2017/04/18 08:46:12): `if (stream_info) {` …otherwise the contract is t…
> **aleloi** (2017/04/18 14:08:15): That should be the case now that there is no Null…
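One reading of the (truncated) comment above, sketched for illustration: gate the output block on the `stream_info` that was actually populated, rather than on `aec_dump_`, so the block cannot dereference a null pointer. The body below is copied from this CL; only the condition differs, and whether the two checks are equivalent is exactly the contract being discussed.

```cpp
// Sketch of the reviewer's "if (stream_info)" suggestion; statements
// are the CL's own. In this CL stream_info is set iff aec_dump_ is set.
if (stream_info) {
  const size_t channel_size =
      sizeof(float) * formats_.api_format.output_stream().num_frames();
  std::vector<rtc::ArrayView<const float>> dest_view;
  for (size_t i = 0; i < formats_.api_format.output_stream().num_channels();
       ++i) {
    dest_view.emplace_back(dest[i], channel_size);
  }
  stream_info->AddOutput(dest_view);
  aec_dump_->WriteCaptureStreamMessage(std::move(stream_info));
}
```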
| 888 const size_t channel_size = | |
| 889 sizeof(float) * formats_.api_format.output_stream().num_frames(); | |
| 890 std::vector<rtc::ArrayView<const float>> dest_view; | |
| 891 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | |
| 892 ++i) { | |
| 893 dest_view.emplace_back(dest[i], channel_size); | |
| 894 } | |
| 895 stream_info->AddOutput(dest_view); | |
| 896 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); | |
| 897 } | |
| 843 | 898 |
| 844 return kNoError; | 899 return kNoError; |
| 845 } | 900 } |
| 846 | 901 |
| 847 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { | 902 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { |
| 848 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), | 903 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), |
| 849 num_reverse_channels(), | 904 num_reverse_channels(), |
| 850 &aec_render_queue_buffer_); | 905 &aec_render_queue_buffer_); |
| 851 | 906 |
| 852 RTC_DCHECK_GE(160, audio->num_frames_per_band()); | 907 RTC_DCHECK_GE(160, audio->num_frames_per_band()); |
| (...skipping 217 matching lines...) | |
| 1070 rtc::CritScope cs_render(&crit_render_); | 1125 rtc::CritScope cs_render(&crit_render_); |
| 1071 RETURN_ON_ERR( | 1126 RETURN_ON_ERR( |
| 1072 MaybeInitializeCapture(processing_config, reinitialization_required)); | 1127 MaybeInitializeCapture(processing_config, reinitialization_required)); |
| 1073 } | 1128 } |
| 1074 rtc::CritScope cs_capture(&crit_capture_); | 1129 rtc::CritScope cs_capture(&crit_capture_); |
| 1075 if (frame->samples_per_channel_ != | 1130 if (frame->samples_per_channel_ != |
| 1076 formats_.api_format.input_stream().num_frames()) { | 1131 formats_.api_format.input_stream().num_frames()) { |
| 1077 return kBadDataLengthError; | 1132 return kBadDataLengthError; |
| 1078 } | 1133 } |
| 1079 | 1134 |
| 1135 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info; | |
| 1136 if (aec_dump_) { | |
| 1137 stream_info = aec_dump_->GetCaptureStreamInfo(); | |
| 1138 stream_info->AddInput(*frame); | |
| 1139 } | |
| 1140 | |
| 1080 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1141 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1081 if (debug_dump_.debug_file->is_open()) { | 1142 if (debug_dump_.debug_file->is_open()) { |
| 1082 RETURN_ON_ERR(WriteConfigMessage(false)); | 1143 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 1083 | 1144 |
| 1084 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 1145 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 1085 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1146 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 1086 const size_t data_size = | 1147 const size_t data_size = |
| 1087 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1148 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1088 msg->set_input_data(frame->data_, data_size); | 1149 msg->set_input_data(frame->data_, data_size); |
| 1089 } | 1150 } |
| 1090 #endif | 1151 #endif |
| 1091 | 1152 |
| 1092 capture_.capture_audio->DeinterleaveFrom(frame); | 1153 capture_.capture_audio->DeinterleaveFrom(frame); |
| 1154 | |
| 1155 if (aec_dump_) { | |
> **the sun** (2017/04/18 08:46:12): Fold with the conditional on line 1136
> **aleloi** (2017/04/18 14:08:15): Done.
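As above, a sketch of the suggested fold for the `AudioFrame` path: the conditional at line 1136 absorbs the one at line 1155. All statements come from this CL; the assumption here is that the setters do not depend on `DeinterleaveFrom()` having run.

```cpp
// Sketch only: single aec_dump_ conditional before DeinterleaveFrom().
std::unique_ptr<AecDump::CaptureStreamInfo> stream_info;
if (aec_dump_) {
  stream_info = aec_dump_->GetCaptureStreamInfo();
  stream_info->AddInput(*frame);
  stream_info->set_delay(capture_nonlocked_.stream_delay_ms);
  stream_info->set_drift(
      public_submodules_->echo_cancellation->stream_drift_samples());
  stream_info->set_level(gain_control()->stream_analog_level());
  stream_info->set_keypress(capture_.key_pressed);
}
```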
| 1156 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); | |
| 1157 stream_info->set_drift( | |
| 1158 public_submodules_->echo_cancellation->stream_drift_samples()); | |
| 1159 stream_info->set_level(gain_control()->stream_analog_level()); | |
| 1160 stream_info->set_keypress(capture_.key_pressed); | |
| 1161 } | |
| 1093 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 1162 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 1094 capture_.capture_audio->InterleaveTo( | 1163 capture_.capture_audio->InterleaveTo( |
| 1095 frame, submodule_states_.CaptureMultiBandProcessingActive()); | 1164 frame, submodule_states_.CaptureMultiBandProcessingActive()); |
| 1096 | 1165 |
| 1166 if (aec_dump_) { | |
| 1167 stream_info->AddOutput(*frame); | |
| 1168 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); | |
| 1169 } | |
| 1097 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1170 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1098 if (debug_dump_.debug_file->is_open()) { | 1171 if (debug_dump_.debug_file->is_open()) { |
| 1099 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1172 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 1100 const size_t data_size = | 1173 const size_t data_size = |
| 1101 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1174 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1102 msg->set_output_data(frame->data_, data_size); | 1175 msg->set_output_data(frame->data_, data_size); |
| 1103 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1176 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1104 &debug_dump_.num_bytes_left_for_log_, | 1177 &debug_dump_.num_bytes_left_for_log_, |
| 1105 &crit_debug_, &debug_dump_.capture)); | 1178 &crit_debug_, &debug_dump_.capture)); |
| 1106 } | 1179 } |
| (...skipping 261 matching lines...) | |
| 1368 const size_t channel_size = | 1441 const size_t channel_size = |
| 1369 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1442 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| 1370 for (size_t i = 0; | 1443 for (size_t i = 0; |
| 1371 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 1444 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| 1372 msg->add_channel(src[i], channel_size); | 1445 msg->add_channel(src[i], channel_size); |
| 1373 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1446 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1374 &debug_dump_.num_bytes_left_for_log_, | 1447 &debug_dump_.num_bytes_left_for_log_, |
| 1375 &crit_debug_, &debug_dump_.render)); | 1448 &crit_debug_, &debug_dump_.render)); |
| 1376 } | 1449 } |
| 1377 #endif | 1450 #endif |
| 1451 if (aec_dump_) { | |
| 1452 std::vector<rtc::ArrayView<const float>> src_view; | |
| 1453 const size_t channel_size = | |
| 1454 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | |
| 1378 | 1455 |
| 1456 for (size_t i = 0; | |
| 1457 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) { | |
| 1458 src_view.emplace_back(src[i], channel_size); | |
| 1459 } | |
| 1460 aec_dump_->WriteRenderStreamMessage(src_view); | |
| 1461 } | |
| 1379 render_.render_audio->CopyFrom(src, | 1462 render_.render_audio->CopyFrom(src, |
| 1380 formats_.api_format.reverse_input_stream()); | 1463 formats_.api_format.reverse_input_stream()); |
| 1381 return ProcessRenderStreamLocked(); | 1464 return ProcessRenderStreamLocked(); |
| 1382 } | 1465 } |
| 1383 | 1466 |
| 1384 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 1467 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| 1385 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 1468 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); |
| 1386 rtc::CritScope cs(&crit_render_); | 1469 rtc::CritScope cs(&crit_render_); |
| 1387 if (frame == nullptr) { | 1470 if (frame == nullptr) { |
| 1388 return kNullPointerError; | 1471 return kNullPointerError; |
| (...skipping 32 matching lines...) | |
| 1421 audioproc::ReverseStream* msg = | 1504 audioproc::ReverseStream* msg = |
| 1422 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1505 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 1423 const size_t data_size = | 1506 const size_t data_size = |
| 1424 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1507 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1425 msg->set_data(frame->data_, data_size); | 1508 msg->set_data(frame->data_, data_size); |
| 1426 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1509 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1427 &debug_dump_.num_bytes_left_for_log_, | 1510 &debug_dump_.num_bytes_left_for_log_, |
| 1428 &crit_debug_, &debug_dump_.render)); | 1511 &crit_debug_, &debug_dump_.render)); |
| 1429 } | 1512 } |
| 1430 #endif | 1513 #endif |
| 1514 if (aec_dump_) { | |
| 1515 aec_dump_->WriteRenderStreamMessage(*frame); | |
| 1516 } | |
| 1517 | |
| 1431 render_.render_audio->DeinterleaveFrom(frame); | 1518 render_.render_audio->DeinterleaveFrom(frame); |
| 1432 RETURN_ON_ERR(ProcessRenderStreamLocked()); | 1519 RETURN_ON_ERR(ProcessRenderStreamLocked()); |
| 1433 render_.render_audio->InterleaveTo( | 1520 render_.render_audio->InterleaveTo( |
| 1434 frame, submodule_states_.RenderMultiBandProcessingActive()); | 1521 frame, submodule_states_.RenderMultiBandProcessingActive()); |
| 1435 return kNoError; | 1522 return kNoError; |
| 1436 } | 1523 } |
| 1437 | 1524 |
| 1438 int AudioProcessingImpl::ProcessRenderStreamLocked() { | 1525 int AudioProcessingImpl::ProcessRenderStreamLocked() { |
| 1439 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. | 1526 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. |
| 1440 if (submodule_states_.RenderMultiBandSubModulesActive() && | 1527 if (submodule_states_.RenderMultiBandSubModulesActive() && |
| (...skipping 63 matching lines...) | |
| 1504 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 1591 void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
| 1505 rtc::CritScope cs(&crit_capture_); | 1592 rtc::CritScope cs(&crit_capture_); |
| 1506 capture_.delay_offset_ms = offset; | 1593 capture_.delay_offset_ms = offset; |
| 1507 } | 1594 } |
| 1508 | 1595 |
| 1509 int AudioProcessingImpl::delay_offset_ms() const { | 1596 int AudioProcessingImpl::delay_offset_ms() const { |
| 1510 rtc::CritScope cs(&crit_capture_); | 1597 rtc::CritScope cs(&crit_capture_); |
| 1511 return capture_.delay_offset_ms; | 1598 return capture_.delay_offset_ms; |
| 1512 } | 1599 } |
| 1513 | 1600 |
| 1601 void AudioProcessingImpl::StartDebugRecording( | |
| 1602 std::unique_ptr<AecDump> aec_dump) { | |
| 1603 rtc::CritScope cs_render(&crit_render_); | |
| 1604 rtc::CritScope cs_capture(&crit_capture_); | |
| 1605 RTC_DCHECK(aec_dump); | |
| 1606 aec_dump_ = std::move(aec_dump); | |
| 1607 | |
| 1608 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1609 const int error = WriteConfigMessage(true); | |
| 1610 RTC_DCHECK(error); | |
| 1611 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1612 | |
| 1613 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
| 1614 } | |
| 1615 | |
| 1514 int AudioProcessingImpl::StartDebugRecording( | 1616 int AudioProcessingImpl::StartDebugRecording( |
| 1515 const char filename[AudioProcessing::kMaxFilenameSize], | 1617 const char filename[AudioProcessing::kMaxFilenameSize], |
| 1516 int64_t max_log_size_bytes) { | 1618 int64_t max_log_size_bytes) { |
| 1517 // Run in a single-threaded manner. | 1619 // Run in a single-threaded manner. |
| 1518 rtc::CritScope cs_render(&crit_render_); | 1620 rtc::CritScope cs_render(&crit_render_); |
| 1519 rtc::CritScope cs_capture(&crit_capture_); | 1621 rtc::CritScope cs_capture(&crit_capture_); |
| 1520 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1622 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
| 1521 | 1623 |
| 1522 if (filename == nullptr) { | 1624 if (filename == nullptr) { |
| 1523 return kNullPointerError; | 1625 return kNullPointerError; |
| (...skipping 54 matching lines...) | |
| 1578 rtc::CritScope cs_render(&crit_render_); | 1680 rtc::CritScope cs_render(&crit_render_); |
| 1579 rtc::CritScope cs_capture(&crit_capture_); | 1681 rtc::CritScope cs_capture(&crit_capture_); |
| 1580 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1682 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
| 1581 return StartDebugRecording(stream, -1); | 1683 return StartDebugRecording(stream, -1); |
| 1582 } | 1684 } |
| 1583 | 1685 |
| 1584 int AudioProcessingImpl::StopDebugRecording() { | 1686 int AudioProcessingImpl::StopDebugRecording() { |
| 1585 // Run in a single-threaded manner. | 1687 // Run in a single-threaded manner. |
| 1586 rtc::CritScope cs_render(&crit_render_); | 1688 rtc::CritScope cs_render(&crit_render_); |
| 1587 rtc::CritScope cs_capture(&crit_capture_); | 1689 rtc::CritScope cs_capture(&crit_capture_); |
| 1690 aec_dump_ = nullptr; | |
| 1588 | 1691 |
| 1589 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1692 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1590 // We just return if recording hasn't started. | 1693 // We just return if recording hasn't started. |
| 1591 debug_dump_.debug_file->CloseFile(); | 1694 debug_dump_.debug_file->CloseFile(); |
| 1592 return kNoError; | 1695 return kNoError; |
| 1593 #else | 1696 #else |
| 1594 return kUnsupportedFunctionError; | 1697 return kUnsupportedFunctionError; |
| 1595 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1698 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1596 } | 1699 } |
| 1597 | 1700 |
| (...skipping 359 matching lines...) | |
| 1957 experiments_description += "LevelController;"; | 2060 experiments_description += "LevelController;"; |
| 1958 } | 2061 } |
| 1959 if (constants_.agc_clipped_level_min != kClippedLevelMin) { | 2062 if (constants_.agc_clipped_level_min != kClippedLevelMin) { |
| 1960 experiments_description += "AgcClippingLevelExperiment;"; | 2063 experiments_description += "AgcClippingLevelExperiment;"; |
| 1961 } | 2064 } |
| 1962 if (capture_nonlocked_.echo_canceller3_enabled) { | 2065 if (capture_nonlocked_.echo_canceller3_enabled) { |
| 1963 experiments_description += "EchoCanceller3;"; | 2066 experiments_description += "EchoCanceller3;"; |
| 1964 } | 2067 } |
| 1965 config.set_experiments_description(experiments_description); | 2068 config.set_experiments_description(experiments_description); |
| 1966 | 2069 |
| 2070 if (aec_dump_) { | |
| 2071 InternalAPMConfig apm_config; | |
| 2072 | |
| 2073 apm_config.aec_enabled = | |
| 2074 public_submodules_->echo_cancellation->is_enabled(); | |
| 2075 apm_config.aec_delay_agnostic_enabled = | |
| 2076 public_submodules_->echo_cancellation->is_delay_agnostic_enabled(); | |
| 2077 apm_config.aec_drift_compensation_enabled = | |
| 2078 public_submodules_->echo_cancellation->is_drift_compensation_enabled(); | |
| 2079 apm_config.aec_extended_filter_enabled = | |
| 2080 public_submodules_->echo_cancellation->is_extended_filter_enabled(); | |
| 2081 apm_config.aec_suppression_level = static_cast<int>( | |
| 2082 public_submodules_->echo_cancellation->suppression_level()); | |
| 2083 | |
| 2084 apm_config.aecm_enabled = | |
| 2085 public_submodules_->echo_control_mobile->is_enabled(); | |
| 2086 apm_config.aecm_comfort_noise_enabled = | |
| 2087 public_submodules_->echo_control_mobile->is_comfort_noise_enabled(); | |
| 2088 apm_config.aecm_routing_mode = static_cast<int>( | |
| 2089 public_submodules_->echo_control_mobile->routing_mode()); | |
| 2090 | |
| 2091 apm_config.agc_enabled = public_submodules_->gain_control->is_enabled(); | |
| 2092 apm_config.agc_mode = | |
| 2093 static_cast<int>(public_submodules_->gain_control->mode()); | |
| 2094 apm_config.agc_limiter_enabled = | |
| 2095 public_submodules_->gain_control->is_limiter_enabled(); | |
| 2096 apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc; | |
| 2097 | |
| 2098 apm_config.hpf_enabled = config_.high_pass_filter.enabled; | |
| 2099 | |
| 2100 apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled(); | |
| 2101 apm_config.ns_level = | |
| 2102 static_cast<int>(public_submodules_->noise_suppression->level()); | |
| 2103 | |
| 2104 apm_config.transient_suppression_enabled = | |
| 2105 capture_.transient_suppressor_enabled; | |
| 2106 apm_config.intelligibility_enhancer_enabled = | |
| 2107 capture_nonlocked_.intelligibility_enabled; | |
| 2108 apm_config.experiments_description = experiments_description; | |
| 2109 aec_dump_->WriteConfig(apm_config, forced); | |
| 2110 } | |
| 2111 | |
| 1967 ProtoString serialized_config = config.SerializeAsString(); | 2112 ProtoString serialized_config = config.SerializeAsString(); |
| 1968 if (!forced && | 2113 if (!forced && |
| 1969 debug_dump_.capture.last_serialized_config == serialized_config) { | 2114 debug_dump_.capture.last_serialized_config == serialized_config) { |
| 1970 return kNoError; | 2115 return kNoError; |
| 1971 } | 2116 } |
| 1972 | 2117 |
| 1973 debug_dump_.capture.last_serialized_config = serialized_config; | 2118 debug_dump_.capture.last_serialized_config = serialized_config; |
| 1974 | 2119 |
| 1975 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); | 2120 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
| 1976 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | 2121 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| (...skipping 25 matching lines...) | |
| 2002 previous_agc_level(0), | 2147 previous_agc_level(0), |
| 2003 echo_path_gain_change(false) {} | 2148 echo_path_gain_change(false) {} |
| 2004 | 2149 |
| 2005 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 2150 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
| 2006 | 2151 |
| 2007 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 2152 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
| 2008 | 2153 |
| 2009 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 2154 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
| 2010 | 2155 |
| 2011 } // namespace webrtc | 2156 } // namespace webrtc |