Chromium Code Reviews
| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 134 matching lines...) | |
| 145 | 145 |
| 146 bool is_enabled() const override { | 146 bool is_enabled() const override { |
| 147 return apm_->GetConfig().high_pass_filter.enabled; | 147 return apm_->GetConfig().high_pass_filter.enabled; |
| 148 } | 148 } |
| 149 | 149 |
| 150 private: | 150 private: |
| 151 AudioProcessingImpl* apm_; | 151 AudioProcessingImpl* apm_; |
| 152 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); | 152 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); |
| 153 }; | 153 }; |
| 154 | 154 |
| 155 webrtc::InternalAPMStreamsConfig ToStreamsConfig( | |
| 156 const ProcessingConfig& api_format) { | |
| 157 webrtc::InternalAPMStreamsConfig result; | |
| 158 result.input_sample_rate = api_format.input_stream().sample_rate_hz(); | |
| 159 result.input_num_channels = api_format.input_stream().num_channels(); | |
| 160 result.output_num_channels = api_format.output_stream().num_channels(); | |
| 161 result.render_input_num_channels = | |
| 162 api_format.reverse_input_stream().num_channels(); | |
| 163 result.render_input_sample_rate = | |
| 164 api_format.reverse_input_stream().sample_rate_hz(); | |
| 165 result.output_sample_rate = api_format.output_stream().sample_rate_hz(); | |
| 166 result.render_output_sample_rate = | |
| 167 api_format.reverse_output_stream().sample_rate_hz(); | |
| 168 result.render_output_num_channels = | |
| 169 api_format.reverse_output_stream().num_channels(); | |
| 170 return result; | |
| 171 } | |
| 172 | |
| 155 } // namespace | 173 } // namespace |
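The new ToStreamsConfig() helper above copies the per-stream sample rates and channel counts out of a ProcessingConfig so they can be handed to AecDump::WriteInitMessage(). A minimal caller-side sketch, using assumed example formats (48 kHz stereo capture, mono output) rather than values taken from this CL:

    ProcessingConfig api_format;
    api_format.input_stream() = StreamConfig(48000, 2);           // capture input
    api_format.output_stream() = StreamConfig(48000, 1);          // capture output
    api_format.reverse_input_stream() = StreamConfig(48000, 2);   // render input
    api_format.reverse_output_stream() = StreamConfig(48000, 2);  // render output

    InternalAPMStreamsConfig streams = ToStreamsConfig(api_format);
    // streams.input_sample_rate == 48000, streams.output_num_channels == 1, etc.
    // This is the struct that WriteInitMessage() receives in the code below.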
| 156 | 174 |
| 157 // Throughout webrtc, it's assumed that success is represented by zero. | 175 // Throughout webrtc, it's assumed that success is represented by zero. |
| 158 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 176 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
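The static_assert above guards the convention that lets error codes be tested and propagated directly. In particular, the RETURN_ON_ERR macro used throughout this file relies on it; its definition (earlier in the file, outside this hunk) is approximately:

    #define RETURN_ON_ERR(expr)  \
      do {                       \
        int err = (expr);        \
        if (err != kNoError) {   \
          return err;            \
        }                        \
      } while (0)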
| 159 | 177 |
| 160 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} | 178 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} |
| 161 | 179 |
| 162 bool AudioProcessingImpl::ApmSubmoduleStates::Update( | 180 bool AudioProcessingImpl::ApmSubmoduleStates::Update( |
| 163 bool low_cut_filter_enabled, | 181 bool low_cut_filter_enabled, |
| 164 bool echo_canceller_enabled, | 182 bool echo_canceller_enabled, |
| (...skipping 354 matching lines...) | |
| 519 InitializeEchoCanceller3(); | 537 InitializeEchoCanceller3(); |
| 520 | 538 |
| 521 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 539 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 522 if (debug_dump_.debug_file->is_open()) { | 540 if (debug_dump_.debug_file->is_open()) { |
| 523 int err = WriteInitMessage(); | 541 int err = WriteInitMessage(); |
| 524 if (err != kNoError) { | 542 if (err != kNoError) { |
| 525 return err; | 543 return err; |
| 526 } | 544 } |
| 527 } | 545 } |
| 528 #endif | 546 #endif |
| 529 | 547 if (aec_dump_) { |
| 548 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
| 549 } | |
| 530 return kNoError; | 550 return kNoError; |
| 531 } | 551 } |
| 532 | 552 |
| 533 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 553 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| 534 for (const auto& stream : config.streams) { | 554 for (const auto& stream : config.streams) { |
| 535 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 555 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
| 536 return kBadSampleRateError; | 556 return kBadSampleRateError; |
| 537 } | 557 } |
| 538 } | 558 } |
| 539 | 559 |
| (...skipping 277 matching lines...) | |
| 817 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 837 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 818 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 838 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 819 const size_t channel_size = | 839 const size_t channel_size = |
| 820 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 840 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| 821 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 841 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
| 822 ++i) | 842 ++i) |
| 823 msg->add_input_channel(src[i], channel_size); | 843 msg->add_input_channel(src[i], channel_size); |
| 824 } | 844 } |
| 825 #endif | 845 #endif |
| 826 | 846 |
| 847 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info; | |
| 848 if (aec_dump_) { | |
peah-webrtc (2017/04/19 12:30:29): Why not merge the if-statements on 848 and 852?
aleloi (2017/04/20 15:26:23): Oh, I thought I did that... Done! Although a bit o
| 849 stream_info = aec_dump_->GetCaptureStreamInfo(); | |
| 850 } | |
| 851 | |
| 852 if (aec_dump_) { | |
| 853 const size_t channel_size = | |
| 854 sizeof(float) * formats_.api_format.input_stream().num_frames(); | |
| 855 std::vector<rtc::ArrayView<const float>> src_view; | |
| 856 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | |
| 857 ++i) { | |
| 858 src_view.emplace_back(src[i], channel_size); | |
| 859 } | |
| 860 stream_info->AddInput(src_view); | |
| 861 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); | |
| 862 stream_info->set_drift( | |
| 863 public_submodules_->echo_cancellation->stream_drift_samples()); | |
| 864 stream_info->set_level(gain_control()->stream_analog_level()); | |
| 865 stream_info->set_keypress(capture_.key_pressed); | |
| 866 } | |
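Per the review discussion above, the two if (aec_dump_) blocks on the capture input side would collapse into a single conditional. A sketch of the merged form, which simply rearranges the statements above (not necessarily the final committed code):

    // Collect the capture-side input samples and metadata for the dump.
    std::unique_ptr<AecDump::CaptureStreamInfo> stream_info;
    if (aec_dump_) {
      stream_info = aec_dump_->GetCaptureStreamInfo();
      const size_t channel_size =
          sizeof(float) * formats_.api_format.input_stream().num_frames();
      std::vector<rtc::ArrayView<const float>> src_view;
      for (size_t i = 0;
           i < formats_.api_format.input_stream().num_channels(); ++i) {
        src_view.emplace_back(src[i], channel_size);
      }
      stream_info->AddInput(src_view);
      stream_info->set_delay(capture_nonlocked_.stream_delay_ms);
      stream_info->set_drift(
          public_submodules_->echo_cancellation->stream_drift_samples());
      stream_info->set_level(gain_control()->stream_analog_level());
      stream_info->set_keypress(capture_.key_pressed);
    }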
| 827 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 867 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
| 868 | |
|
peah-webrtc
2017/04/19 12:30:29
It would probably make sense to have the empty lin
aleloi
2017/04/20 15:26:23
Done.
| |
| 828 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 869 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 829 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 870 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
| 830 | 871 |
| 831 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 872 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 832 if (debug_dump_.debug_file->is_open()) { | 873 if (debug_dump_.debug_file->is_open()) { |
| 833 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 874 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 834 const size_t channel_size = | 875 const size_t channel_size = |
| 835 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 876 sizeof(float) * formats_.api_format.output_stream().num_frames(); |
| 836 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | 877 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
| 837 ++i) | 878 ++i) |
| 838 msg->add_output_channel(dest[i], channel_size); | 879 msg->add_output_channel(dest[i], channel_size); |
| 839 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 880 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 840 &debug_dump_.num_bytes_left_for_log_, | 881 &debug_dump_.num_bytes_left_for_log_, |
| 841 &crit_debug_, &debug_dump_.capture)); | 882 &crit_debug_, &debug_dump_.capture)); |
| 842 } | 883 } |
| 843 #endif | 884 #endif |
| 885 if (aec_dump_) { | |
| 886 const size_t channel_size = | |
| 887 sizeof(float) * formats_.api_format.output_stream().num_frames(); | |
| 888 std::vector<rtc::ArrayView<const float>> dest_view; | |
| 889 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | |
| 890 ++i) { | |
| 891 dest_view.emplace_back(dest[i], channel_size); | |
peah-webrtc (2017/04/19 12:30:29): This code construct using a vector causes heap allocation
aleloi (2017/04/20 15:26:23): Done.
| 892 } | |
| 893 RTC_DCHECK(stream_info); | |
| 894 stream_info->AddOutput(dest_view); | |
| 895 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); | |
| 896 } | |
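The heap-allocation concern refers to the std::vector<rtc::ArrayView<const float>> that is rebuilt on every processed 10 ms block. As a standalone illustration (an assumed helper, not code from this CL), wrapping planar float audio in ArrayView objects copies no samples, and reserving the container keeps the cost to at most one allocation per call; note also that ArrayView's size argument is an element count, so the sketch sizes the views in samples rather than the byte count (channel_size) reused from the protobuf path above.

    #include <cstddef>
    #include <vector>

    #include "webrtc/base/array_view.h"  // assumed include path for this era of the tree

    // Assumed illustrative helper: wrap planar channel pointers in views.
    std::vector<rtc::ArrayView<const float>> WrapPlanar(
        const float* const* channels,
        size_t num_channels,
        size_t samples_per_channel) {
      std::vector<rtc::ArrayView<const float>> views;
      views.reserve(num_channels);  // at most one allocation per call
      for (size_t i = 0; i < num_channels; ++i) {
        views.emplace_back(channels[i], samples_per_channel);
      }
      return views;
    }

Hoisting such a container into a reusable member, or using a fixed-capacity array bounded by the maximum channel count, would remove the per-call allocation entirely; which variant the author chose is not visible in this hunk.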
| 844 | 897 |
| 845 return kNoError; | 898 return kNoError; |
| 846 } | 899 } |
| 847 | 900 |
| 848 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { | 901 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { |
| 849 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), | 902 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), |
| 850 num_reverse_channels(), | 903 num_reverse_channels(), |
| 851 &aec_render_queue_buffer_); | 904 &aec_render_queue_buffer_); |
| 852 | 905 |
| 853 RTC_DCHECK_GE(160, audio->num_frames_per_band()); | 906 RTC_DCHECK_GE(160, audio->num_frames_per_band()); |
| (...skipping 217 matching lines...) | |
| 1071 rtc::CritScope cs_render(&crit_render_); | 1124 rtc::CritScope cs_render(&crit_render_); |
| 1072 RETURN_ON_ERR( | 1125 RETURN_ON_ERR( |
| 1073 MaybeInitializeCapture(processing_config, reinitialization_required)); | 1126 MaybeInitializeCapture(processing_config, reinitialization_required)); |
| 1074 } | 1127 } |
| 1075 rtc::CritScope cs_capture(&crit_capture_); | 1128 rtc::CritScope cs_capture(&crit_capture_); |
| 1076 if (frame->samples_per_channel_ != | 1129 if (frame->samples_per_channel_ != |
| 1077 formats_.api_format.input_stream().num_frames()) { | 1130 formats_.api_format.input_stream().num_frames()) { |
| 1078 return kBadDataLengthError; | 1131 return kBadDataLengthError; |
| 1079 } | 1132 } |
| 1080 | 1133 |
| 1134 std::unique_ptr<AecDump::CaptureStreamInfo> stream_info; | |
| 1135 if (aec_dump_) { | |
| 1136 stream_info = aec_dump_->GetCaptureStreamInfo(); | |
| 1137 RTC_DCHECK(stream_info); | |
| 1138 stream_info->AddInput(*frame); | |
| 1139 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); | |
| 1140 stream_info->set_drift( | |
| 1141 public_submodules_->echo_cancellation->stream_drift_samples()); | |
| 1142 stream_info->set_level(gain_control()->stream_analog_level()); | |
| 1143 stream_info->set_keypress(capture_.key_pressed); | |
| 1144 } | |
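For reference, the shape of the AecDump::CaptureStreamInfo interface that both the float and AudioFrame capture paths exercise can be inferred from the call sites in this CL. The following is a reconstruction from usage only, not the actual header:

    #include <vector>

    #include "webrtc/base/array_view.h"                        // assumed include path
    #include "webrtc/modules/include/module_common_types.h"    // AudioFrame (assumed path)

    // Reconstructed from call sites in this CL; NOT the real declaration.
    class CaptureStreamInfo {
     public:
      virtual ~CaptureStreamInfo() = default;
      // Planar float capture data, one view per channel.
      virtual void AddInput(const std::vector<rtc::ArrayView<const float>>& src) = 0;
      virtual void AddOutput(const std::vector<rtc::ArrayView<const float>>& dest) = 0;
      // Interleaved fixed-point capture data.
      virtual void AddInput(const AudioFrame& frame) = 0;
      virtual void AddOutput(const AudioFrame& frame) = 0;
      // Per-block metadata mirrored from the capture state.
      virtual void set_delay(int delay_ms) = 0;
      virtual void set_drift(int drift_samples) = 0;
      virtual void set_level(int analog_level) = 0;
      virtual void set_keypress(bool keypress) = 0;
    };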
| 1145 | |
| 1081 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1146 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1082 if (debug_dump_.debug_file->is_open()) { | 1147 if (debug_dump_.debug_file->is_open()) { |
| 1083 RETURN_ON_ERR(WriteConfigMessage(false)); | 1148 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 1084 | 1149 |
| 1085 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 1150 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 1086 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1151 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 1087 const size_t data_size = | 1152 const size_t data_size = |
| 1088 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1153 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1089 msg->set_input_data(frame->data_, data_size); | 1154 msg->set_input_data(frame->data_, data_size); |
| 1090 } | 1155 } |
| 1091 #endif | 1156 #endif |
| 1092 | 1157 |
| 1093 capture_.capture_audio->DeinterleaveFrom(frame); | 1158 capture_.capture_audio->DeinterleaveFrom(frame); |
| 1159 | |
peah-webrtc (2017/04/19 12:30:29): Remove empty line.
aleloi (2017/04/20 15:26:23): Done.
| 1094 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 1160 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 1095 capture_.capture_audio->InterleaveTo( | 1161 capture_.capture_audio->InterleaveTo( |
| 1096 frame, submodule_states_.CaptureMultiBandProcessingActive()); | 1162 frame, submodule_states_.CaptureMultiBandProcessingActive()); |
| 1097 | 1163 |
| 1164 if (aec_dump_) { | |
| 1165 stream_info->AddOutput(*frame); | |
peah-webrtc (2017/04/19 12:30:29): DCHECK on stream_info?
aleloi (2017/04/20 15:26:23): It's done just after stream_info is assigned to on
| 1166 aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); | |
| 1167 } | |
| 1098 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1168 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1099 if (debug_dump_.debug_file->is_open()) { | 1169 if (debug_dump_.debug_file->is_open()) { |
| 1100 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1170 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 1101 const size_t data_size = | 1171 const size_t data_size = |
| 1102 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1172 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1103 msg->set_output_data(frame->data_, data_size); | 1173 msg->set_output_data(frame->data_, data_size); |
| 1104 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1174 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1105 &debug_dump_.num_bytes_left_for_log_, | 1175 &debug_dump_.num_bytes_left_for_log_, |
| 1106 &crit_debug_, &debug_dump_.capture)); | 1176 &crit_debug_, &debug_dump_.capture)); |
| 1107 } | 1177 } |
| (...skipping 261 matching lines...) | |
| 1369 const size_t channel_size = | 1439 const size_t channel_size = |
| 1370 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1440 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| 1371 for (size_t i = 0; | 1441 for (size_t i = 0; |
| 1372 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 1442 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| 1373 msg->add_channel(src[i], channel_size); | 1443 msg->add_channel(src[i], channel_size); |
| 1374 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1444 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1375 &debug_dump_.num_bytes_left_for_log_, | 1445 &debug_dump_.num_bytes_left_for_log_, |
| 1376 &crit_debug_, &debug_dump_.render)); | 1446 &crit_debug_, &debug_dump_.render)); |
| 1377 } | 1447 } |
| 1378 #endif | 1448 #endif |
| 1449 if (aec_dump_) { | |
| 1450 std::vector<rtc::ArrayView<const float>> src_view; | |
| 1451 const size_t channel_size = | |
| 1452 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | |
| 1379 | 1453 |
| 1454 for (size_t i = 0; | |
| 1455 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) { | |
peah-webrtc (2017/04/19 12:30:29): See above comment about heap allocation
aleloi (2017/04/20 15:26:23): Done.
| 1456 src_view.emplace_back(src[i], channel_size); | |
| 1457 } | |
| 1458 aec_dump_->WriteRenderStreamMessage(src_view); | |
| 1459 } | |
| 1380 render_.render_audio->CopyFrom(src, | 1460 render_.render_audio->CopyFrom(src, |
| 1381 formats_.api_format.reverse_input_stream()); | 1461 formats_.api_format.reverse_input_stream()); |
| 1382 return ProcessRenderStreamLocked(); | 1462 return ProcessRenderStreamLocked(); |
| 1383 } | 1463 } |
| 1384 | 1464 |
| 1385 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 1465 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| 1386 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 1466 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); |
| 1387 rtc::CritScope cs(&crit_render_); | 1467 rtc::CritScope cs(&crit_render_); |
| 1388 if (frame == nullptr) { | 1468 if (frame == nullptr) { |
| 1389 return kNullPointerError; | 1469 return kNullPointerError; |
| (...skipping 32 matching lines...) | |
| 1422 audioproc::ReverseStream* msg = | 1502 audioproc::ReverseStream* msg = |
| 1423 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1503 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 1424 const size_t data_size = | 1504 const size_t data_size = |
| 1425 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1505 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1426 msg->set_data(frame->data_, data_size); | 1506 msg->set_data(frame->data_, data_size); |
| 1427 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1507 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1428 &debug_dump_.num_bytes_left_for_log_, | 1508 &debug_dump_.num_bytes_left_for_log_, |
| 1429 &crit_debug_, &debug_dump_.render)); | 1509 &crit_debug_, &debug_dump_.render)); |
| 1430 } | 1510 } |
| 1431 #endif | 1511 #endif |
| 1512 if (aec_dump_) { | |
| 1513 aec_dump_->WriteRenderStreamMessage(*frame); | |
| 1514 } | |
| 1515 | |
| 1432 render_.render_audio->DeinterleaveFrom(frame); | 1516 render_.render_audio->DeinterleaveFrom(frame); |
| 1433 RETURN_ON_ERR(ProcessRenderStreamLocked()); | 1517 RETURN_ON_ERR(ProcessRenderStreamLocked()); |
| 1434 render_.render_audio->InterleaveTo( | 1518 render_.render_audio->InterleaveTo( |
| 1435 frame, submodule_states_.RenderMultiBandProcessingActive()); | 1519 frame, submodule_states_.RenderMultiBandProcessingActive()); |
| 1436 return kNoError; | 1520 return kNoError; |
| 1437 } | 1521 } |
| 1438 | 1522 |
| 1439 int AudioProcessingImpl::ProcessRenderStreamLocked() { | 1523 int AudioProcessingImpl::ProcessRenderStreamLocked() { |
| 1440 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. | 1524 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. |
| 1441 if (submodule_states_.RenderMultiBandSubModulesActive() && | 1525 if (submodule_states_.RenderMultiBandSubModulesActive() && |
| (...skipping 63 matching lines...) | |
| 1505 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 1589 void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
| 1506 rtc::CritScope cs(&crit_capture_); | 1590 rtc::CritScope cs(&crit_capture_); |
| 1507 capture_.delay_offset_ms = offset; | 1591 capture_.delay_offset_ms = offset; |
| 1508 } | 1592 } |
| 1509 | 1593 |
| 1510 int AudioProcessingImpl::delay_offset_ms() const { | 1594 int AudioProcessingImpl::delay_offset_ms() const { |
| 1511 rtc::CritScope cs(&crit_capture_); | 1595 rtc::CritScope cs(&crit_capture_); |
| 1512 return capture_.delay_offset_ms; | 1596 return capture_.delay_offset_ms; |
| 1513 } | 1597 } |
| 1514 | 1598 |
| 1599 void AudioProcessingImpl::StartDebugRecording( | |
| 1600 std::unique_ptr<AecDump> aec_dump) { | |
| 1601 rtc::CritScope cs_render(&crit_render_); | |
| 1602 rtc::CritScope cs_capture(&crit_capture_); | |
| 1603 RTC_DCHECK(aec_dump); | |
| 1604 aec_dump_ = std::move(aec_dump); | |
| 1605 | |
| 1606 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1607 const int error = WriteConfigMessage(true); | |
peah-webrtc (2017/04/19 12:30:29): It would be nice to avoid having the #ifdef-endif
aleloi (2017/04/20 15:26:24): Yes, I've made an aec-dump-only version of WriteCo
| 1608 RTC_DCHECK(error); | |
| 1609 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1610 | |
| 1611 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
| 1612 } | |
| 1613 | |
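Caller-side usage of the new overload, as a sketch only; given an AudioProcessing* apm, and with the file-backed AecDump implementation and its factory being assumptions here rather than part of this change:

    // CreateFileAecDump() is a hypothetical factory for an AecDump implementation.
    std::unique_ptr<AecDump> dump = CreateFileAecDump("/tmp/apm.aecdump");
    apm->StartDebugRecording(std::move(dump));  // APM takes ownership; INIT + CONFIG are written
    // ... ProcessStream() / ProcessReverseStream() calls are now mirrored into the dump ...
    apm->StopDebugRecording();                  // drops the AecDump, ending the recording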
| 1515 int AudioProcessingImpl::StartDebugRecording( | 1614 int AudioProcessingImpl::StartDebugRecording( |
| 1516 const char filename[AudioProcessing::kMaxFilenameSize], | 1615 const char filename[AudioProcessing::kMaxFilenameSize], |
| 1517 int64_t max_log_size_bytes) { | 1616 int64_t max_log_size_bytes) { |
| 1518 // Run in a single-threaded manner. | 1617 // Run in a single-threaded manner. |
| 1519 rtc::CritScope cs_render(&crit_render_); | 1618 rtc::CritScope cs_render(&crit_render_); |
| 1520 rtc::CritScope cs_capture(&crit_capture_); | 1619 rtc::CritScope cs_capture(&crit_capture_); |
| 1521 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1620 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
| 1522 | 1621 |
| 1523 if (filename == nullptr) { | 1622 if (filename == nullptr) { |
| 1524 return kNullPointerError; | 1623 return kNullPointerError; |
| (...skipping 54 matching lines...) | |
| 1579 rtc::CritScope cs_render(&crit_render_); | 1678 rtc::CritScope cs_render(&crit_render_); |
| 1580 rtc::CritScope cs_capture(&crit_capture_); | 1679 rtc::CritScope cs_capture(&crit_capture_); |
| 1581 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1680 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
| 1582 return StartDebugRecording(stream, -1); | 1681 return StartDebugRecording(stream, -1); |
| 1583 } | 1682 } |
| 1584 | 1683 |
| 1585 int AudioProcessingImpl::StopDebugRecording() { | 1684 int AudioProcessingImpl::StopDebugRecording() { |
| 1586 // Run in a single-threaded manner. | 1685 // Run in a single-threaded manner. |
| 1587 rtc::CritScope cs_render(&crit_render_); | 1686 rtc::CritScope cs_render(&crit_render_); |
| 1588 rtc::CritScope cs_capture(&crit_capture_); | 1687 rtc::CritScope cs_capture(&crit_capture_); |
| 1688 aec_dump_ = nullptr; | |
peah-webrtc (2017/04/19 12:30:29): aec_dump_.reset()?
aleloi (2017/04/20 15:26:24): Done.
| 1589 | 1689 |
| 1590 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1690 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1591 // We just return if recording hasn't started. | 1691 // We just return if recording hasn't started. |
| 1592 debug_dump_.debug_file->CloseFile(); | 1692 debug_dump_.debug_file->CloseFile(); |
| 1593 return kNoError; | 1693 return kNoError; |
| 1594 #else | 1694 #else |
| 1595 return kUnsupportedFunctionError; | 1695 return kUnsupportedFunctionError; |
| 1596 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1696 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1597 } | 1697 } |
| 1598 | 1698 |
| (...skipping 359 matching lines...) | |
| 1958 experiments_description += "LevelController;"; | 2058 experiments_description += "LevelController;"; |
| 1959 } | 2059 } |
| 1960 if (constants_.agc_clipped_level_min != kClippedLevelMin) { | 2060 if (constants_.agc_clipped_level_min != kClippedLevelMin) { |
| 1961 experiments_description += "AgcClippingLevelExperiment;"; | 2061 experiments_description += "AgcClippingLevelExperiment;"; |
| 1962 } | 2062 } |
| 1963 if (capture_nonlocked_.echo_canceller3_enabled) { | 2063 if (capture_nonlocked_.echo_canceller3_enabled) { |
| 1964 experiments_description += "EchoCanceller3;"; | 2064 experiments_description += "EchoCanceller3;"; |
| 1965 } | 2065 } |
| 1966 config.set_experiments_description(experiments_description); | 2066 config.set_experiments_description(experiments_description); |
| 1967 | 2067 |
| 2068 if (aec_dump_) { | |
| 2069 InternalAPMConfig apm_config; | |
| 2070 | |
| 2071 apm_config.aec_enabled = | |
| 2072 public_submodules_->echo_cancellation->is_enabled(); | |
| 2073 apm_config.aec_delay_agnostic_enabled = | |
| 2074 public_submodules_->echo_cancellation->is_delay_agnostic_enabled(); | |
| 2075 apm_config.aec_drift_compensation_enabled = | |
| 2076 public_submodules_->echo_cancellation->is_drift_compensation_enabled(); | |
| 2077 apm_config.aec_extended_filter_enabled = | |
| 2078 public_submodules_->echo_cancellation->is_extended_filter_enabled(); | |
| 2079 apm_config.aec_suppression_level = static_cast<int>( | |
| 2080 public_submodules_->echo_cancellation->suppression_level()); | |
| 2081 | |
| 2082 apm_config.aecm_enabled = | |
| 2083 public_submodules_->echo_control_mobile->is_enabled(); | |
| 2084 apm_config.aecm_comfort_noise_enabled = | |
| 2085 public_submodules_->echo_control_mobile->is_comfort_noise_enabled(); | |
| 2086 apm_config.aecm_routing_mode = static_cast<int>( | |
| 2087 public_submodules_->echo_control_mobile->routing_mode()); | |
| 2088 | |
| 2089 apm_config.agc_enabled = public_submodules_->gain_control->is_enabled(); | |
| 2090 apm_config.agc_mode = | |
| 2091 static_cast<int>(public_submodules_->gain_control->mode()); | |
| 2092 apm_config.agc_limiter_enabled = | |
| 2093 public_submodules_->gain_control->is_limiter_enabled(); | |
| 2094 apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc; | |
| 2095 | |
| 2096 apm_config.hpf_enabled = config_.high_pass_filter.enabled; | |
| 2097 | |
| 2098 apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled(); | |
| 2099 apm_config.ns_level = | |
| 2100 static_cast<int>(public_submodules_->noise_suppression->level()); | |
| 2101 | |
| 2102 apm_config.transient_suppression_enabled = | |
| 2103 capture_.transient_suppressor_enabled; | |
| 2104 apm_config.intelligibility_enhancer_enabled = | |
| 2105 capture_nonlocked_.intelligibility_enabled; | |
| 2106 apm_config.experiments_description = experiments_description; | |
| 2107 aec_dump_->WriteConfig(apm_config, forced); | |
| 2108 } | |
| 2109 | |
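The forced flag passed to aec_dump_->WriteConfig() mirrors the protobuf path just below, which skips the write when the serialized config is unchanged. One way an AecDump implementation could honor the flag, assuming InternalAPMConfig is copyable and an equality helper exists (neither is shown in this CL):

    // Sketch of a hypothetical AecDump implementation method; MyAecDump,
    // ConfigsEqual() and WriteConfigEventToFile() are assumed names.
    void MyAecDump::WriteConfig(const InternalAPMConfig& config, bool forced) {
      // last_config_ is an rtc::Optional<InternalAPMConfig> member.
      if (!forced && last_config_ && ConfigsEqual(*last_config_, config)) {
        return;  // unchanged since the previous write; skip the redundant CONFIG event
      }
      last_config_ = config;
      WriteConfigEventToFile(config);
    }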
| 1968 ProtoString serialized_config = config.SerializeAsString(); | 2110 ProtoString serialized_config = config.SerializeAsString(); |
| 1969 if (!forced && | 2111 if (!forced && |
| 1970 debug_dump_.capture.last_serialized_config == serialized_config) { | 2112 debug_dump_.capture.last_serialized_config == serialized_config) { |
| 1971 return kNoError; | 2113 return kNoError; |
| 1972 } | 2114 } |
| 1973 | 2115 |
| 1974 debug_dump_.capture.last_serialized_config = serialized_config; | 2116 debug_dump_.capture.last_serialized_config = serialized_config; |
| 1975 | 2117 |
| 1976 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); | 2118 debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
| 1977 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); | 2119 debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| (...skipping 25 matching lines...) | |
| 2003 previous_agc_level(0), | 2145 previous_agc_level(0), |
| 2004 echo_path_gain_change(false) {} | 2146 echo_path_gain_change(false) {} |
| 2005 | 2147 |
| 2006 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 2148 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
| 2007 | 2149 |
| 2008 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 2150 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
| 2009 | 2151 |
| 2010 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 2152 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
| 2011 | 2153 |
| 2012 } // namespace webrtc | 2154 } // namespace webrtc |