Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 137 matching lines...) | |
| 148 | 148 |
| 149 bool is_enabled() const override { | 149 bool is_enabled() const override { |
| 150 return apm_->GetConfig().high_pass_filter.enabled; | 150 return apm_->GetConfig().high_pass_filter.enabled; |
| 151 } | 151 } |
| 152 | 152 |
| 153 private: | 153 private: |
| 154 AudioProcessingImpl* apm_; | 154 AudioProcessingImpl* apm_; |
| 155 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); | 155 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); |
| 156 }; | 156 }; |
| 157 | 157 |
| 158 webrtc::InternalAPMStreamsConfig ToStreamsConfig( | |
| 159 const ProcessingConfig& api_format) { | |
| 160 webrtc::InternalAPMStreamsConfig result; | |
| 161 result.input_sample_rate = api_format.input_stream().sample_rate_hz(); | |
| 162 result.input_num_channels = api_format.input_stream().num_channels(); | |
| 163 result.output_num_channels = api_format.output_stream().num_channels(); | |
| 164 result.render_input_num_channels = | |
| 165 api_format.reverse_input_stream().num_channels(); | |
| 166 result.render_input_sample_rate = | |
| 167 api_format.reverse_input_stream().sample_rate_hz(); | |
| 168 result.output_sample_rate = api_format.output_stream().sample_rate_hz(); | |
| 169 result.render_output_sample_rate = | |
| 170 api_format.reverse_output_stream().sample_rate_hz(); | |
| 171 result.render_output_num_channels = | |
| 172 api_format.reverse_output_stream().num_channels(); | |
| 173 return result; | |
| 174 } | |
| 158 } // namespace | 175 } // namespace |
| 159 | 176 |
| 160 // Throughout webrtc, it's assumed that success is represented by zero. | 177 // Throughout webrtc, it's assumed that success is represented by zero. |
| 161 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 178 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
| 162 | 179 |
| 163 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} | 180 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} |
| 164 | 181 |
| 165 bool AudioProcessingImpl::ApmSubmoduleStates::Update( | 182 bool AudioProcessingImpl::ApmSubmoduleStates::Update( |
| 166 bool low_cut_filter_enabled, | 183 bool low_cut_filter_enabled, |
| 167 bool echo_canceller_enabled, | 184 bool echo_canceller_enabled, |
| (...skipping 354 matching lines...) | |
| 522 InitializeEchoCanceller3(); | 539 InitializeEchoCanceller3(); |
| 523 | 540 |
| 524 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 541 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 525 if (debug_dump_.debug_file->is_open()) { | 542 if (debug_dump_.debug_file->is_open()) { |
| 526 int err = WriteInitMessage(); | 543 int err = WriteInitMessage(); |
| 527 if (err != kNoError) { | 544 if (err != kNoError) { |
| 528 return err; | 545 return err; |
| 529 } | 546 } |
| 530 } | 547 } |
| 531 #endif | 548 #endif |
| 532 | 549 if (aec_dump_) { |
| 550 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
| 551 } | |
| 533 return kNoError; | 552 return kNoError; |
| 534 } | 553 } |
| 535 | 554 |
| 536 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 555 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| 537 for (const auto& stream : config.streams) { | 556 for (const auto& stream : config.streams) { |
| 538 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 557 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { |
| 539 return kBadSampleRateError; | 558 return kBadSampleRateError; |
| 540 } | 559 } |
| 541 } | 560 } |
| 542 | 561 |
| (...skipping 280 matching lines...) | |
| 823 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 842 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 824 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 843 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 825 const size_t channel_size = | 844 const size_t channel_size = |
| 826 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 845 sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| 827 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 846 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
| 828 ++i) | 847 ++i) |
| 829 msg->add_input_channel(src[i], channel_size); | 848 msg->add_input_channel(src[i], channel_size); |
| 830 } | 849 } |
| 831 #endif | 850 #endif |
| 832 | 851 |
| 852 if (aec_dump_) { | |
| 853 RecordUnprocessedCaptureStream(src); | |
| 854 } | |
| 855 | |
| 833 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 856 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
| 834 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 857 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 835 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 858 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
| 836 | 859 |
| 837 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 860 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 838 if (debug_dump_.debug_file->is_open()) { | 861 if (debug_dump_.debug_file->is_open()) { |
| 839 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 862 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 840 const size_t channel_size = | 863 const size_t channel_size = |
| 841 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 864 sizeof(float) * formats_.api_format.output_stream().num_frames(); |
| 842 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | 865 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
| 843 ++i) | 866 ++i) |
| 844 msg->add_output_channel(dest[i], channel_size); | 867 msg->add_output_channel(dest[i], channel_size); |
| 845 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 868 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 846 &debug_dump_.num_bytes_left_for_log_, | 869 &debug_dump_.num_bytes_left_for_log_, |
| 847 &crit_debug_, &debug_dump_.capture)); | 870 &crit_debug_, &debug_dump_.capture)); |
| 848 } | 871 } |
| 849 #endif | 872 #endif |
| 850 | 873 if (aec_dump_) { |
| 874 RecordProcessedCaptureStream(dest); | |
| 875 } | |
| 851 return kNoError; | 876 return kNoError; |
| 852 } | 877 } |
| 853 | 878 |
| 854 void AudioProcessingImpl::QueueBandedRenderAudio(AudioBuffer* audio) { | 879 void AudioProcessingImpl::QueueBandedRenderAudio(AudioBuffer* audio) { |
| 855 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), | 880 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), |
| 856 num_reverse_channels(), | 881 num_reverse_channels(), |
| 857 &aec_render_queue_buffer_); | 882 &aec_render_queue_buffer_); |
| 858 | 883 |
| 859 RTC_DCHECK_GE(160, audio->num_frames_per_band()); | 884 RTC_DCHECK_GE(160, audio->num_frames_per_band()); |
| 860 | 885 |
| (...skipping 218 matching lines...) | |
| 1079 rtc::CritScope cs_render(&crit_render_); | 1104 rtc::CritScope cs_render(&crit_render_); |
| 1080 RETURN_ON_ERR( | 1105 RETURN_ON_ERR( |
| 1081 MaybeInitializeCapture(processing_config, reinitialization_required)); | 1106 MaybeInitializeCapture(processing_config, reinitialization_required)); |
| 1082 } | 1107 } |
| 1083 rtc::CritScope cs_capture(&crit_capture_); | 1108 rtc::CritScope cs_capture(&crit_capture_); |
| 1084 if (frame->samples_per_channel_ != | 1109 if (frame->samples_per_channel_ != |
| 1085 formats_.api_format.input_stream().num_frames()) { | 1110 formats_.api_format.input_stream().num_frames()) { |
| 1086 return kBadDataLengthError; | 1111 return kBadDataLengthError; |
| 1087 } | 1112 } |
| 1088 | 1113 |
| 1114 if (aec_dump_) { | |
| 1115 RecordUnprocessedCaptureStream(*frame); | |
| 1116 } | |
| 1117 | |
| 1089 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1118 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1090 if (debug_dump_.debug_file->is_open()) { | 1119 if (debug_dump_.debug_file->is_open()) { |
| 1091 RETURN_ON_ERR(WriteConfigMessage(false)); | 1120 RETURN_ON_ERR(WriteConfigMessage(false)); |
| 1092 | 1121 |
| 1093 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 1122 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| 1094 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1123 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 1095 const size_t data_size = | 1124 const size_t data_size = |
| 1096 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1125 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1097 msg->set_input_data(frame->data_, data_size); | 1126 msg->set_input_data(frame->data_, data_size); |
| 1098 } | 1127 } |
| 1099 #endif | 1128 #endif |
| 1100 | 1129 |
| 1101 capture_.capture_audio->DeinterleaveFrom(frame); | 1130 capture_.capture_audio->DeinterleaveFrom(frame); |
| 1102 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 1131 RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
| 1103 capture_.capture_audio->InterleaveTo( | 1132 capture_.capture_audio->InterleaveTo( |
| 1104 frame, submodule_states_.CaptureMultiBandProcessingActive()); | 1133 frame, submodule_states_.CaptureMultiBandProcessingActive()); |
| 1105 | 1134 |
| 1135 if (aec_dump_) { | |
| 1136 RecordProcessedCaptureStream(*frame); | |
| 1137 } | |
| 1106 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1138 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1107 if (debug_dump_.debug_file->is_open()) { | 1139 if (debug_dump_.debug_file->is_open()) { |
| 1108 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1140 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| 1109 const size_t data_size = | 1141 const size_t data_size = |
| 1110 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1142 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1111 msg->set_output_data(frame->data_, data_size); | 1143 msg->set_output_data(frame->data_, data_size); |
| 1112 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1144 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1113 &debug_dump_.num_bytes_left_for_log_, | 1145 &debug_dump_.num_bytes_left_for_log_, |
| 1114 &crit_debug_, &debug_dump_.capture)); | 1146 &crit_debug_, &debug_dump_.capture)); |
| 1115 } | 1147 } |
| (...skipping 261 matching lines...) | |
| 1377 const size_t channel_size = | 1409 const size_t channel_size = |
| 1378 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1410 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| 1379 for (size_t i = 0; | 1411 for (size_t i = 0; |
| 1380 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 1412 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| 1381 msg->add_channel(src[i], channel_size); | 1413 msg->add_channel(src[i], channel_size); |
| 1382 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1414 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1383 &debug_dump_.num_bytes_left_for_log_, | 1415 &debug_dump_.num_bytes_left_for_log_, |
| 1384 &crit_debug_, &debug_dump_.render)); | 1416 &crit_debug_, &debug_dump_.render)); |
| 1385 } | 1417 } |
| 1386 #endif | 1418 #endif |
| 1387 | 1419 if (aec_dump_) { |
| 1420 const size_t channel_size = | |
| 1421 formats_.api_format.reverse_input_stream().num_frames(); | |
| 1422 const size_t num_channels = | |
| 1423 formats_.api_format.reverse_input_stream().num_channels(); | |
| 1424 aec_dump_->WriteRenderStreamMessage( | |
| 1425 FloatAudioFrame(src, num_channels, channel_size)); | |
| 1426 } | |
| 1388 render_.render_audio->CopyFrom(src, | 1427 render_.render_audio->CopyFrom(src, |
| 1389 formats_.api_format.reverse_input_stream()); | 1428 formats_.api_format.reverse_input_stream()); |
| 1390 return ProcessRenderStreamLocked(); | 1429 return ProcessRenderStreamLocked(); |
| 1391 } | 1430 } |
| 1392 | 1431 |
| 1393 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 1432 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| 1394 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 1433 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); |
| 1395 rtc::CritScope cs(&crit_render_); | 1434 rtc::CritScope cs(&crit_render_); |
| 1396 if (frame == nullptr) { | 1435 if (frame == nullptr) { |
| 1397 return kNullPointerError; | 1436 return kNullPointerError; |
| (...skipping 32 matching lines...) | |
| 1430 audioproc::ReverseStream* msg = | 1469 audioproc::ReverseStream* msg = |
| 1431 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1470 debug_dump_.render.event_msg->mutable_reverse_stream(); |
| 1432 const size_t data_size = | 1471 const size_t data_size = |
| 1433 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1472 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| 1434 msg->set_data(frame->data_, data_size); | 1473 msg->set_data(frame->data_, data_size); |
| 1435 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1474 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| 1436 &debug_dump_.num_bytes_left_for_log_, | 1475 &debug_dump_.num_bytes_left_for_log_, |
| 1437 &crit_debug_, &debug_dump_.render)); | 1476 &crit_debug_, &debug_dump_.render)); |
| 1438 } | 1477 } |
| 1439 #endif | 1478 #endif |
| 1479 if (aec_dump_) { | |
| 1480 aec_dump_->WriteRenderStreamMessage(*frame); | |
| 1481 } | |
| 1482 | |
| 1440 render_.render_audio->DeinterleaveFrom(frame); | 1483 render_.render_audio->DeinterleaveFrom(frame); |
| 1441 RETURN_ON_ERR(ProcessRenderStreamLocked()); | 1484 RETURN_ON_ERR(ProcessRenderStreamLocked()); |
| 1442 render_.render_audio->InterleaveTo( | 1485 render_.render_audio->InterleaveTo( |
| 1443 frame, submodule_states_.RenderMultiBandProcessingActive()); | 1486 frame, submodule_states_.RenderMultiBandProcessingActive()); |
| 1444 return kNoError; | 1487 return kNoError; |
| 1445 } | 1488 } |
| 1446 | 1489 |
| 1447 int AudioProcessingImpl::ProcessRenderStreamLocked() { | 1490 int AudioProcessingImpl::ProcessRenderStreamLocked() { |
| 1448 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. | 1491 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. |
| 1449 | 1492 |
| (...skipping 66 matching lines...) | |
| 1516 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 1559 void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
| 1517 rtc::CritScope cs(&crit_capture_); | 1560 rtc::CritScope cs(&crit_capture_); |
| 1518 capture_.delay_offset_ms = offset; | 1561 capture_.delay_offset_ms = offset; |
| 1519 } | 1562 } |
| 1520 | 1563 |
| 1521 int AudioProcessingImpl::delay_offset_ms() const { | 1564 int AudioProcessingImpl::delay_offset_ms() const { |
| 1522 rtc::CritScope cs(&crit_capture_); | 1565 rtc::CritScope cs(&crit_capture_); |
| 1523 return capture_.delay_offset_ms; | 1566 return capture_.delay_offset_ms; |
| 1524 } | 1567 } |
| 1525 | 1568 |
| 1569 void AudioProcessingImpl::AttachAecDump(std::unique_ptr<AecDump> aec_dump) { | |
| 1570 std::unique_ptr<AecDump> old_aec_dump = nullptr; | |
|
peah-webrtc 2017/05/17 04:43:02
You should skip the assignment here. It is not needed.
| 1571 { | |
| 1572 rtc::CritScope cs_render(&crit_render_); | |
| 1573 rtc::CritScope cs_capture(&crit_capture_); | |
| 1574 old_aec_dump = std::move(aec_dump_); | |
| 1575 RTC_DCHECK(aec_dump); | |
| 1576 aec_dump_ = std::move(aec_dump); | |
| 1577 aec_dump_->WriteConfig(CollectApmConfig(), true); | |
| 1578 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
| 1579 } | |
| 1580 } | |
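
For reference, the reviewer's point about line 1570 is a general `std::unique_ptr` property: a default-constructed `unique_ptr` is already null, so the explicit `= nullptr` initializer adds nothing; the local only exists to keep the old dump alive past the lock scope. A minimal standalone sketch of that shape, with illustrative names (not the WebRTC code itself):

```cpp
#include <memory>
#include <utility>

// Illustrative stand-in for AecDump; not a WebRTC type.
struct Dump {};

struct Holder {
  std::unique_ptr<Dump> dump_;

  void Attach(std::unique_ptr<Dump> dump) {
    // A default-constructed unique_ptr is already null, so no "= nullptr"
    // initializer is needed. Its only job is to keep the previous object
    // alive until after the (simulated) lock scope below has ended.
    std::unique_ptr<Dump> old_dump;
    {
      // ... the render/capture locks would be held here in the real code ...
      old_dump = std::move(dump_);
      dump_ = std::move(dump);
    }
    // old_dump is destroyed when the function returns, outside the locks.
  }
};
```
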
| 1581 | |
| 1582 void AudioProcessingImpl::DetachAecDump() { | |
| 1583 // The d-tor of a task-queue based AecDump blocks until all pending | |
| 1584 // tasks are done. This construction avoids blocking while holding | |
| 1585 // the render and capture locks. | |
| 1586 std::unique_ptr<AecDump> aec_dump = nullptr; | |
| 1587 { | |
| 1588 rtc::CritScope cs_render(&crit_render_); | |
| 1589 rtc::CritScope cs_capture(&crit_capture_); | |
| 1590 aec_dump = std::move(aec_dump_); | |
| 1591 } | |
| 1592 } | |
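
The comment in `DetachAecDump` describes a common idiom: move the owning pointer out while the locks are held, then let its potentially blocking destructor run after the lock scope ends. A minimal standalone sketch of that idiom, using `std::mutex` in place of `rtc::CritScope` and illustrative names rather than the WebRTC types:

```cpp
#include <memory>
#include <mutex>
#include <utility>

// Stand-in for an object whose destructor may block, e.g. an AecDump that
// waits for its task queue to drain.
struct BlockingDump {
  ~BlockingDump() { /* may block until pending work is flushed */ }
};

class Owner {
 public:
  void Detach() {
    std::unique_ptr<BlockingDump> local;
    {
      std::lock_guard<std::mutex> lock(mu_);  // analogous to the render/capture locks
      local = std::move(dump_);               // only the pointer move happens under the lock
    }
    // "local" is destroyed here, so the potentially blocking destructor
    // runs without the lock held.
  }

 private:
  std::mutex mu_;
  std::unique_ptr<BlockingDump> dump_;
};
```
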
| 1593 | |
| 1526 int AudioProcessingImpl::StartDebugRecording( | 1594 int AudioProcessingImpl::StartDebugRecording( |
| 1527 const char filename[AudioProcessing::kMaxFilenameSize], | 1595 const char filename[AudioProcessing::kMaxFilenameSize], |
| 1528 int64_t max_log_size_bytes) { | 1596 int64_t max_log_size_bytes) { |
| 1529 // Run in a single-threaded manner. | 1597 // Run in a single-threaded manner. |
| 1530 rtc::CritScope cs_render(&crit_render_); | 1598 rtc::CritScope cs_render(&crit_render_); |
| 1531 rtc::CritScope cs_capture(&crit_capture_); | 1599 rtc::CritScope cs_capture(&crit_capture_); |
| 1532 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1600 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
| 1533 | 1601 |
| 1534 if (filename == nullptr) { | 1602 if (filename == nullptr) { |
| 1535 return kNullPointerError; | 1603 return kNullPointerError; |
| (...skipping 51 matching lines...) | |
| 1587 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( | 1655 int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
| 1588 rtc::PlatformFile handle) { | 1656 rtc::PlatformFile handle) { |
| 1589 // Run in a single-threaded manner. | 1657 // Run in a single-threaded manner. |
| 1590 rtc::CritScope cs_render(&crit_render_); | 1658 rtc::CritScope cs_render(&crit_render_); |
| 1591 rtc::CritScope cs_capture(&crit_capture_); | 1659 rtc::CritScope cs_capture(&crit_capture_); |
| 1592 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1660 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
| 1593 return StartDebugRecording(stream, -1); | 1661 return StartDebugRecording(stream, -1); |
| 1594 } | 1662 } |
| 1595 | 1663 |
| 1596 int AudioProcessingImpl::StopDebugRecording() { | 1664 int AudioProcessingImpl::StopDebugRecording() { |
| 1665 DetachAecDump(); | |
| 1666 | |
| 1667 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1597 // Run in a single-threaded manner. | 1668 // Run in a single-threaded manner. |
| 1598 rtc::CritScope cs_render(&crit_render_); | 1669 rtc::CritScope cs_render(&crit_render_); |
| 1599 rtc::CritScope cs_capture(&crit_capture_); | 1670 rtc::CritScope cs_capture(&crit_capture_); |
| 1600 | |
| 1601 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | |
| 1602 // We just return if recording hasn't started. | 1671 // We just return if recording hasn't started. |
| 1603 debug_dump_.debug_file->CloseFile(); | 1672 debug_dump_.debug_file->CloseFile(); |
| 1604 return kNoError; | 1673 return kNoError; |
| 1605 #else | 1674 #else |
| 1606 return kUnsupportedFunctionError; | 1675 return kUnsupportedFunctionError; |
| 1607 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1676 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1608 } | 1677 } |
| 1609 | 1678 |
| 1610 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics() { | 1679 AudioProcessing::AudioProcessingStatistics::AudioProcessingStatistics() { |
| 1611 residual_echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); | 1680 residual_echo_return_loss.Set(-100.0f, -100.0f, -100.0f, -100.0f); |
| (...skipping 229 matching lines...) | |
| 1841 capture_.last_stream_delay_ms = 0; | 1910 capture_.last_stream_delay_ms = 0; |
| 1842 | 1911 |
| 1843 if (capture_.aec_system_delay_jumps > -1) { | 1912 if (capture_.aec_system_delay_jumps > -1) { |
| 1844 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 1913 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", |
| 1845 capture_.aec_system_delay_jumps, 51); | 1914 capture_.aec_system_delay_jumps, 51); |
| 1846 } | 1915 } |
| 1847 capture_.aec_system_delay_jumps = -1; | 1916 capture_.aec_system_delay_jumps = -1; |
| 1848 capture_.last_aec_system_delay_ms = 0; | 1917 capture_.last_aec_system_delay_ms = 0; |
| 1849 } | 1918 } |
| 1850 | 1919 |
| 1920 InternalAPMConfig AudioProcessingImpl::CollectApmConfig() const { | |
| 1921 std::string experiments_description = | |
| 1922 public_submodules_->echo_cancellation->GetExperimentsDescription(); | |
| 1923 // TODO(peah): Add semicolon-separated concatenations of experiment | |
| 1924 // descriptions for other submodules. | |
| 1925 if (capture_nonlocked_.level_controller_enabled) { | |
| 1926 experiments_description += "LevelController;"; | |
| 1927 } | |
| 1928 if (constants_.agc_clipped_level_min != kClippedLevelMin) { | |
| 1929 experiments_description += "AgcClippingLevelExperiment;"; | |
| 1930 } | |
| 1931 if (capture_nonlocked_.echo_canceller3_enabled) { | |
| 1932 experiments_description += "EchoCanceller3;"; | |
| 1933 } | |
| 1934 | |
| 1935 InternalAPMConfig apm_config; | |
| 1936 | |
| 1937 apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled(); | |
| 1938 apm_config.aec_delay_agnostic_enabled = | |
| 1939 public_submodules_->echo_cancellation->is_delay_agnostic_enabled(); | |
| 1940 apm_config.aec_drift_compensation_enabled = | |
| 1941 public_submodules_->echo_cancellation->is_drift_compensation_enabled(); | |
| 1942 apm_config.aec_extended_filter_enabled = | |
| 1943 public_submodules_->echo_cancellation->is_extended_filter_enabled(); | |
| 1944 apm_config.aec_suppression_level = static_cast<int>( | |
| 1945 public_submodules_->echo_cancellation->suppression_level()); | |
| 1946 | |
| 1947 apm_config.aecm_enabled = | |
| 1948 public_submodules_->echo_control_mobile->is_enabled(); | |
| 1949 apm_config.aecm_comfort_noise_enabled = | |
| 1950 public_submodules_->echo_control_mobile->is_comfort_noise_enabled(); | |
| 1951 apm_config.aecm_routing_mode = | |
| 1952 static_cast<int>(public_submodules_->echo_control_mobile->routing_mode()); | |
| 1953 | |
| 1954 apm_config.agc_enabled = public_submodules_->gain_control->is_enabled(); | |
| 1955 apm_config.agc_mode = | |
| 1956 static_cast<int>(public_submodules_->gain_control->mode()); | |
| 1957 apm_config.agc_limiter_enabled = | |
| 1958 public_submodules_->gain_control->is_limiter_enabled(); | |
| 1959 apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc; | |
| 1960 | |
| 1961 apm_config.hpf_enabled = config_.high_pass_filter.enabled; | |
| 1962 | |
| 1963 apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled(); | |
| 1964 apm_config.ns_level = | |
| 1965 static_cast<int>(public_submodules_->noise_suppression->level()); | |
| 1966 | |
| 1967 apm_config.transient_suppression_enabled = | |
| 1968 capture_.transient_suppressor_enabled; | |
| 1969 apm_config.intelligibility_enhancer_enabled = | |
| 1970 capture_nonlocked_.intelligibility_enabled; | |
| 1971 apm_config.experiments_description = experiments_description; | |
| 1972 return apm_config; | |
| 1973 } | |
| 1974 | |
| 1975 void AudioProcessingImpl::RecordUnprocessedCaptureStream( | |
| 1976 const float* const* src) { | |
| 1977 RTC_DCHECK(aec_dump_); | |
| 1978 aec_dump_->WriteConfig(CollectApmConfig(), false); | |
| 1979 | |
| 1980 const size_t channel_size = formats_.api_format.input_stream().num_frames(); | |
| 1981 const size_t num_channels = formats_.api_format.input_stream().num_channels(); | |
| 1982 aec_dump_->AddCaptureStreamInput( | |
| 1983 FloatAudioFrame(src, num_channels, channel_size)); | |
| 1984 RecordAudioProcessingState(); | |
| 1985 } | |
| 1986 | |
| 1987 void AudioProcessingImpl::RecordUnprocessedCaptureStream( | |
| 1988 const AudioFrame& capture_frame) { | |
| 1989 RTC_DCHECK(aec_dump_); | |
| 1990 | |
| 1991 aec_dump_->AddCaptureStreamInput(capture_frame); | |
| 1992 RecordAudioProcessingState(); | |
| 1993 aec_dump_->WriteConfig(CollectApmConfig(), false); | |
| 1994 } | |
| 1995 | |
| 1996 void AudioProcessingImpl::RecordProcessedCaptureStream( | |
| 1997 const float* const* processed_capture_stream) { | |
| 1998 RTC_DCHECK(aec_dump_); | |
| 1999 | |
| 2000 const size_t channel_size = formats_.api_format.output_stream().num_frames(); | |
| 2001 const size_t num_channels = | |
| 2002 formats_.api_format.output_stream().num_channels(); | |
| 2003 aec_dump_->AddCaptureStreamOutput( | |
| 2004 FloatAudioFrame(processed_capture_stream, num_channels, channel_size)); | |
| 2005 aec_dump_->WriteCaptureStreamMessage(); | |
| 2006 } | |
| 2007 | |
| 2008 void AudioProcessingImpl::RecordProcessedCaptureStream( | |
| 2009 const AudioFrame& processed_capture_frame) { | |
| 2010 RTC_DCHECK(aec_dump_); | |
| 2011 | |
| 2012 aec_dump_->AddCaptureStreamOutput(processed_capture_frame); | |
| 2013 aec_dump_->WriteCaptureStreamMessage(); | |
| 2014 } | |
| 2015 | |
| 2016 void AudioProcessingImpl::RecordAudioProcessingState() { | |
| 2017 RTC_DCHECK(aec_dump_); | |
| 2018 AecDump::AudioProcessingState audio_proc_state; | |
| 2019 audio_proc_state.delay = capture_nonlocked_.stream_delay_ms; | |
| 2020 audio_proc_state.drift = | |
| 2021 public_submodules_->echo_cancellation->stream_drift_samples(); | |
| 2022 audio_proc_state.level = gain_control()->stream_analog_level(); | |
| 2023 audio_proc_state.keypress = capture_.key_pressed; | |
| 2024 aec_dump_->AddAudioProcessingState(audio_proc_state); | |
| 2025 } | |
| 2026 | |
| 1851 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 2027 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| 1852 int AudioProcessingImpl::WriteMessageToDebugFile( | 2028 int AudioProcessingImpl::WriteMessageToDebugFile( |
| 1853 FileWrapper* debug_file, | 2029 FileWrapper* debug_file, |
| 1854 int64_t* filesize_limit_bytes, | 2030 int64_t* filesize_limit_bytes, |
| 1855 rtc::CriticalSection* crit_debug, | 2031 rtc::CriticalSection* crit_debug, |
| 1856 ApmDebugDumpThreadState* debug_state) { | 2032 ApmDebugDumpThreadState* debug_state) { |
| 1857 int32_t size = debug_state->event_msg->ByteSize(); | 2033 int32_t size = debug_state->event_msg->ByteSize(); |
| 1858 if (size <= 0) { | 2034 if (size <= 0) { |
| 1859 return kUnspecifiedError; | 2035 return kUnspecifiedError; |
| 1860 } | 2036 } |
| (...skipping 153 matching lines...) | |
| 2014 previous_agc_level(0), | 2190 previous_agc_level(0), |
| 2015 echo_path_gain_change(false) {} | 2191 echo_path_gain_change(false) {} |
| 2016 | 2192 |
| 2017 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 2193 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; |
| 2018 | 2194 |
| 2019 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 2195 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; |
| 2020 | 2196 |
| 2021 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 2197 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; |
| 2022 | 2198 |
| 2023 } // namespace webrtc | 2199 } // namespace webrtc |