Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* | 
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 
| 3 * | 3 * | 
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license | 
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source | 
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found | 
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may | 
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. | 
| 9 */ | 9 */ | 
| 10 | 10 | 
| (...skipping 134 matching lines...) | |
| 145 | 145 | 
| 146 bool is_enabled() const override { | 146 bool is_enabled() const override { | 
| 147 return apm_->GetConfig().high_pass_filter.enabled; | 147 return apm_->GetConfig().high_pass_filter.enabled; | 
| 148 } | 148 } | 
| 149 | 149 | 
| 150 private: | 150 private: | 
| 151 AudioProcessingImpl* apm_; | 151 AudioProcessingImpl* apm_; | 
| 152 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); | 152 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl); | 
| 153 }; | 153 }; | 
| 154 | 154 | 
| 155 webrtc::InternalAPMStreamsConfig ToStreamsConfig( | |
| 156 const ProcessingConfig& api_format) { | |
| 157 webrtc::InternalAPMStreamsConfig result; | |
| 158 result.input_sample_rate = api_format.input_stream().sample_rate_hz(); | |
| 159 result.input_num_channels = api_format.input_stream().num_channels(); | |
| 160 result.output_num_channels = api_format.output_stream().num_channels(); | |
| 161 result.render_input_num_channels = | |
| 162 api_format.reverse_input_stream().num_channels(); | |
| 163 result.render_input_sample_rate = | |
| 164 api_format.reverse_input_stream().sample_rate_hz(); | |
| 165 result.output_sample_rate = api_format.output_stream().sample_rate_hz(); | |
| 166 result.render_output_sample_rate = | |
| 167 api_format.reverse_output_stream().sample_rate_hz(); | |
| 168 result.render_output_num_channels = | |
| 169 api_format.reverse_output_stream().num_channels(); | |
| 170 return result; | |
| 171 } | |
| 155 } // namespace | 172 } // namespace | 
| 156 | 173 | 
| 157 // Throughout webrtc, it's assumed that success is represented by zero. | 174 // Throughout webrtc, it's assumed that success is represented by zero. | 
| 158 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 175 static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); | 
| 159 | 176 | 
| 160 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} | 177 AudioProcessingImpl::ApmSubmoduleStates::ApmSubmoduleStates() {} | 
| 161 | 178 | 
| 162 bool AudioProcessingImpl::ApmSubmoduleStates::Update( | 179 bool AudioProcessingImpl::ApmSubmoduleStates::Update( | 
| 163 bool low_cut_filter_enabled, | 180 bool low_cut_filter_enabled, | 
| 164 bool echo_canceller_enabled, | 181 bool echo_canceller_enabled, | 
| (...skipping 354 matching lines...) | |
| 519 InitializeEchoCanceller3(); | 536 InitializeEchoCanceller3(); | 
| 520 | 537 | 
| 521 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 538 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 
| 522 if (debug_dump_.debug_file->is_open()) { | 539 if (debug_dump_.debug_file->is_open()) { | 
| 523 int err = WriteInitMessage(); | 540 int err = WriteInitMessage(); | 
| 524 if (err != kNoError) { | 541 if (err != kNoError) { | 
| 525 return err; | 542 return err; | 
| 526 } | 543 } | 
| 527 } | 544 } | 
| 528 #endif | 545 #endif | 
| 529 | 546 if (aec_dump_) { | 
| 547 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
| 548 } | |
| 530 return kNoError; | 549 return kNoError; | 
| 531 } | 550 } | 
| 532 | 551 | 
| 533 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 552 int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { | 
| 534 for (const auto& stream : config.streams) { | 553 for (const auto& stream : config.streams) { | 
| 535 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 554 if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) { | 
| 536 return kBadSampleRateError; | 555 return kBadSampleRateError; | 
| 537 } | 556 } | 
| 538 } | 557 } | 
| 539 | 558 | 
| (...skipping 277 matching lines...) | |
| 817 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 836 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 
| 818 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 837 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 
| 819 const size_t channel_size = | 838 const size_t channel_size = | 
| 820 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 839 sizeof(float) * formats_.api_format.input_stream().num_frames(); | 
| 821 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 840 for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); | 
| 822 ++i) | 841 ++i) | 
| 823 msg->add_input_channel(src[i], channel_size); | 842 msg->add_input_channel(src[i], channel_size); | 
| 824 } | 843 } | 
| 825 #endif | 844 #endif | 
| 826 | 845 | 
| 846 AecDump::CaptureStreamInfo* stream_info; | |
| 847 if (aec_dump_) { | |
| 848 stream_info = RecordUnprocessedCaptureStream(src); | |
| 849 } | |
| 850 | |
| 827 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 851 capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); | 
| 828 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 852 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 
| 829 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 853 capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); | 
| 830 | 854 | 
| 831 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 855 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 
| 832 if (debug_dump_.debug_file->is_open()) { | 856 if (debug_dump_.debug_file->is_open()) { | 
| 833 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 857 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 
| 834 const size_t channel_size = | 858 const size_t channel_size = | 
| 835 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 859 sizeof(float) * formats_.api_format.output_stream().num_frames(); | 
| 836 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | 860 for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); | 
| 837 ++i) | 861 ++i) | 
| 838 msg->add_output_channel(dest[i], channel_size); | 862 msg->add_output_channel(dest[i], channel_size); | 
| 839 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 863 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 
| 840 &debug_dump_.num_bytes_left_for_log_, | 864 &debug_dump_.num_bytes_left_for_log_, | 
| 841 &crit_debug_, &debug_dump_.capture)); | 865 &crit_debug_, &debug_dump_.capture)); | 
| 842 } | 866 } | 
| 843 #endif | 867 #endif | 
| 844 | 868 if (aec_dump_) { | 
| 869 RecordProcessedCaptureStream(dest, stream_info); | |
| 870 } | |
| 845 return kNoError; | 871 return kNoError; | 
| 846 } | 872 } | 
| 847 | 873 | 
| 848 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { | 874 void AudioProcessingImpl::QueueRenderAudio(AudioBuffer* audio) { | 
| 849 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), | 875 EchoCancellationImpl::PackRenderAudioBuffer(audio, num_output_channels(), | 
| 850 num_reverse_channels(), | 876 num_reverse_channels(), | 
| 851 &aec_render_queue_buffer_); | 877 &aec_render_queue_buffer_); | 
| 852 | 878 | 
| 853 RTC_DCHECK_GE(160, audio->num_frames_per_band()); | 879 RTC_DCHECK_GE(160, audio->num_frames_per_band()); | 
| 854 | 880 | 
| (...skipping 216 matching lines...) | |
| 1071 rtc::CritScope cs_render(&crit_render_); | 1097 rtc::CritScope cs_render(&crit_render_); | 
| 1072 RETURN_ON_ERR( | 1098 RETURN_ON_ERR( | 
| 1073 MaybeInitializeCapture(processing_config, reinitialization_required)); | 1099 MaybeInitializeCapture(processing_config, reinitialization_required)); | 
| 1074 } | 1100 } | 
| 1075 rtc::CritScope cs_capture(&crit_capture_); | 1101 rtc::CritScope cs_capture(&crit_capture_); | 
| 1076 if (frame->samples_per_channel_ != | 1102 if (frame->samples_per_channel_ != | 
| 1077 formats_.api_format.input_stream().num_frames()) { | 1103 formats_.api_format.input_stream().num_frames()) { | 
| 1078 return kBadDataLengthError; | 1104 return kBadDataLengthError; | 
| 1079 } | 1105 } | 
| 1080 | 1106 | 
| 1107 AecDump::CaptureStreamInfo* stream_info; | |
| 1108 if (aec_dump_) { | |
| 1109 stream_info = RecordUnprocessedCaptureStream(*frame); | |
| 
 
peah-webrtc (2017/05/15 05:32:51): I think the usage of having stream_info as an outp…
aleloi (2017/05/15 13:20:51): I've changed it now. It looks a little better now.
 
 | |
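The thread above questions returning a `CaptureStreamInfo*` from `RecordUnprocessedCaptureStream()` and threading it back into `RecordProcessedCaptureStream()`. Below is a hedged sketch of one alternative, where the attached `AecDump` keeps the in-flight stream info itself so the call sites no longer shuttle a raw pointer. The parameterless helper signatures are assumptions for illustration, not necessarily what the follow-up patch set does.

```cpp
// Hypothetical variant of the ProcessStream() call sites: the recording
// helpers operate on the AecDump's internally held capture-stream info.
if (aec_dump_) {
  // Fills the current capture-stream info with the raw input plus the
  // delay, drift, analog level and keypress state.
  RecordUnprocessedCaptureStream(*frame);
}

capture_.capture_audio->DeinterleaveFrom(frame);
RETURN_ON_ERR(ProcessCaptureStreamLocked());
capture_.capture_audio->InterleaveTo(
    frame, submodule_states_.CaptureMultiBandProcessingActive());

if (aec_dump_) {
  // Adds the processed output and flushes the message via
  // aec_dump_->WriteCaptureStreamMessage().
  RecordProcessedCaptureStream(*frame);
}
```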
| 1110 } | |
| 1111 | |
| 1081 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1112 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 
| 1082 if (debug_dump_.debug_file->is_open()) { | 1113 if (debug_dump_.debug_file->is_open()) { | 
| 1083 RETURN_ON_ERR(WriteConfigMessage(false)); | 1114 RETURN_ON_ERR(WriteConfigMessage(false)); | 
| 1084 | 1115 | 
| 1085 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 1116 debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); | 
| 1086 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1117 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 
| 1087 const size_t data_size = | 1118 const size_t data_size = | 
| 1088 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1119 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 
| 1089 msg->set_input_data(frame->data_, data_size); | 1120 msg->set_input_data(frame->data_, data_size); | 
| 1090 } | 1121 } | 
| 1091 #endif | 1122 #endif | 
| 1092 | 1123 | 
| 1093 capture_.capture_audio->DeinterleaveFrom(frame); | 1124 capture_.capture_audio->DeinterleaveFrom(frame); | 
| 1094 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 1125 RETURN_ON_ERR(ProcessCaptureStreamLocked()); | 
| 1095 capture_.capture_audio->InterleaveTo( | 1126 capture_.capture_audio->InterleaveTo( | 
| 1096 frame, submodule_states_.CaptureMultiBandProcessingActive()); | 1127 frame, submodule_states_.CaptureMultiBandProcessingActive()); | 
| 1097 | 1128 | 
| 1129 if (aec_dump_) { | |
| 1130 RecordProcessedCaptureStream(*frame, stream_info); | |
| 1131 } | |
| 1098 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1132 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 
| 1099 if (debug_dump_.debug_file->is_open()) { | 1133 if (debug_dump_.debug_file->is_open()) { | 
| 1100 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 1134 audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); | 
| 1101 const size_t data_size = | 1135 const size_t data_size = | 
| 1102 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1136 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 
| 1103 msg->set_output_data(frame->data_, data_size); | 1137 msg->set_output_data(frame->data_, data_size); | 
| 1104 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1138 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 
| 1105 &debug_dump_.num_bytes_left_for_log_, | 1139 &debug_dump_.num_bytes_left_for_log_, | 
| 1106 &crit_debug_, &debug_dump_.capture)); | 1140 &crit_debug_, &debug_dump_.capture)); | 
| 1107 } | 1141 } | 
| (...skipping 261 matching lines...) | |
| 1369 const size_t channel_size = | 1403 const size_t channel_size = | 
| 1370 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 1404 sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); | 
| 1371 for (size_t i = 0; | 1405 for (size_t i = 0; | 
| 1372 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 1406 i < formats_.api_format.reverse_input_stream().num_channels(); ++i) | 
| 1373 msg->add_channel(src[i], channel_size); | 1407 msg->add_channel(src[i], channel_size); | 
| 1374 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1408 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 
| 1375 &debug_dump_.num_bytes_left_for_log_, | 1409 &debug_dump_.num_bytes_left_for_log_, | 
| 1376 &crit_debug_, &debug_dump_.render)); | 1410 &crit_debug_, &debug_dump_.render)); | 
| 1377 } | 1411 } | 
| 1378 #endif | 1412 #endif | 
| 1379 | 1413 if (aec_dump_) { | 
| 1414 const size_t channel_size = | |
| 1415 formats_.api_format.reverse_input_stream().num_frames(); | |
| 1416 const size_t num_channels = | |
| 1417 formats_.api_format.reverse_input_stream().num_channels(); | |
| 1418 aec_dump_->WriteRenderStreamMessage( | |
| 1419 FloatAudioFrame(src, num_channels, channel_size)); | |
| 1420 } | |
| 1380 render_.render_audio->CopyFrom(src, | 1421 render_.render_audio->CopyFrom(src, | 
| 1381 formats_.api_format.reverse_input_stream()); | 1422 formats_.api_format.reverse_input_stream()); | 
| 1382 return ProcessRenderStreamLocked(); | 1423 return ProcessRenderStreamLocked(); | 
| 1383 } | 1424 } | 
| 1384 | 1425 | 
| 1385 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 1426 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { | 
| 1386 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 1427 TRACE_EVENT0("webrtc", "AudioProcessing::ProcessReverseStream_AudioFrame"); | 
| 1387 rtc::CritScope cs(&crit_render_); | 1428 rtc::CritScope cs(&crit_render_); | 
| 1388 if (frame == nullptr) { | 1429 if (frame == nullptr) { | 
| 1389 return kNullPointerError; | 1430 return kNullPointerError; | 
| (...skipping 32 matching lines...) | |
| 1422 audioproc::ReverseStream* msg = | 1463 audioproc::ReverseStream* msg = | 
| 1423 debug_dump_.render.event_msg->mutable_reverse_stream(); | 1464 debug_dump_.render.event_msg->mutable_reverse_stream(); | 
| 1424 const size_t data_size = | 1465 const size_t data_size = | 
| 1425 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 1466 sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; | 
| 1426 msg->set_data(frame->data_, data_size); | 1467 msg->set_data(frame->data_, data_size); | 
| 1427 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 1468 RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), | 
| 1428 &debug_dump_.num_bytes_left_for_log_, | 1469 &debug_dump_.num_bytes_left_for_log_, | 
| 1429 &crit_debug_, &debug_dump_.render)); | 1470 &crit_debug_, &debug_dump_.render)); | 
| 1430 } | 1471 } | 
| 1431 #endif | 1472 #endif | 
| 1473 if (aec_dump_) { | |
| 1474 aec_dump_->WriteRenderStreamMessage(*frame); | |
| 1475 } | |
| 1476 | |
| 1432 render_.render_audio->DeinterleaveFrom(frame); | 1477 render_.render_audio->DeinterleaveFrom(frame); | 
| 1433 RETURN_ON_ERR(ProcessRenderStreamLocked()); | 1478 RETURN_ON_ERR(ProcessRenderStreamLocked()); | 
| 1434 render_.render_audio->InterleaveTo( | 1479 render_.render_audio->InterleaveTo( | 
| 1435 frame, submodule_states_.RenderMultiBandProcessingActive()); | 1480 frame, submodule_states_.RenderMultiBandProcessingActive()); | 
| 1436 return kNoError; | 1481 return kNoError; | 
| 1437 } | 1482 } | 
| 1438 | 1483 | 
| 1439 int AudioProcessingImpl::ProcessRenderStreamLocked() { | 1484 int AudioProcessingImpl::ProcessRenderStreamLocked() { | 
| 1440 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. | 1485 AudioBuffer* render_buffer = render_.render_audio.get(); // For brevity. | 
| 1441 if (submodule_states_.RenderMultiBandSubModulesActive() && | 1486 if (submodule_states_.RenderMultiBandSubModulesActive() && | 
| (...skipping 63 matching lines...) | |
| 1505 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 1550 void AudioProcessingImpl::set_delay_offset_ms(int offset) { | 
| 1506 rtc::CritScope cs(&crit_capture_); | 1551 rtc::CritScope cs(&crit_capture_); | 
| 1507 capture_.delay_offset_ms = offset; | 1552 capture_.delay_offset_ms = offset; | 
| 1508 } | 1553 } | 
| 1509 | 1554 | 
| 1510 int AudioProcessingImpl::delay_offset_ms() const { | 1555 int AudioProcessingImpl::delay_offset_ms() const { | 
| 1511 rtc::CritScope cs(&crit_capture_); | 1556 rtc::CritScope cs(&crit_capture_); | 
| 1512 return capture_.delay_offset_ms; | 1557 return capture_.delay_offset_ms; | 
| 1513 } | 1558 } | 
| 1514 | 1559 | 
| 1560 void AudioProcessingImpl::AttachAecDump(std::unique_ptr<AecDump> aec_dump) { | |
| 1561 rtc::CritScope cs_render(&crit_render_); | |
| 1562 rtc::CritScope cs_capture(&crit_capture_); | |
| 1563 RTC_DCHECK(aec_dump); | |
| 1564 aec_dump_ = std::move(aec_dump); | |
| 1565 | |
| 1566 aec_dump_->WriteConfig(CollectApmConfig(), true); | |
| 1567 aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format)); | |
| 1568 } | |
| 1569 | |
| 1570 void AudioProcessingImpl::DetachAecDump() { | |
| 1571 rtc::CritScope cs_render(&crit_render_); | |
| 1572 rtc::CritScope cs_capture(&crit_capture_); | |
| 1573 aec_dump_.reset(); | |
| 1574 } | |
| 1575 | |
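For context, a minimal usage sketch of the new attach/detach entry points added above. The `AecDumpFactory` name, file path and worker-queue setup are assumptions for illustration and are not part of this CL.

```cpp
// Hypothetical client code: start and stop aec-dump recording through
// AttachAecDump()/DetachAecDump().
rtc::TaskQueue worker_queue("aec_dump_worker");
std::unique_ptr<AecDump> aec_dump =
    AecDumpFactory::Create("/tmp/audio.aecdump",
                           -1 /* no size limit */, &worker_queue);
if (aec_dump) {
  // Writes the config and init messages immediately on attach.
  apm->AttachAecDump(std::move(aec_dump));
}

// ... ProcessStream() / ProcessReverseStream() calls are recorded here ...

apm->DetachAecDump();  // Stops recording; the AecDump is destroyed.
```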
| 1515 int AudioProcessingImpl::StartDebugRecording( | 1576 int AudioProcessingImpl::StartDebugRecording( | 
| 1516 const char filename[AudioProcessing::kMaxFilenameSize], | 1577 const char filename[AudioProcessing::kMaxFilenameSize], | 
| 1517 int64_t max_log_size_bytes) { | 1578 int64_t max_log_size_bytes) { | 
| 1518 // Run in a single-threaded manner. | 1579 // Run in a single-threaded manner. | 
| 1519 rtc::CritScope cs_render(&crit_render_); | 1580 rtc::CritScope cs_render(&crit_render_); | 
| 1520 rtc::CritScope cs_capture(&crit_capture_); | 1581 rtc::CritScope cs_capture(&crit_capture_); | 
| 1521 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 1582 static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); | 
| 1522 | 1583 | 
| 1523 if (filename == nullptr) { | 1584 if (filename == nullptr) { | 
| 1524 return kNullPointerError; | 1585 return kNullPointerError; | 
| (...skipping 54 matching lines...) | |
| 1579 rtc::CritScope cs_render(&crit_render_); | 1640 rtc::CritScope cs_render(&crit_render_); | 
| 1580 rtc::CritScope cs_capture(&crit_capture_); | 1641 rtc::CritScope cs_capture(&crit_capture_); | 
| 1581 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 1642 FILE* stream = rtc::FdopenPlatformFileForWriting(handle); | 
| 1582 return StartDebugRecording(stream, -1); | 1643 return StartDebugRecording(stream, -1); | 
| 1583 } | 1644 } | 
| 1584 | 1645 | 
| 1585 int AudioProcessingImpl::StopDebugRecording() { | 1646 int AudioProcessingImpl::StopDebugRecording() { | 
| 1586 // Run in a single-threaded manner. | 1647 // Run in a single-threaded manner. | 
| 1587 rtc::CritScope cs_render(&crit_render_); | 1648 rtc::CritScope cs_render(&crit_render_); | 
| 1588 rtc::CritScope cs_capture(&crit_capture_); | 1649 rtc::CritScope cs_capture(&crit_capture_); | 
| 1650 DetachAecDump(); | |
| 
 
peah-webrtc (2017/05/15 05:32:51): Please move this to before the locks (as DetachAec…
aleloi (2017/05/15 13:20:52): Done.
 
 | |
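A minimal sketch of the requested reordering (applied in a later patch set, not this one): since DetachAecDump() acquires both critical sections itself, it is called before StopDebugRecording() takes them.

```cpp
int AudioProcessingImpl::StopDebugRecording() {
  // Detach first; DetachAecDump() acquires crit_render_/crit_capture_
  // internally, so it is not called while this function already holds them
  // (assuming the critical sections are not meant to be re-entered here).
  DetachAecDump();

  // Run in a single-threaded manner.
  rtc::CritScope cs_render(&crit_render_);
  rtc::CritScope cs_capture(&crit_capture_);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // We just return if recording hasn't started.
  debug_dump_.debug_file->CloseFile();
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
```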
| 1589 | 1651 | 
| 1590 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 1652 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 
| 1591 // We just return if recording hasn't started. | 1653 // We just return if recording hasn't started. | 
| 1592 debug_dump_.debug_file->CloseFile(); | 1654 debug_dump_.debug_file->CloseFile(); | 
| 1593 return kNoError; | 1655 return kNoError; | 
| 1594 #else | 1656 #else | 
| 1595 return kUnsupportedFunctionError; | 1657 return kUnsupportedFunctionError; | 
| 1596 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 1658 #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP | 
| 1597 } | 1659 } | 
| 1598 | 1660 | 
| (...skipping 231 matching lines...) | |
| 1830 capture_.last_stream_delay_ms = 0; | 1892 capture_.last_stream_delay_ms = 0; | 
| 1831 | 1893 | 
| 1832 if (capture_.aec_system_delay_jumps > -1) { | 1894 if (capture_.aec_system_delay_jumps > -1) { | 
| 1833 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 1895 RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", | 
| 1834 capture_.aec_system_delay_jumps, 51); | 1896 capture_.aec_system_delay_jumps, 51); | 
| 1835 } | 1897 } | 
| 1836 capture_.aec_system_delay_jumps = -1; | 1898 capture_.aec_system_delay_jumps = -1; | 
| 1837 capture_.last_aec_system_delay_ms = 0; | 1899 capture_.last_aec_system_delay_ms = 0; | 
| 1838 } | 1900 } | 
| 1839 | 1901 | 
| 1902 InternalAPMConfig AudioProcessingImpl::CollectApmConfig() const { | |
| 1903 std::string experiments_description = | |
| 1904 public_submodules_->echo_cancellation->GetExperimentsDescription(); | |
| 1905 // TODO(peah): Add semicolon-separated concatenations of experiment | |
| 1906 // descriptions for other submodules. | |
| 1907 if (capture_nonlocked_.level_controller_enabled) { | |
| 1908 experiments_description += "LevelController;"; | |
| 1909 } | |
| 1910 if (constants_.agc_clipped_level_min != kClippedLevelMin) { | |
| 1911 experiments_description += "AgcClippingLevelExperiment;"; | |
| 1912 } | |
| 1913 if (capture_nonlocked_.echo_canceller3_enabled) { | |
| 1914 experiments_description += "EchoCanceller3;"; | |
| 1915 } | |
| 1916 | |
| 1917 InternalAPMConfig apm_config; | |
| 1918 | |
| 1919 apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled(); | |
| 1920 apm_config.aec_delay_agnostic_enabled = | |
| 1921 public_submodules_->echo_cancellation->is_delay_agnostic_enabled(); | |
| 1922 apm_config.aec_drift_compensation_enabled = | |
| 1923 public_submodules_->echo_cancellation->is_drift_compensation_enabled(); | |
| 1924 apm_config.aec_extended_filter_enabled = | |
| 1925 public_submodules_->echo_cancellation->is_extended_filter_enabled(); | |
| 1926 apm_config.aec_suppression_level = static_cast<int>( | |
| 1927 public_submodules_->echo_cancellation->suppression_level()); | |
| 1928 | |
| 1929 apm_config.aecm_enabled = | |
| 1930 public_submodules_->echo_control_mobile->is_enabled(); | |
| 1931 apm_config.aecm_comfort_noise_enabled = | |
| 1932 public_submodules_->echo_control_mobile->is_comfort_noise_enabled(); | |
| 1933 apm_config.aecm_routing_mode = | |
| 1934 static_cast<int>(public_submodules_->echo_control_mobile->routing_mode()); | |
| 1935 | |
| 1936 apm_config.agc_enabled = public_submodules_->gain_control->is_enabled(); | |
| 1937 apm_config.agc_mode = | |
| 1938 static_cast<int>(public_submodules_->gain_control->mode()); | |
| 1939 apm_config.agc_limiter_enabled = | |
| 1940 public_submodules_->gain_control->is_limiter_enabled(); | |
| 1941 apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc; | |
| 1942 | |
| 1943 apm_config.hpf_enabled = config_.high_pass_filter.enabled; | |
| 1944 | |
| 1945 apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled(); | |
| 1946 apm_config.ns_level = | |
| 1947 static_cast<int>(public_submodules_->noise_suppression->level()); | |
| 1948 | |
| 1949 apm_config.transient_suppression_enabled = | |
| 1950 capture_.transient_suppressor_enabled; | |
| 1951 apm_config.intelligibility_enhancer_enabled = | |
| 1952 capture_nonlocked_.intelligibility_enabled; | |
| 1953 apm_config.experiments_description = experiments_description; | |
| 1954 return apm_config; | |
| 1955 } | |
| 1956 | |
| 1957 AecDump::CaptureStreamInfo* AudioProcessingImpl::RecordUnprocessedCaptureStream( | |
| 1958 const float* const* src) const { | |
| 1959 RTC_DCHECK(aec_dump_); | |
| 1960 aec_dump_->WriteConfig(CollectApmConfig(), false); | |
| 
 
peah-webrtc (2017/05/15 05:32:51): How can this method be const? It does writing to a…
aleloi (2017/05/15 13:20:51): Done.
 
 | |
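The objection is that the method writes through aec_dump_ while being declared const. A hedged sketch of the adjusted declarations, assuming the fix is simply to drop the qualifier as the "Done." replies suggest (hypothetical header excerpt, not this patch set):

```cpp
// The recording helpers mutate the attached AecDump, so they are non-const.
AecDump::CaptureStreamInfo* RecordUnprocessedCaptureStream(
    const float* const* src);
AecDump::CaptureStreamInfo* RecordUnprocessedCaptureStream(
    const AudioFrame& capture_frame);
void RecordProcessedCaptureStream(
    const float* const* processed_capture_stream,
    AecDump::CaptureStreamInfo* stream_info);
void RecordProcessedCaptureStream(const AudioFrame& processed_capture_frame,
                                  AecDump::CaptureStreamInfo* stream_info);
```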
| 1961 auto* stream_info = aec_dump_->GetCaptureStreamInfo(); | |
| 1962 RTC_DCHECK(stream_info); | |
| 1963 | |
| 1964 const size_t channel_size = formats_.api_format.input_stream().num_frames(); | |
| 1965 const size_t num_channels = formats_.api_format.input_stream().num_channels(); | |
| 1966 stream_info->AddInput(FloatAudioFrame(src, num_channels, channel_size)); | |
| 
 
peah-webrtc (2017/05/15 05:32:51): I'm not strongly against it, but I'd suggest dropp…
peah-webrtc (2017/05/15 05:57:07): Thinking a bit more about FloatAudioFrame, I guess…
peah-webrtc (2017/05/15 07:25:18): I now saw your comment about this in the upcoming…
 
 | |
| 1967 PopulateStreamInfoWithState(stream_info); | |
| 1968 return stream_info; | |
| 1969 } | |
| 1970 | |
| 1971 AecDump::CaptureStreamInfo* AudioProcessingImpl::RecordUnprocessedCaptureStream( | |
| 1972 const AudioFrame& capture_frame) const { | |
| 1973 RTC_DCHECK(aec_dump_); | |
| 1974 auto* stream_info = aec_dump_->GetCaptureStreamInfo(); | |
| 1975 RTC_DCHECK(stream_info); | |
| 1976 | |
| 1977 stream_info->AddInput(capture_frame); | |
| 1978 PopulateStreamInfoWithState(stream_info); | |
| 1979 aec_dump_->WriteConfig(CollectApmConfig(), false); | |
| 1980 return stream_info; | |
| 1981 } | |
| 1982 | |
| 1983 void AudioProcessingImpl::RecordProcessedCaptureStream( | |
| 1984 const float* const* processed_capture_stream, | |
| 1985 AecDump::CaptureStreamInfo* stream_info) const { | |
| 
 
peah-webrtc (2017/05/15 05:32:51): +1, how can it be const? (and elsewhere)
aleloi (2017/05/15 13:20:52): Done.
 
 | |
| 1986 RTC_DCHECK(stream_info); | |
| 1987 RTC_DCHECK(aec_dump_); | |
| 1988 | |
| 1989 const size_t channel_size = formats_.api_format.output_stream().num_frames(); | |
| 1990 const size_t num_channels = | |
| 1991 formats_.api_format.output_stream().num_channels(); | |
| 1992 stream_info->AddOutput( | |
| 1993 FloatAudioFrame(processed_capture_stream, num_channels, channel_size)); | |
| 1994 aec_dump_->WriteCaptureStreamMessage(); | |
| 1995 } | |
| 1996 | |
| 1997 void AudioProcessingImpl::RecordProcessedCaptureStream( | |
| 1998 const AudioFrame& processed_capture_frame, | |
| 1999 AecDump::CaptureStreamInfo* stream_info) const { | |
| 2000 RTC_DCHECK(stream_info); | |
| 2001 RTC_DCHECK(aec_dump_); | |
| 2002 | |
| 2003 stream_info->AddOutput(processed_capture_frame); | |
| 2004 aec_dump_->WriteCaptureStreamMessage(); | |
| 2005 } | |
| 2006 | |
| 2007 void AudioProcessingImpl::PopulateStreamInfoWithState( | |
| 2008 AecDump::CaptureStreamInfo* stream_info) const { | |
| 2009 RTC_DCHECK(stream_info); | |
| 2010 | |
| 2011 stream_info->set_delay(capture_nonlocked_.stream_delay_ms); | |
| 2012 stream_info->set_drift( | |
| 2013 public_submodules_->echo_cancellation->stream_drift_samples()); | |
| 2014 stream_info->set_level(gain_control()->stream_analog_level()); | |
| 2015 stream_info->set_keypress(capture_.key_pressed); | |
| 2016 } | |
| 2017 | |
| 1840 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 2018 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP | 
| 1841 int AudioProcessingImpl::WriteMessageToDebugFile( | 2019 int AudioProcessingImpl::WriteMessageToDebugFile( | 
| 1842 FileWrapper* debug_file, | 2020 FileWrapper* debug_file, | 
| 1843 int64_t* filesize_limit_bytes, | 2021 int64_t* filesize_limit_bytes, | 
| 1844 rtc::CriticalSection* crit_debug, | 2022 rtc::CriticalSection* crit_debug, | 
| 1845 ApmDebugDumpThreadState* debug_state) { | 2023 ApmDebugDumpThreadState* debug_state) { | 
| 1846 int32_t size = debug_state->event_msg->ByteSize(); | 2024 int32_t size = debug_state->event_msg->ByteSize(); | 
| 1847 if (size <= 0) { | 2025 if (size <= 0) { | 
| 1848 return kUnspecifiedError; | 2026 return kUnspecifiedError; | 
| 1849 } | 2027 } | 
| (...skipping 153 matching lines...) | |
| 2003 previous_agc_level(0), | 2181 previous_agc_level(0), | 
| 2004 echo_path_gain_change(false) {} | 2182 echo_path_gain_change(false) {} | 
| 2005 | 2183 | 
| 2006 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 2184 AudioProcessingImpl::ApmCaptureState::~ApmCaptureState() = default; | 
| 2007 | 2185 | 
| 2008 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 2186 AudioProcessingImpl::ApmRenderState::ApmRenderState() = default; | 
| 2009 | 2187 | 
| 2010 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 2188 AudioProcessingImpl::ApmRenderState::~ApmRenderState() = default; | 
| 2011 | 2189 | 
| 2012 } // namespace webrtc | 2190 } // namespace webrtc | 