Index: webrtc/modules/audio_processing/audio_processing_impl.cc
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index 816210f34f8738eb7a042adee69f87f5ed1a99d8..639ddd5c3f79b4ee70cb661bb933c1d828946a3c 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -129,6 +129,9 @@ static const size_t kMaxAllowedValuesOfSamplesPerFrame = 160;
 // reverse and forward call numbers.
 static const size_t kMaxNumFramesToBuffer = 100;
+// Maximum number of audio channels in the input and output streams.
+constexpr size_t kMaxNumChannelsToRecord = 4;
> the sun (2017/04/28 11:06:00): Why 4? We only support stereo anyway. And why arbi…
> peah-webrtc (2017/04/28 11:21:10): Good point :-) The reason for 4 is that that is th…
> aleloi (2017/05/03 13:58:18): Removed in latest patch.
+
 class HighPassFilterImpl : public HighPassFilter {
  public:
   explicit HighPassFilterImpl(AudioProcessingImpl* apm) : apm_(apm) {}
@@ -152,6 +155,38 @@ class HighPassFilterImpl : public HighPassFilter {
   RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl);
 };
+webrtc::InternalAPMStreamsConfig ToStreamsConfig(
+    const ProcessingConfig& api_format) {
+  webrtc::InternalAPMStreamsConfig result;
+  result.input_sample_rate = api_format.input_stream().sample_rate_hz();
+  result.input_num_channels = api_format.input_stream().num_channels();
+  result.output_num_channels = api_format.output_stream().num_channels();
+  result.render_input_num_channels =
+      api_format.reverse_input_stream().num_channels();
+  result.render_input_sample_rate =
+      api_format.reverse_input_stream().sample_rate_hz();
+  result.output_sample_rate = api_format.output_stream().sample_rate_hz();
+  result.render_output_sample_rate =
+      api_format.reverse_output_stream().sample_rate_hz();
+  result.render_output_num_channels =
+      api_format.reverse_output_stream().num_channels();
+  return result;
+}
+
+rtc::ArrayView<rtc::ArrayView<const float>> CreateStreamView(
+    const float* const* stream,
+    size_t channel_size,
+    size_t num_channels) {
+  RTC_DCHECK_LE(num_channels, kMaxNumChannelsToRecord);
+
+  std::array<rtc::ArrayView<const float>, kMaxNumChannelsToRecord>
+      array_stream_view;
+  for (size_t i = 0; i < std::min(num_channels, kMaxNumChannelsToRecord); ++i) {
+    array_stream_view[i] = rtc::ArrayView<const float>(stream[i], channel_size);
+  }
+  return rtc::ArrayView<rtc::ArrayView<const float>>(&array_stream_view[0],
> the sun (2017/04/28 11:06:00): ArrayView is just a pointer and length, so in this…
> kwiberg-webrtc (2017/04/28 12:13:06): +1. I haven't read the code closely, but I suspect…
> aleloi (2017/05/03 13:58:18): Now fixed.
+                                                     num_channels);
+}
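The review thread above flags a real lifetime problem: rtc::ArrayView is a non-owning (pointer, length) pair, so the view returned here points into the function-local array_stream_view, which is destroyed when CreateStreamView returns. A safe variant keeps the backing storage in the caller; the sketch below is one way to do that, and is illustrative only (the actual fix referred to by "Now fixed." may differ). Note also that rtc::ArrayView's length argument is an element count, so the sizeof(float) factor in the channel_size values passed at the call sites below looks like a leftover from the byte-oriented protobuf path; the sketch takes a plain samples-per-channel count instead.

// Caller owns the backing array; the returned view stays valid for as long
// as *storage does. All sizes are element counts, not byte counts.
rtc::ArrayView<rtc::ArrayView<const float>> CreateStreamView(
    const float* const* stream,
    size_t samples_per_channel,
    size_t num_channels,
    std::array<rtc::ArrayView<const float>, kMaxNumChannelsToRecord>*
        storage) {
  RTC_DCHECK_LE(num_channels, kMaxNumChannelsToRecord);
  for (size_t i = 0; i < num_channels; ++i) {
    (*storage)[i] =
        rtc::ArrayView<const float>(stream[i], samples_per_channel);
  }
  return rtc::ArrayView<rtc::ArrayView<const float>>(storage->data(),
                                                     num_channels);
}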
 }  // namespace
 // Throughout webrtc, it's assumed that success is represented by zero.
@@ -526,7 +561,9 @@ int AudioProcessingImpl::InitializeLocked() {
     }
   }
 #endif
-
+  if (aec_dump_) {
+    aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
+  }
   return kNoError;
 }
@@ -824,6 +861,9 @@ int AudioProcessingImpl::ProcessStream(const float* const* src,
   }
 #endif
+  std::unique_ptr<AecDump::CaptureStreamInfo> stream_info =
+      aec_dump_ ? RecordUnprocessedCaptureStream(src) : nullptr;
> peah-webrtc (2017/04/25 21:04:46): Is the else statement really needed (the nullptr…
> aleloi (2017/04/26 09:16:48): Done.
+
   capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
   RETURN_ON_ERR(ProcessCaptureStreamLocked());
   capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
@@ -841,7 +881,9 @@ int AudioProcessingImpl::ProcessStream(const float* const* src,
                                           &crit_debug_, &debug_dump_.capture));
   }
 #endif
-
+  if (aec_dump_) {
+    RecordProcessedCaptureStream(dest, std::move(stream_info));
+  }
   return kNoError;
 }
@@ -1078,6 +1120,9 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
     return kBadDataLengthError;
   }
+  std::unique_ptr<AecDump::CaptureStreamInfo> stream_info =
> peah-webrtc (2017/04/25 21:04:46): Same comment as above.
> aleloi (2017/04/26 09:16:48): Done.
+      aec_dump_ ? RecordUnprocessedCaptureStream(*frame) : nullptr;
+
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_dump_.debug_file->is_open()) {
     RETURN_ON_ERR(WriteConfigMessage(false));
@@ -1095,6 +1140,9 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   capture_.capture_audio->InterleaveTo(
       frame, submodule_states_.CaptureMultiBandProcessingActive());
+  if (aec_dump_) {
+    RecordProcessedCaptureStream(*frame, std::move(stream_info));
+  }
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_dump_.debug_file->is_open()) {
     audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
@@ -1376,7 +1424,14 @@ int AudioProcessingImpl::AnalyzeReverseStreamLocked(
                                           &crit_debug_, &debug_dump_.render));
   }
 #endif
-
+  if (aec_dump_) {
+    const size_t channel_size =
+        sizeof(float) * formats_.api_format.reverse_input_stream().num_frames();
+    const size_t num_channels =
+        formats_.api_format.reverse_input_stream().num_channels();
+    aec_dump_->WriteRenderStreamMessage(
+        CreateStreamView(src, channel_size, num_channels));
+  }
   render_.render_audio->CopyFrom(src,
                                  formats_.api_format.reverse_input_stream());
   return ProcessRenderStreamLocked();
@@ -1429,6 +1484,10 @@ int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
                                          &crit_debug_, &debug_dump_.render));
   }
 #endif
+  if (aec_dump_) {
+    aec_dump_->WriteRenderStreamMessage(*frame);
+  }
+
   render_.render_audio->DeinterleaveFrom(frame);
   RETURN_ON_ERR(ProcessRenderStreamLocked());
   render_.render_audio->InterleaveTo(
@@ -1512,6 +1571,17 @@ int AudioProcessingImpl::delay_offset_ms() const {
   return capture_.delay_offset_ms;
 }
+void AudioProcessingImpl::StartDebugRecording(
+    std::unique_ptr<AecDump> aec_dump) {
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+  RTC_DCHECK(aec_dump);
+  aec_dump_ = std::move(aec_dump);
+
+  aec_dump_->WriteConfig(CollectApmConfig(), true);
+  aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
+}
+
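For context, this new overload lets the caller inject any AecDump implementation while APM is running, instead of handing APM a filename. Usage would look roughly like this (a hypothetical sketch; CreateFileAecDump stands in for whatever factory the embedder uses and is not part of this CL):

// apm is an initialized AudioProcessing instance.
std::unique_ptr<AecDump> dump = CreateFileAecDump("/tmp/aec.dump");
apm->StartDebugRecording(std::move(dump));  // writes Config + Init messages
// ... ProcessStream()/ProcessReverseStream() calls are now recorded ...
apm->StopDebugRecording();  // resets aec_dump_ and stops recording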
 int AudioProcessingImpl::StartDebugRecording(
     const char filename[AudioProcessing::kMaxFilenameSize],
     int64_t max_log_size_bytes) {
@@ -1586,6 +1656,7 @@ int AudioProcessingImpl::StopDebugRecording() {
   // Run in a single-threaded manner.
   rtc::CritScope cs_render(&crit_render_);
   rtc::CritScope cs_capture(&crit_capture_);
+  aec_dump_.reset();
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // We just return if recording hasn't started.
@@ -1837,6 +1908,126 @@ void AudioProcessingImpl::UpdateHistogramsOnCallEnd() {
   capture_.last_aec_system_delay_ms = 0;
 }
+InternalAPMConfig AudioProcessingImpl::CollectApmConfig() const {
+  std::string experiments_description =
+      public_submodules_->echo_cancellation->GetExperimentsDescription();
+  // TODO(peah): Add semicolon-separated concatenations of experiment
+  // descriptions for other submodules.
+  if (capture_nonlocked_.level_controller_enabled) {
+    experiments_description += "LevelController;";
+  }
+  if (constants_.agc_clipped_level_min != kClippedLevelMin) {
+    experiments_description += "AgcClippingLevelExperiment;";
+  }
+  if (capture_nonlocked_.echo_canceller3_enabled) {
+    experiments_description += "EchoCanceller3;";
+  }
+
+  InternalAPMConfig apm_config;
+
+  apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled();
+  apm_config.aec_delay_agnostic_enabled =
+      public_submodules_->echo_cancellation->is_delay_agnostic_enabled();
+  apm_config.aec_drift_compensation_enabled =
+      public_submodules_->echo_cancellation->is_drift_compensation_enabled();
+  apm_config.aec_extended_filter_enabled =
+      public_submodules_->echo_cancellation->is_extended_filter_enabled();
+  apm_config.aec_suppression_level = static_cast<int>(
+      public_submodules_->echo_cancellation->suppression_level());
+
+  apm_config.aecm_enabled =
+      public_submodules_->echo_control_mobile->is_enabled();
+  apm_config.aecm_comfort_noise_enabled =
+      public_submodules_->echo_control_mobile->is_comfort_noise_enabled();
+  apm_config.aecm_routing_mode =
+      static_cast<int>(public_submodules_->echo_control_mobile->routing_mode());
+
+  apm_config.agc_enabled = public_submodules_->gain_control->is_enabled();
+  apm_config.agc_mode =
+      static_cast<int>(public_submodules_->gain_control->mode());
+  apm_config.agc_limiter_enabled =
+      public_submodules_->gain_control->is_limiter_enabled();
+  apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc;
+
+  apm_config.hpf_enabled = config_.high_pass_filter.enabled;
+
+  apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled();
+  apm_config.ns_level =
+      static_cast<int>(public_submodules_->noise_suppression->level());
+
+  apm_config.transient_suppression_enabled =
+      capture_.transient_suppressor_enabled;
+  apm_config.intelligibility_enhancer_enabled =
+      capture_nonlocked_.intelligibility_enabled;
+  apm_config.experiments_description = experiments_description;
+  return apm_config;
+}
+
+std::unique_ptr<AecDump::CaptureStreamInfo>
+AudioProcessingImpl::RecordUnprocessedCaptureStream(
+    const float* const* src) const {
+  RTC_DCHECK(aec_dump_);
+  aec_dump_->WriteConfig(CollectApmConfig(), false);
+  auto stream_info = aec_dump_->GetCaptureStreamInfo();
+  RTC_DCHECK(stream_info);
+
+  const size_t channel_size =
+      sizeof(float) * formats_.api_format.input_stream().num_frames();
+  const size_t num_channels = formats_.api_format.input_stream().num_channels();
+  stream_info->AddInput(CreateStreamView(src, channel_size, num_channels));
+  PopulateStreamInfoWithConfig(stream_info.get());
+  return stream_info;
+}
+
+std::unique_ptr<AecDump::CaptureStreamInfo>
+AudioProcessingImpl::RecordUnprocessedCaptureStream(
+    const AudioFrame& capture_frame) const {
+  RTC_DCHECK(aec_dump_);
+  auto stream_info = aec_dump_->GetCaptureStreamInfo();
+  RTC_DCHECK(stream_info);
+
+  stream_info->AddInput(capture_frame);
+  PopulateStreamInfoWithConfig(stream_info.get());
+  aec_dump_->WriteConfig(CollectApmConfig(), false);
+  return stream_info;
+}
+
+void AudioProcessingImpl::RecordProcessedCaptureStream(
+    const float* const* processed_capture_stream,
+    std::unique_ptr<AecDump::CaptureStreamInfo> stream_info) const {
+  RTC_DCHECK(stream_info);
+  RTC_DCHECK(aec_dump_);
+
+  const size_t channel_size =
+      sizeof(float) * formats_.api_format.output_stream().num_frames();
+  const size_t num_channels =
+      formats_.api_format.output_stream().num_channels();
+  stream_info->AddOutput(
+      CreateStreamView(processed_capture_stream, channel_size, num_channels));
+  aec_dump_->WriteCaptureStreamMessage(std::move(stream_info));
+}
+
+void AudioProcessingImpl::RecordProcessedCaptureStream(
+    const AudioFrame& processed_capture_frame,
+    std::unique_ptr<AecDump::CaptureStreamInfo> stream_info) const {
+  RTC_DCHECK(stream_info);
+  RTC_DCHECK(aec_dump_);
+
+  stream_info->AddOutput(processed_capture_frame);
+  aec_dump_->WriteCaptureStreamMessage(std::move(stream_info));
+}
+
+void AudioProcessingImpl::PopulateStreamInfoWithConfig(
+    AecDump::CaptureStreamInfo* stream_info) const {
+  RTC_DCHECK(stream_info);
+
+  stream_info->set_delay(capture_nonlocked_.stream_delay_ms);
+  stream_info->set_drift(
+      public_submodules_->echo_cancellation->stream_drift_samples());
+  stream_info->set_level(gain_control()->stream_analog_level());
+  stream_info->set_keypress(capture_.key_pressed);
+}
+
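The AecDump interface itself is not part of this diff. Inferring the virtual methods from the call sites above, a minimal no-op implementation (the kind of stand-in a test might inject via StartDebugRecording) would look roughly like this. The exact signatures are assumptions reconstructed from this patch; the authoritative interface lives in webrtc/modules/audio_processing/include/aec_dump.h and may differ.

// No-op AecDump sketch; method signatures inferred from this patch's call
// sites, not copied from the real header.
#include <memory>

#include "webrtc/base/array_view.h"
#include "webrtc/modules/audio_processing/include/aec_dump.h"

namespace webrtc {

class NoopCaptureStreamInfo : public AecDump::CaptureStreamInfo {
 public:
  void AddInput(rtc::ArrayView<rtc::ArrayView<const float>> src) override {}
  void AddInput(const AudioFrame& frame) override {}
  void AddOutput(rtc::ArrayView<rtc::ArrayView<const float>> dest) override {}
  void AddOutput(const AudioFrame& frame) override {}
  void set_delay(int delay) override {}
  void set_drift(int drift) override {}
  void set_level(int level) override {}
  void set_keypress(bool keypress) override {}
};

class NoopAecDump : public AecDump {
 public:
  void WriteInitMessage(const InternalAPMStreamsConfig& config) override {}
  void WriteConfig(const InternalAPMConfig& config, bool forced) override {}
  std::unique_ptr<CaptureStreamInfo> GetCaptureStreamInfo() override {
    return std::unique_ptr<CaptureStreamInfo>(new NoopCaptureStreamInfo());
  }
  void WriteCaptureStreamMessage(
      std::unique_ptr<CaptureStreamInfo> info) override {}
  void WriteRenderStreamMessage(
      rtc::ArrayView<rtc::ArrayView<const float>> src) override {}
  void WriteRenderStreamMessage(const AudioFrame& frame) override {}
};

}  // namespace webrtc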
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
 int AudioProcessingImpl::WriteMessageToDebugFile(
     FileWrapper* debug_file,