Index: webrtc/modules/audio_processing/audio_processing_impl.cc |
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc |
index 88229b4829aea7f0a4a9e68893c96f643e7944d1..fccfd4c8e6ef66c039a5e428ffd999f4dfe9ad2f 100644 |
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc |
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc |
@@ -305,6 +305,7 @@ AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config) |
AudioProcessingImpl::AudioProcessingImpl(const webrtc::Config& config, |
NonlinearBeamformer* beamformer) |
: high_pass_filter_impl_(new HighPassFilterImpl(this)), |
+ aec_dump_(AecDump::CreateNullDump()), |
public_submodules_(new ApmPublicSubmodules()), |
private_submodules_(new ApmPrivateSubmodules(beamformer)), |
constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
@@ -525,7 +526,7 @@ int AudioProcessingImpl::InitializeLocked() { |
} |
} |
#endif |
- |
+ aec_dump_->WriteInitMessage(formats_.api_format); |
return kNoError; |
} |
@@ -816,7 +817,30 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
} |
#endif |
+ std::unique_ptr<AecDump::CaptureStreamInfo> stream_info = |
peah-webrtc
2017/04/07 12:57:15
I definitely like the aspect of the NullImplementation.
aleloi
2017/04/12 11:05:29
I agree. Now everything is wrapped with if (aec_dump_).
|
+ aec_dump_->GetCaptureStreamInfo(); |
+ const size_t channel_size = |
+ formats_.api_format.input_stream().num_frames();  // element count for ArrayView, not bytes |
+ |
+ { |
+ std::vector<rtc::ArrayView<const float>> src_view; |
+ for (size_t i = 0; i < formats_.api_format.input_stream().num_channels(); |
+ ++i) { |
+ src_view.emplace_back(src[i], channel_size); |
+ } |
+ stream_info->AddInput(src_view); |
+ } |
capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
+ |
+ RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && |
+ public_submodules_->echo_control_mobile->is_enabled())); |
+ |
+ stream_info->set_delay(capture_nonlocked_.stream_delay_ms); |
+ stream_info->set_drift( |
+ public_submodules_->echo_cancellation->stream_drift_samples()); |
+ stream_info->set_level(gain_control()->stream_analog_level()); |
+ stream_info->set_keypress(capture_.key_pressed); |
+ |
RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
@@ -833,6 +857,17 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
&crit_debug_, &debug_dump_.capture)); |
} |
#endif |
+ { |
+ const size_t channel_size = |
+ formats_.api_format.output_stream().num_frames();  // element count for ArrayView, not bytes |
+ std::vector<rtc::ArrayView<const float>> dest_view; |
+ for (size_t i = 0; i < formats_.api_format.output_stream().num_channels(); |
+ ++i) { |
+ dest_view.emplace_back(dest[i], channel_size); |
+ } |
+ stream_info->AddOutput(dest_view); |
+ } |
+ aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); |
return kNoError; |
} |
@@ -1070,6 +1105,9 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
return kBadDataLengthError; |
} |
+ std::unique_ptr<AecDump::CaptureStreamInfo> stream_info = |
+ aec_dump_->GetCaptureStreamInfo(); |
peah-webrtc
2017/04/07 12:57:15
Same as above.
aleloi
2017/04/12 11:05:29
Acknowledged.
|
+ stream_info->AddInput(*frame); |
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
if (debug_dump_.debug_file->is_open()) { |
RETURN_ON_ERR(WriteConfigMessage(false)); |
@@ -1083,10 +1121,22 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
#endif |
capture_.capture_audio->DeinterleaveFrom(frame); |
+ |
+ RTC_DCHECK(!(public_submodules_->echo_cancellation->is_enabled() && |
+ public_submodules_->echo_control_mobile->is_enabled())); |
+ |
+ stream_info->set_delay(capture_nonlocked_.stream_delay_ms); |
+ stream_info->set_drift( |
+ public_submodules_->echo_cancellation->stream_drift_samples()); |
+ stream_info->set_level(gain_control()->stream_analog_level()); |
+ stream_info->set_keypress(capture_.key_pressed); |
+ |
RETURN_ON_ERR(ProcessCaptureStreamLocked()); |
capture_.capture_audio->InterleaveTo( |
frame, submodule_states_.CaptureMultiBandProcessingActive()); |
+ stream_info->AddOutput(*frame); |
+ aec_dump_->WriteCaptureStreamMessage(std::move(stream_info)); |
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
if (debug_dump_.debug_file->is_open()) { |
audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
@@ -1363,6 +1413,15 @@ int AudioProcessingImpl::AnalyzeReverseStreamLocked( |
&crit_debug_, &debug_dump_.render)); |
} |
#endif |
+ std::vector<rtc::ArrayView<const float>> src_view; |
+ const size_t channel_size = |
peah-webrtc
2017/04/07 12:57:15
Same as above.
aleloi
2017/04/12 11:05:29
Acknowledged.
 |
+ formats_.api_format.reverse_input_stream().num_frames();  // element count for ArrayView, not bytes |
+ |
+ for (size_t i = 0; |
+ i < formats_.api_format.reverse_input_stream().num_channels(); ++i) { |
+ src_view.emplace_back(src[i], channel_size); |
+ } |
+ aec_dump_->WriteRenderStreamMessage(src_view); |
render_.render_audio->CopyFrom(src, |
formats_.api_format.reverse_input_stream()); |
@@ -1416,6 +1475,8 @@ int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
&crit_debug_, &debug_dump_.render)); |
} |
#endif |
+ aec_dump_->WriteRenderStreamMessage(*frame); |
+ |
render_.render_audio->DeinterleaveFrom(frame); |
RETURN_ON_ERR(ProcessRenderStreamLocked()); |
render_.render_audio->InterleaveTo( |
@@ -1499,6 +1560,20 @@ int AudioProcessingImpl::delay_offset_ms() const { |
return capture_.delay_offset_ms; |
} |
+void AudioProcessingImpl::StartDebugRecording( |
+ std::unique_ptr<AecDump> aec_dump) { |
+ rtc::CritScope cs_render(&crit_render_); |
+ rtc::CritScope cs_capture(&crit_capture_); |
+ aec_dump_ = std::move(aec_dump); |
+ |
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
+ const int error = WriteConfigMessage(true); |
+ RTC_DCHECK(error == kNoError);  // WriteConfigMessage() returns kNoError (0) on success. |
+#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |
+ |
+ aec_dump_->WriteInitMessage(formats_.api_format); |
+} |
+ |
int AudioProcessingImpl::StartDebugRecording( |
const char filename[AudioProcessing::kMaxFilenameSize], |
int64_t max_log_size_bytes) { |
@@ -1511,6 +1586,7 @@ int AudioProcessingImpl::StartDebugRecording( |
return kNullPointerError; |
} |
+ aec_dump_->WriteInitMessage(formats_.api_format); |
peah-webrtc
2017/04/07 12:57:15
Is this needed? Nothing is anyway written until then.
aleloi
2017/04/12 11:05:29
Good point. Removed.
|
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; |
// Stop any ongoing recording. |
@@ -1538,6 +1614,7 @@ int AudioProcessingImpl::StartDebugRecording(FILE* handle, |
return kNullPointerError; |
} |
+ aec_dump_->WriteInitMessage(formats_.api_format); |
peah-webrtc
2017/04/07 12:57:15
Same as above.
aleloi
2017/04/12 11:05:29
Acknowledged.
|
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes; |
@@ -1573,6 +1650,7 @@ int AudioProcessingImpl::StopDebugRecording() { |
// Run in a single-threaded manner. |
rtc::CritScope cs_render(&crit_render_); |
rtc::CritScope cs_capture(&crit_capture_); |
+ aec_dump_ = AecDump::CreateNullDump(); |
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
// We just return if recording hasn't started. |
@@ -1937,6 +2015,43 @@ int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
config.set_intelligibility_enhancer_enabled( |
capture_nonlocked_.intelligibility_enabled); |
+ InternalAPMConfig apm_config; |
+ |
+ apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled(); |
+ apm_config.aec_delay_agnostic_enabled = |
+ public_submodules_->echo_cancellation->is_delay_agnostic_enabled(); |
+ apm_config.aec_drift_compensation_enabled = |
+ public_submodules_->echo_cancellation->is_drift_compensation_enabled(); |
+ apm_config.aec_extended_filter_enabled = |
+ public_submodules_->echo_cancellation->is_extended_filter_enabled(); |
+ apm_config.aec_suppression_level = static_cast<int>( |
+ public_submodules_->echo_cancellation->suppression_level()); |
+ |
+ apm_config.aecm_enabled = |
+ public_submodules_->echo_control_mobile->is_enabled(); |
+ apm_config.aecm_comfort_noise_enabled = |
+ public_submodules_->echo_control_mobile->is_comfort_noise_enabled(); |
+ apm_config.aecm_routing_mode = |
+ static_cast<int>(public_submodules_->echo_control_mobile->routing_mode()); |
+ |
+ apm_config.agc_enabled = public_submodules_->gain_control->is_enabled(); |
+ apm_config.agc_mode = |
+ static_cast<int>(public_submodules_->gain_control->mode()); |
+ apm_config.agc_limiter_enabled = |
+ public_submodules_->gain_control->is_limiter_enabled(); |
+ apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc; |
+ |
+ apm_config.hpf_enabled = config_.high_pass_filter.enabled; |
+ |
+ apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled(); |
+ apm_config.ns_level = |
+ static_cast<int>(public_submodules_->noise_suppression->level()); |
+ |
+ apm_config.transient_suppression_enabled = |
+ capture_.transient_suppressor_enabled; |
+ apm_config.intelligibility_enhancer_enabled = |
+ capture_nonlocked_.intelligibility_enabled; |
+ |
std::string experiments_description = |
public_submodules_->echo_cancellation->GetExperimentsDescription(); |
// TODO(peah): Add semicolon-separated concatenations of experiment |
@@ -1952,6 +2067,9 @@ int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
} |
config.set_experiments_description(experiments_description); |
+ apm_config.experiments_description = experiments_description; |
+ aec_dump_->WriteConfig(apm_config, forced); |
+ |
std::string serialized_config = config.SerializeAsString(); |
if (!forced && |
debug_dump_.capture.last_serialized_config == serialized_config) { |