Chromium Code Reviews
Index: webrtc/modules/audio_processing/audio_processing_impl.cc

diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index 816210f34f8738eb7a042adee69f87f5ed1a99d8..85a3e6003b4c6256e4f1df916267fffec6e075a1 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -129,6 +129,9 @@ static const size_t kMaxAllowedValuesOfSamplesPerFrame = 160;
 // reverse and forward call numbers.
 static const size_t kMaxNumFramesToBuffer = 100;
+// Maximum number of audio channels in the input and output streams.
+constexpr size_t kMaxNumChannels = 4;
 
peah-webrtc
2017/04/21 14:05:15
I think you commented that this would be changed.
 
aleloi
2017/04/24 11:10:09
The name or the number? I've changed the name now
 
+
 class HighPassFilterImpl : public HighPassFilter {
  public:
   explicit HighPassFilterImpl(AudioProcessingImpl* apm) : apm_(apm) {}
@@ -152,6 +155,37 @@ class HighPassFilterImpl : public HighPassFilter {
   RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(HighPassFilterImpl);
 };
+webrtc::InternalAPMStreamsConfig ToStreamsConfig(
+    const ProcessingConfig& api_format) {
+  webrtc::InternalAPMStreamsConfig result;
+  result.input_sample_rate = api_format.input_stream().sample_rate_hz();
+  result.input_num_channels = api_format.input_stream().num_channels();
+  result.output_num_channels = api_format.output_stream().num_channels();
+  result.render_input_num_channels =
+      api_format.reverse_input_stream().num_channels();
+  result.render_input_sample_rate =
+      api_format.reverse_input_stream().sample_rate_hz();
+  result.output_sample_rate = api_format.output_stream().sample_rate_hz();
+  result.render_output_sample_rate =
+      api_format.reverse_output_stream().sample_rate_hz();
+  result.render_output_num_channels =
+      api_format.reverse_output_stream().num_channels();
+  return result;
+}
+
+rtc::ArrayView<rtc::ArrayView<const float>> stream_view(
 
peah-webrtc
2017/04/21 14:05:15
Does this name follow the style guideline? What ab
 
aleloi
2017/04/24 11:10:09
I thought the style allowed names_with_underscores
 
+    const float* const* stream,
+    size_t channel_size,
+    size_t num_channels) {
+  RTC_DCHECK_LE(num_channels, kMaxNumChannels);
+
+  std::array<rtc::ArrayView<const float>, kMaxNumChannels> array_stream_view;
+  for (size_t i = 0; i < std::min(num_channels, kMaxNumChannels); ++i) {
+    array_stream_view[i] = rtc::ArrayView<const float>(stream[i], channel_size);
+  }
+  return rtc::ArrayView<rtc::ArrayView<const float>>(&array_stream_view[0],
+                                                     num_channels);
+}
 }  // namespace

 // Throughout webrtc, it's assumed that success is represented by zero.
@@ -526,7 +560,9 @@ int AudioProcessingImpl::InitializeLocked() {
     }
   }
 #endif
-
+  if (aec_dump_) {
+    aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
+  }
   return kNoError;
 }
@@ -824,6 +860,20 @@ int AudioProcessingImpl::ProcessStream(const float* const* src,
   }
 #endif
+  std::unique_ptr<AecDump::CaptureStreamInfo> stream_info;
+  if (aec_dump_) {
+    aec_dump_->WriteConfig(CollectApmConfig(), false);
 
peah-webrtc
2017/04/21 14:05:15
Same thing here, would it be possible to move the
 
aleloi
2017/04/24 11:10:09
WDYT about the latest change? I don't think it's p
 
+
+    stream_info = aec_dump_->GetCaptureStreamInfo();
+    RTC_DCHECK(stream_info);
+    const size_t channel_size =
+        sizeof(float) * formats_.api_format.input_stream().num_frames();
+    const size_t num_channels =
+        formats_.api_format.input_stream().num_channels();
+    stream_info->AddInput(stream_view(src, channel_size, num_channels));
+    PopulateStreamInfoWithConfig(stream_info.get());
+  }
+
   capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream());
   RETURN_ON_ERR(ProcessCaptureStreamLocked());
   capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
@@ -841,6 +891,14 @@ int AudioProcessingImpl::ProcessStream(const float* const* src,
         &crit_debug_, &debug_dump_.capture));
   }
 #endif
+  if (aec_dump_) {
+    const size_t channel_size =
+        sizeof(float) * formats_.api_format.output_stream().num_frames();
 
peah-webrtc
2017/04/21 14:05:15
Same thing here, would it be possible to move the
 
aleloi
2017/04/24 11:10:09
Acknowledged.
 
+    const size_t num_channels =
+        formats_.api_format.output_stream().num_channels();
+    stream_info->AddOutput(stream_view(dest, channel_size, num_channels));
+    aec_dump_->WriteCaptureStreamMessage(std::move(stream_info));
+  }
   return kNoError;
 }
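
For orientation, this is the capture-side logging sequence that the two hunks above set up for the float API; the sketch below just restates the calls from this diff in order (no new behavior):

  // Per ProcessStream(const float* const* src, ..., float* const* dest),
  // when an AecDump is attached:
  //   aec_dump_->WriteConfig(CollectApmConfig(), false);   // config snapshot (delta only)
  //   stream_info = aec_dump_->GetCaptureStreamInfo();
  //   stream_info->AddInput(stream_view(src, ...));        // pre-processing audio
  //   PopulateStreamInfoWithConfig(stream_info.get());     // delay, drift, level, keypress
  //   ... ProcessCaptureStreamLocked() runs ...
  //   stream_info->AddOutput(stream_view(dest, ...));      // post-processing audio
  //   aec_dump_->WriteCaptureStreamMessage(std::move(stream_info));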
@@ -1078,6 +1136,15 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
     return kBadDataLengthError;
   }
+  std::unique_ptr<AecDump::CaptureStreamInfo> stream_info;
+  if (aec_dump_) {
+    stream_info = aec_dump_->GetCaptureStreamInfo();
 
peah-webrtc
2017/04/21 14:05:15
Same thing here, would it be possible to move the
 
aleloi
2017/04/24 11:10:09
Acknowledged.
 
+    RTC_DCHECK(stream_info);
+    stream_info->AddInput(*frame);
+    PopulateStreamInfoWithConfig(stream_info.get());
+    aec_dump_->WriteConfig(CollectApmConfig(), false);
+  }
+
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_dump_.debug_file->is_open()) {
     RETURN_ON_ERR(WriteConfigMessage(false));
@@ -1095,6 +1162,10 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   capture_.capture_audio->InterleaveTo(
       frame, submodule_states_.CaptureMultiBandProcessingActive());
+  if (aec_dump_) {
+    stream_info->AddOutput(*frame);
+    aec_dump_->WriteCaptureStreamMessage(std::move(stream_info));
+  }
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_dump_.debug_file->is_open()) {
     audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
@@ -1376,7 +1447,14 @@ int AudioProcessingImpl::AnalyzeReverseStreamLocked(
         &crit_debug_, &debug_dump_.render));
   }
 #endif
-
+  if (aec_dump_) {
+    const size_t channel_size =
+        sizeof(float) * formats_.api_format.reverse_input_stream().num_frames();
+    const size_t num_channels =
 
peah-webrtc
2017/04/21 14:05:15
Same thing here, would it be possible to move the
 
aleloi
2017/04/24 11:10:09
Acknowledged.
 
+        formats_.api_format.reverse_input_stream().num_channels();
+    aec_dump_->WriteRenderStreamMessage(
+        stream_view(src, channel_size, num_channels));
+  }
   render_.render_audio->CopyFrom(src,
       formats_.api_format.reverse_input_stream());
   return ProcessRenderStreamLocked();
@@ -1429,6 +1507,10 @@ int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
         &crit_debug_, &debug_dump_.render));
   }
 #endif
+  if (aec_dump_) {
+    aec_dump_->WriteRenderStreamMessage(*frame);
+  }
+
   render_.render_audio->DeinterleaveFrom(frame);
   RETURN_ON_ERR(ProcessRenderStreamLocked());
   render_.render_audio->InterleaveTo(
@@ -1512,6 +1594,17 @@ int AudioProcessingImpl::delay_offset_ms() const {
   return capture_.delay_offset_ms;
 }
+void AudioProcessingImpl::StartDebugRecording(
+    std::unique_ptr<AecDump> aec_dump) {
+  rtc::CritScope cs_render(&crit_render_);
+  rtc::CritScope cs_capture(&crit_capture_);
+  RTC_DCHECK(aec_dump);
+  aec_dump_ = std::move(aec_dump);
+
+  aec_dump_->WriteConfig(CollectApmConfig(), true);
+  aec_dump_->WriteInitMessage(ToStreamsConfig(formats_.api_format));
+}
+
 int AudioProcessingImpl::StartDebugRecording(
     const char filename[AudioProcessing::kMaxFilenameSize],
     int64_t max_log_size_bytes) {
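
For context, a minimal caller-side sketch of the new unique_ptr overload added above. It is not part of this CL: it assumes the matching declaration on the public AudioProcessing interface introduced elsewhere in this change, and it assumes the AecDump header lives next to audio_processing.h; the embedder would obtain the AecDump instance from whatever factory it uses.

  // Sketch only; header locations and the interface declaration are assumptions.
  #include <memory>
  #include <utility>

  #include "webrtc/modules/audio_processing/include/aec_dump.h"
  #include "webrtc/modules/audio_processing/include/audio_processing.h"

  void AttachDump(webrtc::AudioProcessing* apm,
                  std::unique_ptr<webrtc::AecDump> dump) {
    // Ownership moves into APM. Per the implementation above, the current
    // config and an init message are written immediately under both locks;
    // subsequent Process*Stream() calls are logged until the dump is detached.
    apm->StartDebugRecording(std::move(dump));
  }

  void DetachDump(webrtc::AudioProcessing* apm) {
    apm->StopDebugRecording();  // Resets the owned AecDump (see the hunk below).
  }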
@@ -1586,6 +1679,7 @@ int AudioProcessingImpl::StopDebugRecording() {
   // Run in a single-threaded manner.
   rtc::CritScope cs_render(&crit_render_);
   rtc::CritScope cs_capture(&crit_capture_);
+  aec_dump_.reset();
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // We just return if recording hasn't started.
@@ -1837,6 +1931,71 @@ void AudioProcessingImpl::UpdateHistogramsOnCallEnd() {
   capture_.last_aec_system_delay_ms = 0;
 }
+InternalAPMConfig AudioProcessingImpl::CollectApmConfig() {
+  std::string experiments_description =
+      public_submodules_->echo_cancellation->GetExperimentsDescription();
+  // TODO(peah): Add semicolon-separated concatenations of experiment
+  // descriptions for other submodules.
+  if (capture_nonlocked_.level_controller_enabled) {
+    experiments_description += "LevelController;";
+  }
+  if (constants_.agc_clipped_level_min != kClippedLevelMin) {
+    experiments_description += "AgcClippingLevelExperiment;";
+  }
+  if (capture_nonlocked_.echo_canceller3_enabled) {
+    experiments_description += "EchoCanceller3;";
+  }
+
+  InternalAPMConfig apm_config;
+
+  apm_config.aec_enabled = public_submodules_->echo_cancellation->is_enabled();
+  apm_config.aec_delay_agnostic_enabled =
+      public_submodules_->echo_cancellation->is_delay_agnostic_enabled();
+  apm_config.aec_drift_compensation_enabled =
+      public_submodules_->echo_cancellation->is_drift_compensation_enabled();
+  apm_config.aec_extended_filter_enabled =
+      public_submodules_->echo_cancellation->is_extended_filter_enabled();
+  apm_config.aec_suppression_level = static_cast<int>(
+      public_submodules_->echo_cancellation->suppression_level());
+
+  apm_config.aecm_enabled =
+      public_submodules_->echo_control_mobile->is_enabled();
+  apm_config.aecm_comfort_noise_enabled =
+      public_submodules_->echo_control_mobile->is_comfort_noise_enabled();
+  apm_config.aecm_routing_mode =
+      static_cast<int>(public_submodules_->echo_control_mobile->routing_mode());
+
+  apm_config.agc_enabled = public_submodules_->gain_control->is_enabled();
+  apm_config.agc_mode =
+      static_cast<int>(public_submodules_->gain_control->mode());
+  apm_config.agc_limiter_enabled =
+      public_submodules_->gain_control->is_limiter_enabled();
+  apm_config.noise_robust_agc_enabled = constants_.use_experimental_agc;
+
+  apm_config.hpf_enabled = config_.high_pass_filter.enabled;
+
+  apm_config.ns_enabled = public_submodules_->noise_suppression->is_enabled();
+  apm_config.ns_level =
+      static_cast<int>(public_submodules_->noise_suppression->level());
+
+  apm_config.transient_suppression_enabled =
+      capture_.transient_suppressor_enabled;
+  apm_config.intelligibility_enhancer_enabled =
+      capture_nonlocked_.intelligibility_enabled;
+  apm_config.experiments_description = experiments_description;
+  return apm_config;
+}
+
+void AudioProcessingImpl::PopulateStreamInfoWithConfig(
+    AecDump::CaptureStreamInfo* stream_info) const {
+  RTC_DCHECK(stream_info);
+  stream_info->set_delay(capture_nonlocked_.stream_delay_ms);
+  stream_info->set_drift(
+      public_submodules_->echo_cancellation->stream_drift_samples());
+  stream_info->set_level(gain_control()->stream_analog_level());
+  stream_info->set_keypress(capture_.key_pressed);
+}
+
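To make the experiments string concrete, a small illustration of what CollectApmConfig() hands to WriteConfig(); the values are invented, and the AEC-specific text depends entirely on GetExperimentsDescription(), which is not specified here:

  // Illustration only: with the level controller and EchoCanceller3 enabled
  // and a non-default AGC clipping level, the snapshot would carry roughly
  //   apm_config.experiments_description ==
  //       "<aec experiments>LevelController;AgcClippingLevelExperiment;EchoCanceller3;"
  // where "<aec experiments>" is whatever the echo canceller reported.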
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
 int AudioProcessingImpl::WriteMessageToDebugFile(
     FileWrapper* debug_file,