Index: webrtc/modules/audio_processing/audio_processing_impl.cc
diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc
index 3813a9aca84658dd18ae32427cdbd578d6712561..b96d020595fa274a005b85c372eeb4e19a80ec08 100644
--- a/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -16,6 +16,9 @@
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+extern "C" {
+#include "webrtc/modules/audio_processing/aec/aec_core.h"
+}
#include "webrtc/modules/audio_processing/agc/agc_manager_direct.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
@@ -33,6 +36,7 @@
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
+#include "webrtc/system_wrappers/interface/metrics.h"
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Files generated at build-time by the protobuf compiler.
@@ -170,6 +174,8 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config,
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
+      last_stream_delay_ms_(0),
+      last_aec_system_delay_ms_(0),
      output_will_be_muted_(false),
      key_pressed_(false),
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
@@ -587,6 +593,8 @@ int AudioProcessingImpl::ProcessStreamLocked() {
  }
#endif
+  MaybeUpdateHistograms();
+
  AudioBuffer* ca = capture_audio_.get();  // For brevity.
  if (use_new_agc_ && gain_control_->is_enabled()) {
    agc_manager_->AnalyzePreProcess(ca->channels()[0],
@@ -990,6 +998,35 @@ void AudioProcessingImpl::InitializeBeamformer() {
  }
}
+void AudioProcessingImpl::MaybeUpdateHistograms() {
+  static const int kMinDiffDelayMs = 50;
+
+  if (echo_cancellation()->is_enabled()) {
+    // Detect a jump in platform reported system delay and log the difference.
+    const int diff_stream_delay_ms = stream_delay_ms_ - last_stream_delay_ms_;
+    if (diff_stream_delay_ms > kMinDiffDelayMs && last_stream_delay_ms_ != 0) {
+      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump",
+                           diff_stream_delay_ms, kMinDiffDelayMs, 1000,
+                           1001 - kMinDiffDelayMs);
hlundin-webrtc 2015/06/29 08:53:18
Do we need granularity of 1 ms?

bjornv1 2015/06/29 10:40:23
No we don't. 10 ms should be just fine.

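For reference, a sketch of what the coarser granularity could look like (an assumption, not the committed follow-up; it relies on the last RTC_HISTOGRAM_COUNTS argument being the bucket count, so 100 buckets over the 50-1000 ms range give roughly 10 ms resolution instead of the ~1 ms implied by 1001 - kMinDiffDelayMs buckets):

  // Sketch only: ~10 ms granularity via a reduced bucket count.
  RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump",
                       diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100);

The same bucket-count change would presumably apply to the WebRTC.Audio.AecSystemDelayJump call further down, which is what the later "Same here" comment refers to.
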
+    }
+    last_stream_delay_ms_ = stream_delay_ms_;
+
+    // Detect a jump in AEC system delay and log the difference.
+    const int frames_per_ms = split_rate_ / 1000;
hlundin-webrtc 2015/06/29 08:53:18
This should be an exact division (zero remainder).

bjornv1 2015/06/29 10:40:23
Done.

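One way to make the zero-remainder expectation explicit (a sketch only; it assumes DCHECK_EQ from webrtc/base/checks.h is acceptable here, and the hunk does not show which change the "Done" actually refers to):

  // Sketch only: split_rate_ is expected to be a whole number of kHz, so the
  // samples-per-millisecond division must be exact.
  DCHECK_EQ(0, split_rate_ % 1000);
  const int frames_per_ms = split_rate_ / 1000;
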
+    const int aec_system_delay_ms =
hlundin-webrtc 2015/06/29 08:53:18
(I'm guessing this one is not expected to be an exact division.)

bjornv1 2015/06/29 10:40:23
No, this can end up with rounding.

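For reference, the rounding the reply refers to comes from integer division truncating toward zero; a worked example with assumed numbers, not values from this CL:

  // With split_rate_ = 16000 Hz, frames_per_ms = 16; an AEC system delay of
  // 1000 samples then maps to 1000 / 16 = 62 ms, dropping the 0.5 ms remainder.
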
+        WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms;
+    const int diff_aec_system_delay_ms = aec_system_delay_ms -
+        last_aec_system_delay_ms_;
+    if (diff_aec_system_delay_ms > kMinDiffDelayMs &&
+        last_aec_system_delay_ms_ != 0) {
+      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump",
+                           diff_aec_system_delay_ms, kMinDiffDelayMs, 1000,
+                           1001 - kMinDiffDelayMs);
hlundin-webrtc 2015/06/29 08:53:18
Same here.

bjornv1 2015/06/29 10:40:23
Done.

+    }
+    last_aec_system_delay_ms_ = aec_system_delay_ms;
+  }
+}
+
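Not shown in this section is the matching audio_processing_impl.h change; based on the usage above it presumably adds declarations along these lines (a sketch, not the actual header diff):

  // Sketch of the implied header-side additions.
  void MaybeUpdateHistograms();  // Logs delay-jump histograms; called from ProcessStreamLocked().
  int last_stream_delay_ms_;
  int last_aec_system_delay_ms_;
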
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
int AudioProcessingImpl::WriteMessageToDebugFile() {
  int32_t size = event_msg_->ByteSize();