Index: webrtc/voice_engine/channel.cc |
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc |
index 16122709c31fe976ddde3c63a477d78957f72070..a2f37252aedd31ca28376d3b764158d7cc6b8ef8 100644 |
--- a/webrtc/voice_engine/channel.cc |
+++ b/webrtc/voice_engine/channel.cc |
@@ -696,7 +696,19 @@ MixerParticipant::AudioFrameInfo Channel::GetAudioFrameWithMuted( |
// Measure audio level (0-9) |
// TODO(henrik.lundin) Use the |muted| information here too. |
+ // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| as well. |
+ // TODO(zstein): Use sample_rate_hz_? |
hlundin-webrtc
2017/07/03 13:23:03
What would you do with sample_rate_hz_?
Zach Stein
2017/07/06 17:41:37
I wasn't sure if this method was intended to process [comment truncated in capture]
hlundin-webrtc
2017/07/07 06:48:44
Acknowledged.
|
_outputAudioLevel.ComputeLevel(*audioFrame); |
+ // See the description for "totalAudioEnergy" in the WebRTC stats spec for an |
+ // explanation of these formulas. In short, we need a value that can be used |
+ // to compute RMS audio levels over different time intervals, by taking the |
+ // difference between the results from two getStats calls. To do this, the |
+ // value needs to be of units "squared sample value * time". |
+ double additional_energy = |
+ static_cast<double>(_outputAudioLevel.LevelFullRange()) / INT16_MAX; |
+ additional_energy *= additional_energy; |
+ _totalOutputEnergy += additional_energy * 0.01; |
+ _totalOutputDuration += 0.01; |
if (capture_start_rtp_time_stamp_ < 0 && audioFrame->timestamp_ != 0) { |
// The first frame with a valid rtp timestamp. |
@@ -2370,6 +2382,14 @@ int Channel::GetSpeechOutputLevelFullRange() const { |
return _outputAudioLevel.LevelFullRange(); |
} |
+double Channel::GetTotalOutputEnergy() const { |
+ return _totalOutputEnergy; |
+} |
+ |
+double Channel::GetTotalOutputDuration() const { |
+ return _totalOutputDuration; |
+} |
+ |
void Channel::SetInputMute(bool enable) { |
rtc::CritScope cs(&volume_settings_critsect_); |
input_mute_ = enable; |