Index: webrtc/voice_engine/transmit_mixer.cc |
diff --git a/webrtc/voice_engine/transmit_mixer.cc b/webrtc/voice_engine/transmit_mixer.cc |
index 6796f8457c5e5b3060920c6be9995b8f739612b2..4b1084a7ac433cf6aa26b669cf563f92a29cc705 100644 |
--- a/webrtc/voice_engine/transmit_mixer.cc |
+++ b/webrtc/voice_engine/transmit_mixer.cc |
@@ -308,6 +308,20 @@ TransmitMixer::PrepareDemux(const void* audioSamples, |
// --- Measure audio level of speech after all processing. |
_audioLevel.ComputeLevel(_audioFrame); |
+ |
+ // TODO(zstein): Extract helper to share with voice_engine/channel.cc |
Zach Stein
2017/07/06 17:41:37
I think we could add this computation to AudioLevel.
|
+ // TODO(zstein): Use sample_rate_hz_? |
hlundin-webrtc
2017/07/03 13:23:03
What would you do with sample_rate_hz_?
Zach Stein
2017/07/06 17:41:37
Done.
|
+ // See the description for "totalAudioEnergy" in the WebRTC stats spec for |
+ // an explanation of these formulas. In short, we need a value that can be |
+ // used to compute RMS audio levels over different time intervals, by taking |
+ // the difference between the results from two getStats calls. To do this, |
+ // the value needs to be of units "squared sample value * time". |
+ double additional_energy = |
+ static_cast<double>(_audioLevel.LevelFullRange()) / INT16_MAX; |
+ additional_energy *= additional_energy; |
+ _totalInputEnergy += additional_energy * 0.01; |
+ _totalInputDuration += 0.01; |
+ |
return 0; |
} |
@@ -851,6 +865,14 @@ int16_t TransmitMixer::AudioLevelFullRange() const |
return _audioLevel.LevelFullRange(); |
} |
+double TransmitMixer::GetTotalInputEnergy() const { |
+ return _totalInputEnergy; |
+} |
+ |
+double TransmitMixer::GetTotalInputDuration() const { |
+ return _totalInputDuration; |
+} |
+ |
bool TransmitMixer::IsRecordingCall() |
{ |
return _fileCallRecording; |