Index: webrtc/voice_engine/transmit_mixer.cc
diff --git a/webrtc/voice_engine/transmit_mixer.cc b/webrtc/voice_engine/transmit_mixer.cc
index 6796f8457c5e5b3060920c6be9995b8f739612b2..b620d6f6ebd3b0369cd12c20333033aa57c38b65 100644
--- a/webrtc/voice_engine/transmit_mixer.cc
+++ b/webrtc/voice_engine/transmit_mixer.cc
@@ -308,6 +308,20 @@ TransmitMixer::PrepareDemux(const void* audioSamples,
 
     // --- Measure audio level of speech after all processing.
    _audioLevel.ComputeLevel(_audioFrame);
+
+  // See the description for "totalAudioEnergy" in the WebRTC stats spec
+  // (https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy)
+  // for an explanation of these formulas. In short, we need a value that can
+  // be used to compute RMS audio levels over different time intervals, by
+  // taking the difference between the results from two getStats calls. To do
+  // this, the value needs to be of units "squared sample value * time".
+  double additional_energy =
+      static_cast<double>(_audioLevel.LevelFullRange()) / INT16_MAX;
+  additional_energy *= additional_energy;
+  double sample_duration = static_cast<double>(nSamples) / samplesPerSec;
+  totalInputEnergy_ += additional_energy * sample_duration;
+  totalInputDuration_ += sample_duration;
+
    return 0;
 }
 
@@ -851,6 +865,14 @@ int16_t TransmitMixer::AudioLevelFullRange() const
    return _audioLevel.LevelFullRange();
 }
 
+double TransmitMixer::GetTotalInputEnergy() const {
+  return totalInputEnergy_;
+}
+
+double TransmitMixer::GetTotalInputDuration() const {
+  return totalInputDuration_;
+}
+
 bool TransmitMixer::IsRecordingCall()
 {
    return _fileCallRecording;
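
For context on how these accumulators are meant to be consumed: per the totalAudioEnergy definition referenced in the comment above, the RMS level over any interval is the square root of the energy delta divided by the duration delta between two getStats() snapshots. The sketch below illustrates that calculation on the consumer side; the Snapshot struct and IntervalRms() helper are hypothetical stand-ins for whatever surfaces GetTotalInputEnergy()/GetTotalInputDuration(), and are not part of this change.

#include <cmath>

// Hypothetical copy of the cumulative values at one point in time, e.g. taken
// from TransmitMixer::GetTotalInputEnergy() and GetTotalInputDuration().
struct Snapshot {
  double total_energy;    // Units: squared sample value * seconds.
  double total_duration;  // Units: seconds.
};

// Returns the RMS audio level, in the range [0.0, 1.0], for the interval
// between two snapshots, or 0.0 if no audio was processed in between.
double IntervalRms(const Snapshot& earlier, const Snapshot& later) {
  double energy = later.total_energy - earlier.total_energy;
  double duration = later.total_duration - earlier.total_duration;
  if (duration <= 0.0)
    return 0.0;
  return std::sqrt(energy / duration);
}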
|