| Index: webrtc/voice_engine/channel.cc
|
| diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
|
| index 16122709c31fe976ddde3c63a477d78957f72070..87553048cd938c14fd574fb1d6aef978f4c4e2f1 100644
|
| --- a/webrtc/voice_engine/channel.cc
|
| +++ b/webrtc/voice_engine/channel.cc
|
| @@ -696,7 +696,17 @@ MixerParticipant::AudioFrameInfo Channel::GetAudioFrameWithMuted(
|
|
|
| // Measure audio level (0-9)
|
| // TODO(henrik.lundin) Use the |muted| information here too.
|
| + // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| as well.
|
| _outputAudioLevel.ComputeLevel(*audioFrame);
|
| + // See the description for "totalAudioEnergy" in the WebRTC stats spec for an
|
| + // explanation of these formulas. In short, we need a value that can be used
|
| + // to compute RMS audio levels over different time intervals, by taking the
|
| + // difference between the results from two getStats calls. To do this, the
|
| + // value needs to be of units "squared sample value * time".
|
| + double additional_energy = static_cast<double>(_outputAudioLevel.LevelFullRange()) / SHRT_MAX;
|
| + additional_energy *= additional_energy;
|
| + _totalOutputEnergy += additional_energy * 0.01;
|
| + _totalOutputDuration += 0.01;
|
|
|
| if (capture_start_rtp_time_stamp_ < 0 && audioFrame->timestamp_ != 0) {
|
| // The first frame with a valid rtp timestamp.
|
| @@ -2370,6 +2380,14 @@ int Channel::GetSpeechOutputLevelFullRange() const {
|
| return _outputAudioLevel.LevelFullRange();
|
| }
|
|
|
| +double Channel::GetTotalOutputEnergy() const {
|
| + return _totalOutputEnergy;
|
| +}
|
| +
|
| +double Channel::GetTotalOutputDuration() const {
|
| + return _totalOutputDuration;
|
| +}
|
| +
|
| void Channel::SetInputMute(bool enable) {
|
| rtc::CritScope cs(&volume_settings_critsect_);
|
| input_mute_ = enable;
|
|
|