Index: webrtc/audio/audio_receive_stream_unittest.cc
diff --git a/webrtc/audio/audio_receive_stream_unittest.cc b/webrtc/audio/audio_receive_stream_unittest.cc
index d6cce69dbf09d7b6053abecd43b970819591acd0..af8cd78d84e9030f814c5014e168fa7e19e9c805 100644
--- a/webrtc/audio/audio_receive_stream_unittest.cc
+++ b/webrtc/audio/audio_receive_stream_unittest.cc
@@ -13,8 +13,11 @@
 #include "webrtc/audio/audio_receive_stream.h"
 #include "webrtc/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_estimator.h"
 #include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/test/fake_voice_engine.h"
-namespace webrtc {
+namespace {
+
+using webrtc::ByteWriter;
 const size_t kAbsoluteSendTimeLength = 4;
@@ -45,7 +48,7 @@ size_t CreateRtpHeaderWithAbsSendTime(uint8_t* header,
   ByteWriter<uint16_t>::WriteBigEndian(header + 2, 0x1234);  // Sequence number.
   ByteWriter<uint32_t>::WriteBigEndian(header + 4, 0x5678);  // Timestamp.
   ByteWriter<uint32_t>::WriteBigEndian(header + 8, 0x4321);  // SSRC.
-  int32_t rtp_header_length = kRtpHeaderSize;
+  int32_t rtp_header_length = webrtc::kRtpHeaderSize;
   BuildAbsoluteSendTimeExtension(header + rtp_header_length, extension_id,
                                  abs_send_time);
@@ -53,15 +56,24 @@ size_t CreateRtpHeaderWithAbsSendTime(uint8_t* header,
   return rtp_header_length;
 }
+float Q14ToFloat(uint16_t v) {
tommi 2015/10/19 12:36:24:
ah, is this why? Can we move this method to a comm…
the sun 2015/10/19 14:25:02:
Done.
+  return static_cast<float>(v) / (1 << 14);
+}
+}  // namespace
+
+namespace webrtc {
+namespace test {
+
 TEST(AudioReceiveStreamTest, AudioPacketUpdatesBweWithTimestamp) {
   MockRemoteBitrateEstimator rbe;
+  FakeVoiceEngine fve;
   AudioReceiveStream::Config config;
   config.combined_audio_video_bwe = true;
-  config.voe_channel_id = 1;
+  config.voe_channel_id = fve.kReceiveChannelId;
   const int kAbsSendTimeId = 3;
   config.rtp.extensions.push_back(
       RtpExtension(RtpExtension::kAbsSendTime, kAbsSendTimeId));
-  internal::AudioReceiveStream recv_stream(&rbe, config);
+  internal::AudioReceiveStream recv_stream(&rbe, config, &fve);
   uint8_t rtp_packet[30];
   const int kAbsSendTimeValue = 1234;
   CreateRtpHeaderWithAbsSendTime(rtp_packet, kAbsSendTimeId, kAbsSendTimeValue);
@@ -74,4 +86,57 @@ TEST(AudioReceiveStreamTest, AudioPacketUpdatesBweWithTimestamp) {
   EXPECT_TRUE(
       recv_stream.DeliverRtp(rtp_packet, sizeof(rtp_packet), packet_time));
 }
+
+TEST(AudioReceiveStreamTest, GetStats) {
+  const uint32_t kSsrc1 = 667;
tommi 2015/10/19 12:36:24:
if ssrc is uint32_t, it would be good to have that…
the sun 2015/10/19 14:25:02:
The best option would be to create a template to r…
tommi 2015/10/19 14:55:44:
agreed and sgtm
+
+  MockRemoteBitrateEstimator rbe;
+  FakeVoiceEngine fve;
+  AudioReceiveStream::Config config;
+  config.rtp.remote_ssrc = kSsrc1;
+  config.voe_channel_id = fve.kReceiveChannelId;
+  internal::AudioReceiveStream recv_stream(&rbe, config, &fve);
+
+  AudioReceiveStream::Stats stats = recv_stream.GetStats();
+  const CallStatistics& kCallStats = fve.GetRecvCallStats();
tommi 2015/10/19 12:36:24:
this should just be call_stats. Even though it's…
the sun 2015/10/19 14:25:02:
As long as you don't mind me keeping the const qua…
+  const CodecInst& kCodecInst = fve.GetRecvRecCodecInst();
tommi 2015/10/19 12:36:24:
same here and throughout.
the sun 2015/10/19 14:25:02:
Done.
+  const NetworkStatistics& kNetStats = fve.GetRecvNetworkStats();
+  const AudioDecodingCallStats& kDecodeStats =
+      fve.GetRecvAudioDecodingCallStats();
+  EXPECT_EQ(kSsrc1, stats.remote_ssrc);
+  EXPECT_EQ(static_cast<int64_t>(kCallStats.bytesReceived), stats.bytes_rcvd);
+  EXPECT_EQ(static_cast<uint32_t>(kCallStats.packetsReceived),
+            stats.packets_rcvd);
+  EXPECT_EQ(kCallStats.cumulativeLost, stats.packets_lost);
+  EXPECT_EQ(static_cast<float>(kCallStats.fractionLost) / 256,
+            stats.fraction_lost);
+  EXPECT_EQ(std::string(kCodecInst.plname), stats.codec_name);
+  EXPECT_EQ(kCallStats.extendedMax, stats.ext_seqnum);
+  EXPECT_EQ(kCallStats.jitterSamples / (kCodecInst.plfreq / 1000),
+            stats.jitter_ms);
+  EXPECT_EQ(kNetStats.currentBufferSize, stats.jitter_buffer_ms);
+  EXPECT_EQ(kNetStats.preferredBufferSize, stats.jitter_buffer_preferred_ms);
+  EXPECT_EQ(static_cast<uint32_t>(fve.kRecvJitterBufferDelay +
+      fve.kRecvPlayoutBufferDelay), stats.delay_estimate_ms);
+  EXPECT_EQ(static_cast<int32_t>(fve.kRecvSpeechOutputLevel),
+            stats.audio_level);
+  EXPECT_EQ(Q14ToFloat(kNetStats.currentExpandRate), stats.expand_rate);
+  EXPECT_EQ(Q14ToFloat(kNetStats.currentSpeechExpandRate),
+            stats.speech_expand_rate);
+  EXPECT_EQ(Q14ToFloat(kNetStats.currentSecondaryDecodedRate),
+            stats.secondary_decoded_rate);
+  EXPECT_EQ(Q14ToFloat(kNetStats.currentAccelerateRate), stats.accelerate_rate);
+  EXPECT_EQ(Q14ToFloat(kNetStats.currentPreemptiveRate),
+            stats.preemptive_expand_rate);
+  EXPECT_EQ(kDecodeStats.calls_to_silence_generator,
+            stats.decoding_calls_to_silence_generator);
+  EXPECT_EQ(kDecodeStats.calls_to_neteq, stats.decoding_calls_to_neteq);
+  EXPECT_EQ(kDecodeStats.decoded_normal, stats.decoding_normal);
+  EXPECT_EQ(kDecodeStats.decoded_plc, stats.decoding_plc);
+  EXPECT_EQ(kDecodeStats.decoded_cng, stats.decoding_cng);
+  EXPECT_EQ(kDecodeStats.decoded_plc_cng, stats.decoding_plc_cng);
+  EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_,
+            stats.capture_start_ntp_time_ms);
+}
+}  // namespace test
 }  // namespace webrtc
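
Editor's note: the first review thread asks for Q14ToFloat to live somewhere shared rather than in this test's anonymous namespace. Below is a minimal sketch of what such a shared helper could look like; the header path, include guard, and namespace placement are assumptions for illustration only, not what this CL actually landed.

// webrtc/test/conversion_helpers.h -- hypothetical location, for illustration only.
#ifndef WEBRTC_TEST_CONVERSION_HELPERS_H_
#define WEBRTC_TEST_CONVERSION_HELPERS_H_

#include <stdint.h>

namespace webrtc {
namespace test {

// Converts a Q14 fixed-point rate (e.g. the NetworkStatistics rate fields
// compared in the GetStats test) to a floating-point fraction by dividing
// by 2^14.
inline float Q14ToFloat(uint16_t v) {
  return static_cast<float>(v) / (1 << 14);
}

}  // namespace test
}  // namespace webrtc

#endif  // WEBRTC_TEST_CONVERSION_HELPERS_H_

With a helper like this, the test above would include the shared header and drop its local Q14ToFloat definition.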