| Index: webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc |
| diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc |
| index 14bbff781c50bfc3fc4ebff9b43da808043298ac..2e2c34f03992ef5b27ec37f19edb7dd4418123b0 100644 |
| --- a/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc |
| +++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc |
| @@ -19,6 +19,7 @@ |
| #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h" |
| #include "webrtc/modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" |
| #include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h" |
| +#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h" |
| #include "webrtc/modules/rtp_rtcp/source/rtp_header_extensions.h" |
| #include "webrtc/modules/rtp_rtcp/source/rtp_packet_received.h" |
| #include "webrtc/modules/rtp_rtcp/source/rtp_packet_to_send.h" |
| @@ -55,6 +56,7 @@ const size_t kMaxPaddingSize = 224u; |
| const int kVideoRotationExtensionId = 5; |
| const size_t kGenericHeaderLength = 1; |
| const uint8_t kPayloadData[] = {47, 11, 32, 93, 89}; |
| +const int64_t kDefaultExpectedRetransmissionTimeMs = 125; |
| using ::testing::_; |
| using ::testing::ElementsAreArray; |
| @@ -238,7 +240,8 @@ class RtpSenderTest : public ::testing::TestWithParam<bool> { |
| EXPECT_TRUE(rtp_sender_->SendOutgoingData( |
| kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs, kPayloadData, |
| - sizeof(kPayloadData), nullptr, nullptr, nullptr)); |
| + sizeof(kPayloadData), nullptr, nullptr, nullptr, |
| + kDefaultExpectedRetransmissionTimeMs)); |
| } |
| }; |
| @@ -249,15 +252,36 @@ class RtpSenderTestWithoutPacer : public RtpSenderTest { |
| void SetUp() override { SetUpRtpSender(false); } |
| }; |
| +class TestRtpSenderVideo : public RTPSenderVideo { |
| + public: |
| + TestRtpSenderVideo(Clock* clock, |
| + RTPSender* rtpSender, |
|
danilchap 2017/08/29 17:31:19: rtp_sender
sprang_webrtc 2017/08/31 15:54:29: Done.
| + FlexfecSender* flexfec_sender) |
| + : RTPSenderVideo(clock, rtpSender, flexfec_sender) {} |
| + ~TestRtpSenderVideo() override {} |
| + |
| + StorageType GetStorageType(const RTPVideoHeader& header, |
| + int32_t retransmission_settings, |
| + int64_t expected_retransmission_time, |
| + FrameType frame_type) { |
| + std::unique_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create( |
|
danilchap 2017/08/29 17:31:19: or auto packetizer = rtc::WrapUnique(RtpPacketizer
sprang_webrtc 2017/08/31 15:54:29: Done.
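The reviewer's suggestion is cut off above; presumably it refers to WebRTC's rtc::WrapUnique helper (the include path, assumed here to be "webrtc/rtc_base/ptr_util.h", may differ in this revision). A minimal sketch of what that form might look like, using the same arguments as in the diff:

    // Assumed alternative per the review comment: wrap the raw pointer
    // returned by RtpPacketizer::Create instead of spelling the type twice.
    auto packetizer = rtc::WrapUnique(RtpPacketizer::Create(
        header.codec, kMaxPacketLength, 0, &header.codecHeader, frame_type));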
| + header.codec, kMaxPacketLength, 0, &header.codecHeader, frame_type)); |
| + |
| + return RTPSenderVideo::GetStorageType(header, retransmission_settings, |
| + packetizer.get(), |
| + expected_retransmission_time); |
| + } |
| +}; |
| + |
| class RtpSenderVideoTest : public RtpSenderTest { |
| protected: |
| void SetUp() override { |
| // TODO(pbos): Set up to use pacer. |
| SetUpRtpSender(false); |
| rtp_sender_video_.reset( |
| - new RTPSenderVideo(&fake_clock_, rtp_sender_.get(), nullptr)); |
| + new TestRtpSenderVideo(&fake_clock_, rtp_sender_.get(), nullptr)); |
| } |
| - std::unique_ptr<RTPSenderVideo> rtp_sender_video_; |
| + std::unique_ptr<TestRtpSenderVideo> rtp_sender_video_; |
| }; |
| TEST_P(RtpSenderTestWithoutPacer, AllocatePacketSetCsrc) { |
| @@ -861,9 +885,9 @@ TEST_P(RtpSenderTestWithoutPacer, SendGenericVideo) { |
| uint8_t payload[] = {47, 11, 32, 93, 89}; |
| // Send keyframe |
| - ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234, |
| - 4321, payload, sizeof(payload), |
| - nullptr, nullptr, nullptr)); |
| + ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| + kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload), |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| auto sent_payload = transport_.last_sent_packet().payload(); |
| uint8_t generic_header = sent_payload[0]; |
| @@ -878,7 +902,7 @@ TEST_P(RtpSenderTestWithoutPacer, SendGenericVideo) { |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload), |
| - nullptr, nullptr, nullptr)); |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| sent_payload = transport_.last_sent_packet().payload(); |
| generic_header = sent_payload[0]; |
| @@ -998,7 +1022,8 @@ TEST_P(RtpSenderTest, NoFlexfecForTimingFrames) { |
| video_header.video_timing.flags = TimingFrameFlags::kTriggeredByTimer; |
| EXPECT_TRUE(rtp_sender_->SendOutgoingData( |
| kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs, kPayloadData, |
| - sizeof(kPayloadData), nullptr, &video_header, nullptr)); |
| + sizeof(kPayloadData), nullptr, &video_header, nullptr, |
| + kDefaultExpectedRetransmissionTimeMs)); |
| EXPECT_CALL(mock_rtc_event_log_, |
| LogRtpHeader(PacketDirection::kOutgoingPacket, _, _, _)) |
| @@ -1023,7 +1048,8 @@ TEST_P(RtpSenderTest, NoFlexfecForTimingFrames) { |
| video_header.video_timing.flags = TimingFrameFlags::kInvalid; |
| EXPECT_TRUE(rtp_sender_->SendOutgoingData( |
| kVideoFrameKey, kPayloadType, kTimestamp + 1, kCaptureTimeMs + 1, |
| - kPayloadData, sizeof(kPayloadData), nullptr, &video_header, nullptr)); |
| + kPayloadData, sizeof(kPayloadData), nullptr, &video_header, nullptr, |
| + kDefaultExpectedRetransmissionTimeMs)); |
| EXPECT_CALL(mock_rtc_event_log_, |
| LogRtpHeader(PacketDirection::kOutgoingPacket, _, _, _)) |
| @@ -1168,9 +1194,9 @@ TEST_P(RtpSenderTest, FrameCountCallbacks) { |
| EXPECT_CALL(mock_paced_sender_, InsertPacket(_, _, _, _, _, _)) |
| .Times(::testing::AtLeast(2)); |
| - ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234, |
| - 4321, payload, sizeof(payload), |
| - nullptr, nullptr, nullptr)); |
| + ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| + kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload), |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| EXPECT_EQ(1U, callback.num_calls_); |
| EXPECT_EQ(ssrc, callback.ssrc_); |
| @@ -1179,7 +1205,7 @@ TEST_P(RtpSenderTest, FrameCountCallbacks) { |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload), |
| - nullptr, nullptr, nullptr)); |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| EXPECT_EQ(2U, callback.num_calls_); |
| EXPECT_EQ(ssrc, callback.ssrc_); |
| @@ -1245,7 +1271,7 @@ TEST_P(RtpSenderTest, BitrateCallbacks) { |
| for (uint32_t i = 0; i < kNumPackets; ++i) { |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload), |
| - nullptr, nullptr, nullptr)); |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| fake_clock_.AdvanceTimeMilliseconds(kPacketInterval); |
| } |
| @@ -1327,8 +1353,8 @@ TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) { |
| // Send a frame. |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| - kVideoFrameKey, payload_type, 1234, 4321, payload, |
| - sizeof(payload), nullptr, nullptr, nullptr)); |
| + kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload), |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| StreamDataCounters expected; |
| expected.transmitted.payload_bytes = 6; |
| expected.transmitted.header_bytes = 12; |
| @@ -1369,8 +1395,8 @@ TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) { |
| fec_params.max_fec_frames = 1; |
| rtp_sender_->SetFecParameters(fec_params, fec_params); |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| - kVideoFrameDelta, payload_type, 1234, 4321, payload, |
| - sizeof(payload), nullptr, nullptr, nullptr)); |
| + kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload), |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| expected.transmitted.payload_bytes = 40; |
| expected.transmitted.header_bytes = 60; |
| expected.transmitted.packets = 5; |
| @@ -1388,8 +1414,8 @@ TEST_P(RtpSenderAudioTest, SendAudio) { |
| uint8_t payload[] = {47, 11, 32, 93, 89}; |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| - kAudioFrameCN, payload_type, 1234, 4321, payload, |
| - sizeof(payload), nullptr, nullptr, nullptr)); |
| + kAudioFrameCN, payload_type, 1234, 4321, payload, sizeof(payload), |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| auto sent_payload = transport_.last_sent_packet().payload(); |
| EXPECT_THAT(sent_payload, ElementsAreArray(payload)); |
| @@ -1407,8 +1433,8 @@ TEST_P(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) { |
| uint8_t payload[] = {47, 11, 32, 93, 89}; |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| - kAudioFrameCN, payload_type, 1234, 4321, payload, |
| - sizeof(payload), nullptr, nullptr, nullptr)); |
| + kAudioFrameCN, payload_type, 1234, 4321, payload, sizeof(payload), |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| auto sent_payload = transport_.last_sent_packet().payload(); |
| EXPECT_THAT(sent_payload, ElementsAreArray(payload)); |
| @@ -1445,22 +1471,22 @@ TEST_P(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) { |
| // During start, it takes the starting timestamp as last sent timestamp. |
| // The duration is calculated as the difference of current and last sent |
| // timestamp. So for first call it will skip since the duration is zero. |
| - ASSERT_TRUE(rtp_sender_->SendOutgoingData(kEmptyFrame, kPayloadType, |
| - capture_time_ms, 0, nullptr, 0, |
| - nullptr, nullptr, nullptr)); |
| + ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| + kEmptyFrame, kPayloadType, capture_time_ms, 0, nullptr, 0, nullptr, |
| + nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| // DTMF Sample Length is (Frequency/1000) * Duration. |
| // So in this case, it is (8000/1000) * 500 = 4000. |
| // Sending it as two packets. |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| - kEmptyFrame, kPayloadType, capture_time_ms + 2000, 0, |
| - nullptr, 0, nullptr, nullptr, nullptr)); |
| + kEmptyFrame, kPayloadType, capture_time_ms + 2000, 0, nullptr, 0, nullptr, |
| + nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| // Marker Bit should be set to 1 for first packet. |
| EXPECT_TRUE(transport_.last_sent_packet().Marker()); |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| - kEmptyFrame, kPayloadType, capture_time_ms + 4000, 0, |
| - nullptr, 0, nullptr, nullptr, nullptr)); |
| + kEmptyFrame, kPayloadType, capture_time_ms + 4000, 0, nullptr, 0, nullptr, |
| + nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| // Marker Bit should be set to 0 for rest of the packets. |
| EXPECT_FALSE(transport_.last_sent_packet().Marker()); |
| } |
| @@ -1478,8 +1504,8 @@ TEST_P(RtpSenderTestWithoutPacer, BytesReportedCorrectly) { |
| uint8_t payload[] = {47, 11, 32, 93, 89}; |
| ASSERT_TRUE(rtp_sender_->SendOutgoingData( |
| - kVideoFrameKey, kPayloadType, 1234, 4321, payload, |
| - sizeof(payload), nullptr, nullptr, nullptr)); |
| + kVideoFrameKey, kPayloadType, 1234, 4321, payload, sizeof(payload), |
| + nullptr, nullptr, nullptr, kDefaultExpectedRetransmissionTimeMs)); |
| // Will send 2 full-size padding packets. |
| rtp_sender_->TimeToSendPadding(1, PacedPacketInfo()); |
| @@ -1553,7 +1579,7 @@ TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) { |
| hdr.rotation = kVideoRotation_0; |
| rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, kPayload, |
| kTimestamp, 0, kFrame, sizeof(kFrame), nullptr, |
| - &hdr); |
| + &hdr, kDefaultExpectedRetransmissionTimeMs); |
| VideoRotation rotation; |
| EXPECT_TRUE( |
| @@ -1579,7 +1605,8 @@ TEST_P(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) { |
| fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs); |
| rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, kPayload, |
| kTimestamp, kCaptureTimestamp, kFrame, |
| - sizeof(kFrame), nullptr, &hdr); |
| + sizeof(kFrame), nullptr, &hdr, |
| + kDefaultExpectedRetransmissionTimeMs); |
| VideoSendTiming timing; |
| EXPECT_TRUE(transport_.last_sent_packet().GetExtension<VideoTimingExtension>( |
| &timing)); |
| @@ -1595,14 +1622,14 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) { |
| RTPVideoHeader hdr = {0}; |
| hdr.rotation = kVideoRotation_90; |
| - EXPECT_TRUE(rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, |
| - kPayload, kTimestamp, 0, kFrame, |
| - sizeof(kFrame), nullptr, &hdr)); |
| + EXPECT_TRUE(rtp_sender_video_->SendVideo( |
| + kRtpVideoGeneric, kVideoFrameKey, kPayload, kTimestamp, 0, kFrame, |
| + sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs)); |
| hdr.rotation = kVideoRotation_0; |
| - EXPECT_TRUE(rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameDelta, |
| - kPayload, kTimestamp + 1, 0, kFrame, |
| - sizeof(kFrame), nullptr, &hdr)); |
| + EXPECT_TRUE(rtp_sender_video_->SendVideo( |
| + kRtpVideoGeneric, kVideoFrameDelta, kPayload, kTimestamp + 1, 0, kFrame, |
| + sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs)); |
| VideoRotation rotation; |
| EXPECT_TRUE( |
| @@ -1617,13 +1644,13 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) { |
| RTPVideoHeader hdr = {0}; |
| hdr.rotation = kVideoRotation_90; |
| - EXPECT_TRUE(rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameKey, |
| - kPayload, kTimestamp, 0, kFrame, |
| - sizeof(kFrame), nullptr, &hdr)); |
| + EXPECT_TRUE(rtp_sender_video_->SendVideo( |
| + kRtpVideoGeneric, kVideoFrameKey, kPayload, kTimestamp, 0, kFrame, |
| + sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs)); |
| - EXPECT_TRUE(rtp_sender_video_->SendVideo(kRtpVideoGeneric, kVideoFrameDelta, |
| - kPayload, kTimestamp + 1, 0, kFrame, |
| - sizeof(kFrame), nullptr, &hdr)); |
| + EXPECT_TRUE(rtp_sender_video_->SendVideo( |
| + kRtpVideoGeneric, kVideoFrameDelta, kPayload, kTimestamp + 1, 0, kFrame, |
| + sizeof(kFrame), nullptr, &hdr, kDefaultExpectedRetransmissionTimeMs)); |
| VideoRotation rotation; |
| EXPECT_TRUE( |
| @@ -1652,6 +1679,120 @@ TEST_P(RtpSenderVideoTest, SendVideoWithCameraAndFlipCVO) { |
| ConvertCVOByteToVideoRotation(flip_bit | camera_bit | 3)); |
| } |
| +TEST_P(RtpSenderVideoTest, ConditionalRetransmit) { |
| + const int64_t kWindowSize = 2500; |
|
danilchap 2017/08/29 17:31:19: kWindowSizeMs?
sprang_webrtc 2017/08/31 15:54:29: Done.
| + const int64_t kFrameIntervalMs = 33; |
| + const int64_t kRttMs = (kFrameIntervalMs * 3) / 2; |
| + const int32_t kSettings = |
|
danilchap 2017/08/29 17:31:20: may be uint8_t instead of int32_t
sprang_webrtc 2017/08/31 15:54:29: Done.
| + kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers; |
| + |
| + // Insert VP8 frames for all temporal layers, but stop before the final index. |
| + RTPVideoHeader header; |
| + header.codec = kRtpVideoVp8; |
| + constexpr int kPatternLength = 4; |
| + // Fill averaging window to prevent rounding errors. |
| + constexpr int kNumRepetitions = |
| + (kWindowSize + (kFrameIntervalMs / 2)) / kFrameIntervalMs; |
| + for (int i = 0; i < kPatternLength * kNumRepetitions; ++i) { |
| + switch (i % kPatternLength) { |
| + case 0: |
| + header.codecHeader.VP8.temporalIdx = 0; |
| + break; |
| + case 2: |
| + header.codecHeader.VP8.temporalIdx = 1; |
| + break; |
| + case 1: |
| + case 3: |
| + header.codecHeader.VP8.temporalIdx = 2; |
| + break; |
| + default: |
| + ADD_FAILURE() << "Bad temporal idx."; |
|
danilchap 2017/08/29 17:31:20: with kPatternLength = 4 and i >= 0 this look impos
sprang_webrtc 2017/08/31 15:54:29: Yes, nice suggestion!
| + } |
| + rtp_sender_video_->GetStorageType( |
| + header, kSettings, kRttMs, i == 0 ? kVideoFrameKey : kVideoFrameDelta); |
| + fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); |
| + } |
| + |
| + // Since we're at the start of the pattern, the next expected frame in TL0 is |
| + // right now. We will wait at most one expected retransmission time before |
| + // acknowledging that it did not arrive, which means this frame and the next |
| + // will not be retransmitted. |
| + header.codecHeader.VP8.temporalIdx = 1; |
| + EXPECT_EQ(StorageType::kDontRetransmit, |
| + rtp_sender_video_->GetStorageType(header, kSettings, kRttMs, |
| + kVideoFrameDelta)); |
| + fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); |
| + EXPECT_EQ(StorageType::kDontRetransmit, |
| + rtp_sender_video_->GetStorageType(header, kSettings, kRttMs, |
| + kVideoFrameDelta)); |
| + fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); |
| + |
| + // The TL0 frame did not arrive. So allow retransmission. |
| + EXPECT_EQ(StorageType::kAllowRetransmission, |
| + rtp_sender_video_->GetStorageType(header, kSettings, kRttMs, |
| + kVideoFrameDelta)); |
| + fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); |
| + |
| + // Insert a frame for TL2. We just had a frame in TL1, so the next one |
| + // there is three frames away. TL0 is still too far in the past, so allow |
| + // retransmission. |
| + header.codecHeader.VP8.temporalIdx = 2; |
| + EXPECT_EQ(StorageType::kAllowRetransmission, |
| + rtp_sender_video_->GetStorageType(header, kSettings, kRttMs, |
| + kVideoFrameDelta)); |
| + fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); |
| + |
| + // Another TL2, next in TL1 is two frames away. Allow again. |
| + EXPECT_EQ(StorageType::kAllowRetransmission, |
| + rtp_sender_video_->GetStorageType(header, kSettings, kRttMs, |
| + kVideoFrameDelta)); |
| + fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); |
| + |
| + // Yet another TL2, next in TL1 is now only one frame away, so don't store |
| + // for retransmission. |
| + EXPECT_EQ(StorageType::kDontRetransmit, |
| + rtp_sender_video_->GetStorageType(header, kSettings, kRttMs, |
| + kVideoFrameDelta)); |
| +} |
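To make the rule these expectations encode easier to follow, here is a minimal, illustrative-only sketch (hypothetical names and signature, not the actual RTPSenderVideo code): a higher-layer frame is stored for retransmission only if no lower temporal layer is expected to deliver a frame within roughly one expected retransmission time of now, in either direction.

    #include <cstdint>
    #include <vector>

    enum StorageType { kDontRetransmit, kAllowRetransmission };

    // Sketch only: decide storage for a higher-temporal-layer frame given the
    // predicted send times of the next frame in each lower layer.
    StorageType ConditionalStorage(
        int64_t now_ms,
        const std::vector<int64_t>& expected_lower_layer_frame_times_ms,
        int64_t expected_retransmission_time_ms) {
      for (int64_t expected_ms : expected_lower_layer_frame_times_ms) {
        int64_t delta_ms = expected_ms - now_ms;
        if (delta_ms < 0)
          delta_ms = -delta_ms;
        // A lower-layer frame that is (or recently was) due within one expected
        // retransmission time makes retransmitting this frame pointless.
        if (delta_ms <= expected_retransmission_time_ms)
          return kDontRetransmit;
      }
      return kAllowRetransmission;
    }

With kFrameIntervalMs = 33 and kRttMs = 49 as in the test, this reproduces the sequence of expectations above: the frames sent while the TL0 frame is still considered due are not stored, the one sent after more than one retransmission time has passed is, and the final TL2 frame is again skipped because a TL1 frame is expected only one interval away.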
| + |
| +TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) { |
| + const int64_t kWindowSize = 2500; |
| + const int64_t kFrameIntervalMs = 200; |
| + const int64_t kRttMs = (kFrameIntervalMs * 3) / 2; |
| + const int32_t kSettings = |
| + kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers; |
| + |
| + // Insert VP8 frames for all temporal layers, but stop before the final index. |
| + RTPVideoHeader header; |
| + header.codec = kRtpVideoVp8; |
| + constexpr int kPatternLength = 4; |
|
danilchap 2017/08/29 17:31:20: and here kPattern = {0, 1, 1, 1};?
sprang_webrtc 2017/08/31 15:54:28: Done.
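A sketch of the table-driven loop the reviewer is suggesting here (the earlier switch with its unreachable default could use the same idea with a {0, 2, 1, 2} pattern). It reuses the test-local names from the diff; only kPattern itself is new, and the rewrite is a guess at the shape of the landed version rather than a quote of it:

    // Hypothetical refactor: drive the temporal-layer pattern from an array,
    // so no if/else chain or unreachable default branch is needed.
    constexpr uint8_t kPattern[] = {0, 1, 1, 1};
    constexpr int kPatternLength = sizeof(kPattern) / sizeof(kPattern[0]);
    for (int i = 0; i < kPatternLength * kNumRepetitions; ++i) {
      header.codecHeader.VP8.temporalIdx = kPattern[i % kPatternLength];
      rtp_sender_video_->GetStorageType(
          header, kSettings, kRttMs, i == 0 ? kVideoFrameKey : kVideoFrameDelta);
      fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
    }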
| + // Fill averaging window to prevent rounding errors. |
| + constexpr int kNumRepetitions = |
| + (kWindowSize + (kFrameIntervalMs / 2)) / kFrameIntervalMs; |
| + for (int i = 0; i < kPatternLength * kNumRepetitions; ++i) { |
| + // Put every fourth frame in tl0. |
| + if (i % kPatternLength == 0) { |
| + header.codecHeader.VP8.temporalIdx = 0; |
| + } else { |
| + header.codecHeader.VP8.temporalIdx = 1; |
| + } |
| + |
| + rtp_sender_video_->GetStorageType( |
| + header, kSettings, kRttMs, i == 0 ? kVideoFrameKey : kVideoFrameDelta); |
| + fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); |
| + } |
| + |
| + // Since we're at the start of the pattern, the next expected frame will be |
| + // right now in TL0. Put it in TL1 instead. Regular rules would dictate that |
| + // we don't store for retransmission because we expect a frame in a lower |
| + // layer, but that last frame in TL1 was a long time ago in absolute terms, |
| + // so allow retransmission anyway. |
| + header.codecHeader.VP8.temporalIdx = 1; |
| + EXPECT_EQ(StorageType::kAllowRetransmission, |
| + rtp_sender_video_->GetStorageType(header, kSettings, kRttMs, |
| + kVideoFrameDelta)); |
| +} |
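The override this second test exercises can be sketched in the same illustrative-only spirit (hypothetical names and threshold; the real implementation may express it differently): even when a lower-layer frame is expected soon, a frame is still stored for retransmission if the gap since the previous frame in its own layer exceeds some absolute limit.

    #include <cstdint>

    // Sketch only: allow retransmission despite an imminent lower-layer frame
    // when this layer itself has not produced a frame for "too long".
    bool RetransmitDespiteExpectedLowerLayer(int64_t now_ms,
                                             int64_t last_frame_in_same_layer_ms,
                                             int64_t max_gap_ms) {
      return now_ms - last_frame_in_same_layer_ms >= max_gap_ms;
    }

The 200 ms frame interval used here, as opposed to 33 ms in the previous test, is presumably what pushes that gap past the limit and turns the otherwise expected kDontRetransmit into kAllowRetransmission.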
| + |
| TEST_P(RtpSenderTest, OnOverheadChanged) { |
| MockOverheadObserver mock_overhead_observer; |
| rtp_sender_.reset( |