OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 #include <algorithm> // max | 10 #include <algorithm> // max |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
51 const VideoFrame& frame2); | 51 const VideoFrame& frame2); |
52 void ExpectEqualFramesVector(const std::vector<VideoFrame>& frames1, | 52 void ExpectEqualFramesVector(const std::vector<VideoFrame>& frames1, |
53 const std::vector<VideoFrame>& frames2); | 53 const std::vector<VideoFrame>& frames2); |
54 VideoFrame CreateVideoFrame(int width, int height, uint8_t data); | 54 VideoFrame CreateVideoFrame(int width, int height, uint8_t data); |
55 | 55 |
56 class VideoSendStreamTest : public test::CallTest { | 56 class VideoSendStreamTest : public test::CallTest { |
57 protected: | 57 protected: |
58 void TestNackRetransmission(uint32_t retransmit_ssrc, | 58 void TestNackRetransmission(uint32_t retransmit_ssrc, |
59 uint8_t retransmit_payload_type); | 59 uint8_t retransmit_payload_type); |
60 void TestPacketFragmentationSize(VideoFormat format, bool with_fec); | 60 void TestPacketFragmentationSize(VideoFormat format, bool with_fec); |
61 | |
62 void TestVp9NonFlexMode(uint8_t num_temporal_layers, | |
63 uint8_t num_spatial_layers); | |
61 }; | 64 }; |
62 | 65 |
63 TEST_F(VideoSendStreamTest, CanStartStartedStream) { | 66 TEST_F(VideoSendStreamTest, CanStartStartedStream) { |
64 Call::Config call_config; | 67 Call::Config call_config; |
65 CreateSenderCall(call_config); | 68 CreateSenderCall(call_config); |
66 | 69 |
67 test::NullTransport transport; | 70 test::NullTransport transport; |
68 CreateSendConfig(1, &transport); | 71 CreateSendConfig(1, &transport); |
69 CreateStreams(); | 72 CreateStreams(); |
70 send_stream_->Start(); | 73 send_stream_->Start(); |
(...skipping 1710 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1781 const std::vector<VideoReceiveStream*>& receive_streams) override { | 1784 const std::vector<VideoReceiveStream*>& receive_streams) override { |
1782 send_stream_ = send_stream; | 1785 send_stream_ = send_stream; |
1783 } | 1786 } |
1784 | 1787 |
1785 VideoSendStream* send_stream_; | 1788 VideoSendStream* send_stream_; |
1786 } test; | 1789 } test; |
1787 | 1790 |
1788 RunBaseTest(&test, FakeNetworkPipe::Config()); | 1791 RunBaseTest(&test, FakeNetworkPipe::Config()); |
1789 } | 1792 } |
1790 | 1793 |
1791 class VP9HeaderObeserver : public test::SendTest { | 1794 class Vp9HeaderObserver : public test::SendTest { |
1792 public: | 1795 public: |
1793 VP9HeaderObeserver() | 1796 Vp9HeaderObserver() |
1794 : SendTest(VideoSendStreamTest::kDefaultTimeoutMs), | 1797 : SendTest(VideoSendStreamTest::kDefaultTimeoutMs), |
1795 vp9_encoder_(VP9Encoder::Create()), | 1798 vp9_encoder_(VP9Encoder::Create()), |
1796 vp9_settings_(VideoEncoder::GetDefaultVp9Settings()) { | 1799 vp9_settings_(VideoEncoder::GetDefaultVp9Settings()), |
1797 vp9_settings_.numberOfTemporalLayers = 1; | 1800 packets_sent_(0), |
1798 vp9_settings_.numberOfSpatialLayers = 2; | 1801 frames_sent_(0) {} |
1799 } | |
1800 | 1802 |
1801 virtual void ModifyConfigsHook( | 1803 virtual void ModifyConfigsHook( |
1802 VideoSendStream::Config* send_config, | 1804 VideoSendStream::Config* send_config, |
1803 std::vector<VideoReceiveStream::Config>* receive_configs, | 1805 std::vector<VideoReceiveStream::Config>* receive_configs, |
1804 VideoEncoderConfig* encoder_config) {} | 1806 VideoEncoderConfig* encoder_config) {} |
1805 | 1807 |
1806 virtual void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) = 0; | 1808 virtual void InspectHeader(const RTPVideoHeaderVP9& vp9) = 0; |
1807 | 1809 |
1808 private: | 1810 private: |
1809 const int kVp9PayloadType = 105; | 1811 const int kVp9PayloadType = 105; |
1810 | 1812 |
1811 void ModifyConfigs(VideoSendStream::Config* send_config, | 1813 void ModifyConfigs(VideoSendStream::Config* send_config, |
1812 std::vector<VideoReceiveStream::Config>* receive_configs, | 1814 std::vector<VideoReceiveStream::Config>* receive_configs, |
1813 VideoEncoderConfig* encoder_config) override { | 1815 VideoEncoderConfig* encoder_config) override { |
1816 vp9_settings_.frameDroppingOn = false; | |
pbos-webrtc
2015/11/16 16:25:15
If you need this, put a comment explaining why.
| |
1817 vp9_settings_.keyFrameInterval = 50; | |
pbos-webrtc
2015/11/16 16:25:15
write a kConstant that explains why it's 50 (set i
åsapersson
2015/11/19 11:37:22
Changed to constant and added comment.
| |
1814 encoder_config->encoder_specific_settings = &vp9_settings_; | 1818 encoder_config->encoder_specific_settings = &vp9_settings_; |
1815 encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen; | |
1816 send_config->encoder_settings.encoder = vp9_encoder_.get(); | 1819 send_config->encoder_settings.encoder = vp9_encoder_.get(); |
1817 send_config->encoder_settings.payload_name = "VP9"; | 1820 send_config->encoder_settings.payload_name = "VP9"; |
1818 send_config->encoder_settings.payload_type = kVp9PayloadType; | 1821 send_config->encoder_settings.payload_type = kVp9PayloadType; |
1819 ModifyConfigsHook(send_config, receive_configs, encoder_config); | 1822 ModifyConfigsHook(send_config, receive_configs, encoder_config); |
1823 EXPECT_EQ(1u, encoder_config->streams.size()); | |
1824 encoder_config->streams[0].temporal_layer_thresholds_bps.resize( | |
1825 vp9_settings_.numberOfTemporalLayers - 1); | |
1826 encoder_config_ = *encoder_config; | |
1820 } | 1827 } |
1821 | 1828 |
1822 void PerformTest() override { | 1829 void PerformTest() override { |
1823 EXPECT_EQ(kEventSignaled, Wait()) | 1830 EXPECT_EQ(kEventSignaled, Wait()) |
1824 << "Test timed out waiting for VP9 packet"; | 1831 << "Test timed out waiting for VP9 packet"; |
1825 } | 1832 } |
1826 | 1833 |
1827 Action OnSendRtp(const uint8_t* packet, size_t length) override { | 1834 Action OnSendRtp(const uint8_t* packet, size_t length) override { |
1828 RTPHeader header; | 1835 RTPHeader header; |
1829 EXPECT_TRUE(parser_->Parse(packet, length, &header)); | 1836 EXPECT_TRUE(parser_->Parse(packet, length, &header)); |
1830 | 1837 |
1831 if (header.payloadType == kVp9PayloadType) { | 1838 EXPECT_EQ(kVp9PayloadType, header.payloadType); |
1832 RtpDepacketizerVp9 vp9depacketizer; | 1839 const uint8_t* payload = packet + header.headerLength; |
1833 RtpDepacketizer::ParsedPayload vp9payload; | 1840 size_t payload_length = length - header.headerLength - header.paddingLength; |
1834 const uint8_t* vp9_packet = packet + header.headerLength; | 1841 |
1835 size_t payload_length = | 1842 bool new_packet = packets_sent_ == 0 || |
1836 length - header.headerLength - header.paddingLength; | 1843 IsNewerSequenceNumber(header.sequenceNumber, |
1837 | 1844 last_header_.sequenceNumber); |
1838 if (payload_length > 0) { | 1845 if (payload_length > 0 && new_packet) { |
1839 bool parse_vp9header_successful = | 1846 RtpDepacketizer::ParsedPayload parsed; |
1840 vp9depacketizer.Parse(&vp9payload, vp9_packet, payload_length); | 1847 RtpDepacketizerVp9 depacketizer; |
1841 bool is_vp9_codec_type = | 1848 EXPECT_TRUE(depacketizer.Parse(&parsed, payload, payload_length)); |
1842 vp9payload.type.Video.codec == RtpVideoCodecTypes::kRtpVideoVp9; | 1849 EXPECT_EQ(RtpVideoCodecTypes::kRtpVideoVp9, parsed.type.Video.codec); |
1843 EXPECT_TRUE(parse_vp9header_successful); | 1850 // Verify common fields for all configurations. |
1844 EXPECT_TRUE(is_vp9_codec_type); | 1851 VerifyHeader(parsed.type.Video.codecHeader.VP9); |
1845 | 1852 CompareConsecutiveFrames(header, parsed.type.Video); |
1846 RTPVideoHeaderVP9* vp9videoHeader = | 1853 // Verify configuration specific settings. |
1847 &vp9payload.type.Video.codecHeader.VP9; | 1854 InspectHeader(parsed.type.Video.codecHeader.VP9); |
stefan-webrtc
2015/11/16 17:36:30
I would prefer a different name for this method. T
åsapersson
2015/11/19 11:37:22
Renamed VerifyHeader...
| |
1848 if (parse_vp9header_successful && is_vp9_codec_type) { | 1855 |
1849 InspectHeader(vp9videoHeader); | 1856 ++packets_sent_; |
1850 } else { | 1857 if (header.markerBit) { |
1851 observation_complete_->Set(); | 1858 ++frames_sent_; |
1852 } | |
1853 } | 1859 } |
1854 } | 1860 last_header_ = header; |
1855 | 1861 last_vp9_ = parsed.type.Video.codecHeader.VP9; |
1862 } | |
1856 return SEND_PACKET; | 1863 return SEND_PACKET; |
1857 } | 1864 } |
1858 | 1865 |
1859 protected: | 1866 protected: |
1867 bool ContinuousPictureId(const RTPVideoHeaderVP9& vp9) const { | |
1868 if (last_vp9_.picture_id > vp9.picture_id) { | |
1869 return vp9.picture_id == 0; // Wrap. | |
1870 } else { | |
1871 return vp9.picture_id == last_vp9_.picture_id + 1; | |
1872 } | |
1873 } | |
1874 | |
1875 void VerifySpatialIdxWithinFrame(const RTPVideoHeaderVP9& vp9) const { | |
1876 if (frames_sent_ == 0) | |
1877 return; | |
1878 | |
1879 bool new_layer = vp9.spatial_idx != last_vp9_.spatial_idx; | |
1880 EXPECT_EQ(new_layer, vp9.beginning_of_frame); | |
1881 EXPECT_EQ(new_layer, last_vp9_.end_of_frame); | |
1882 EXPECT_EQ(new_layer ? last_vp9_.spatial_idx + 1 : last_vp9_.spatial_idx, | |
1883 vp9.spatial_idx); | |
1884 } | |
1885 | |
1886 void VerifyFixedTemporalLayerStructure(const RTPVideoHeaderVP9& vp9, | |
1887 uint8_t num_layers) const { | |
1888 switch (num_layers) { | |
1889 case 0: | |
1890 VerifyTemporalLayerStructure0(vp9); | |
1891 break; | |
1892 case 1: | |
1893 VerifyTemporalLayerStructure1(vp9); | |
1894 break; | |
1895 case 2: | |
1896 VerifyTemporalLayerStructure2(vp9); | |
1897 break; | |
1898 case 3: | |
1899 VerifyTemporalLayerStructure3(vp9); | |
1900 break; | |
1901 default: | |
1902 RTC_NOTREACHED(); | |
1903 } | |
1904 } | |
1905 | |
1906 void VerifyTemporalLayerStructure0(const RTPVideoHeaderVP9& vp9) const { | |
1907 EXPECT_EQ(kNoTl0PicIdx, vp9.tl0_pic_idx); | |
1908 EXPECT_EQ(kNoTemporalIdx, vp9.temporal_idx); // no tid | |
1909 EXPECT_FALSE(vp9.temporal_up_switch); | |
1910 } | |
1911 | |
1912 void VerifyTemporalLayerStructure1(const RTPVideoHeaderVP9& vp9) const { | |
1913 EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx); | |
stefan-webrtc
2015/11/16 17:36:30
Should increase by one between each frame, right?
åsapersson
2015/11/19 11:37:22
Checked in VerifyTl0idx (if set).
| |
1914 EXPECT_EQ(0, vp9.temporal_idx); // 0,0,0,... | |
1915 EXPECT_FALSE(vp9.temporal_up_switch); | |
1916 } | |
1917 | |
1918 void VerifyTemporalLayerStructure2(const RTPVideoHeaderVP9& vp9) const { | |
1919 EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx); | |
stefan-webrtc
2015/11/16 17:36:30
Check that it increases by one for each temporal_i
åsapersson
2015/11/19 11:37:22
see above
| |
1920 EXPECT_GE(vp9.temporal_idx, 0); // 0,1U,0,1U,... | |
1921 EXPECT_LE(vp9.temporal_idx, 1); | |
1922 EXPECT_EQ(vp9.temporal_idx > 0, vp9.temporal_up_switch); | |
1923 if (IsNewPictureId(vp9)) { | |
1924 uint8_t expected_tid = | |
1925 (!vp9.inter_pic_predicted || last_vp9_.temporal_idx == 1) ? 0 : 1; | |
stefan-webrtc
2015/11/16 17:36:30
Maybe a comment to explain this fairly complex exp
åsapersson
2015/11/19 11:37:22
added IsKeyFrame
| |
1926 EXPECT_EQ(expected_tid, vp9.temporal_idx); | |
1927 } | |
1928 } | |
1929 | |
1930 void VerifyTemporalLayerStructure3(const RTPVideoHeaderVP9& vp9) const { | |
1931 EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx); | |
1932 EXPECT_GE(vp9.temporal_idx, 0); // 0,2U,1U,2,... | |
stefan-webrtc
2015/11/16 17:36:30
Remove the 'U's
åsapersson
2015/11/19 11:37:22
Done.
| |
1933 EXPECT_LE(vp9.temporal_idx, 2); | |
1934 if (IsNewPictureId(vp9) && vp9.inter_pic_predicted) { | |
1935 EXPECT_NE(vp9.temporal_idx, last_vp9_.temporal_idx); | |
1936 switch (vp9.temporal_idx) { | |
1937 case 0: | |
1938 EXPECT_EQ(2, last_vp9_.temporal_idx); | |
1939 EXPECT_FALSE(vp9.temporal_up_switch); | |
1940 break; | |
1941 case 1: | |
1942 EXPECT_EQ(2, last_vp9_.temporal_idx); | |
1943 EXPECT_TRUE(vp9.temporal_up_switch); | |
1944 break; | |
1945 case 2: | |
1946 EXPECT_EQ(last_vp9_.temporal_idx == 0, vp9.temporal_up_switch); | |
1947 break; | |
1948 } | |
1949 } | |
1950 } | |
1951 | |
1952 void VerifyTl0idx(const RTPVideoHeaderVP9& vp9) const { | |
stefan-webrtc
2015/11/16 17:36:30
VerifyTl0Idx
åsapersson
2015/11/19 11:37:22
Done.
| |
1953 if (vp9.tl0_pic_idx == kNoTl0PicIdx) | |
1954 return; | |
1955 | |
1956 uint8_t expected_tl0_idx = last_vp9_.tl0_pic_idx; | |
1957 if (vp9.temporal_idx == 0) | |
1958 ++expected_tl0_idx; | |
1959 EXPECT_EQ(expected_tl0_idx, vp9.tl0_pic_idx); | |
1960 } | |
1961 | |
1962 bool IsNewPictureId(const RTPVideoHeaderVP9& vp9) const { | |
1963 return frames_sent_ > 0 && (vp9.picture_id != last_vp9_.picture_id); | |
1964 } | |
1965 | |
1966 bool IsFirstPacketInKeyFrame(const RTPVideoHeaderVP9& vp9) const { | |
stefan-webrtc
2015/11/16 17:36:30
I'm not sure how the code below verifies that it's
åsapersson
2015/11/19 11:37:22
change to use IsKeyFrame
| |
1967 return !vp9.inter_pic_predicted && // P | |
1968 vp9.beginning_of_frame && // B | |
1969 vp9.spatial_idx == 0; | |
1970 } | |
1971 | |
1972 // Flexible mode (F=1): Non-flexible mode (F=0): | |
1973 // | |
1974 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
1975 // |I|P|L|F|B|E|V|-| |I|P|L|F|B|E|V|-| | |
1976 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
1977 // I: |M| PICTURE ID | I: |M| PICTURE ID | | |
1978 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
1979 // M: | EXTENDED PID | M: | EXTENDED PID | | |
1980 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
1981 // L: | T |U| S |D| L: | T |U| S |D| | |
1982 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
1983 // P,F: | P_DIFF |X|N| | TL0PICIDX | | |
1984 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
1985 // X: |EXTENDED P_DIFF| V: | SS .. | | |
1986 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
1987 // V: | SS .. | | |
1988 // +-+-+-+-+-+-+-+-+ | |
1989 void VerifyHeader(const RTPVideoHeaderVP9& vp9) const { | |
1990 if (!vp9.inter_pic_predicted) { // P | |
1991 EXPECT_TRUE(vp9.temporal_idx == 0 || vp9.temporal_idx == kNoTemporalIdx); | |
stefan-webrtc
2015/11/16 17:36:30
I'm not following this, why can't we have temporal
åsapersson
2015/11/19 11:37:22
If P is zero, the temporal id should be 0, right?
| |
1992 // TODO(asapersson): This is not set correctly for flexible mode. | |
pbos-webrtc
2015/11/16 16:25:15
Tracking bug since this is wrong in the current bi
åsapersson
2015/11/19 11:37:22
This has been fixed.
| |
1993 if (!vp9.flexible_mode) | |
1994 EXPECT_FALSE(vp9.temporal_up_switch); | |
1995 } | |
1996 EXPECT_EQ(kMaxTwoBytePictureId, vp9.max_picture_id); // M:1 | |
1997 EXPECT_NE(kNoPictureId, vp9.picture_id); // I:1 | |
1998 | |
1999 EXPECT_EQ(vp9_settings_.flexibleMode, vp9.flexible_mode); // F | |
2000 EXPECT_GE(vp9.spatial_idx, 0); // S | |
2001 EXPECT_LE(vp9.spatial_idx, vp9_settings_.numberOfSpatialLayers - 1); | |
stefan-webrtc
2015/11/16 17:36:30
EXPECT_LT(..., vp9_settings_.numberOfSpatialLayers
åsapersson
2015/11/19 11:37:22
Done.
| |
2002 if (vp9.ss_data_available) // V | |
2003 VerifySsData(vp9); | |
2004 if (packets_sent_ == 0) | |
2005 EXPECT_TRUE(IsFirstPacketInKeyFrame(vp9)); | |
2006 if (frames_sent_ == 0) | |
2007 EXPECT_FALSE(vp9.inter_pic_predicted); // P (0:key, 1:delta) | |
2008 } | |
2009 | |
2010 // Scalability structure (SS). | |
2011 // | |
2012 // +-+-+-+-+-+-+-+-+ | |
2013 // V: | N_S |Y|G|-|-|-| | |
2014 // +-+-+-+-+-+-+-+-+ | |
2015 // Y: | WIDTH | N_S + 1 times | |
2016 // +-+-+-+-+-+-+-+-+ | |
2017 // | HEIGHT | | |
2018 // +-+-+-+-+-+-+-+-+ | |
2019 // G: | N_G | | |
2020 // +-+-+-+-+-+-+-+-+ | |
2021 // N_G: | T |U| R |-|-| N_G times | |
2022 // +-+-+-+-+-+-+-+-+ | |
2023 // | P_DIFF | R times | |
2024 // +-+-+-+-+-+-+-+-+ | |
2025 void VerifySsData(const RTPVideoHeaderVP9& vp9) const { | |
2026 EXPECT_TRUE(vp9.ss_data_available); // V | |
2027 EXPECT_EQ(vp9_settings_.numberOfSpatialLayers, // N_S + 1 | |
2028 vp9.num_spatial_layers); | |
2029 EXPECT_TRUE(vp9.spatial_layer_resolution_present); // Y:1 | |
2030 size_t expected_width = encoder_config_.streams[0].width; | |
2031 size_t expected_height = encoder_config_.streams[0].height; | |
2032 for (int i = vp9.num_spatial_layers - 1; i >= 0; --i) { | |
2033 EXPECT_EQ(expected_width, vp9.width[i]); // WIDTH | |
2034 EXPECT_EQ(expected_height, vp9.height[i]); // HEIGHT | |
2035 expected_width /= 2; | |
2036 expected_height /= 2; | |
2037 } | |
2038 } | |
2039 | |
2040 void CompareConsecutiveFrames(const RTPHeader& header, | |
2041 const RTPVideoHeader& video) const { | |
2042 const RTPVideoHeaderVP9& vp9 = video.codecHeader.VP9; | |
2043 | |
2044 bool new_frame = packets_sent_ == 0 || | |
2045 IsNewerTimestamp(header.timestamp, last_header_.timestamp); | |
2046 EXPECT_EQ(new_frame, video.isFirstPacket); | |
2047 if (!new_frame) { | |
2048 EXPECT_FALSE(last_header_.markerBit); | |
2049 EXPECT_EQ(last_header_.timestamp, header.timestamp); | |
2050 EXPECT_EQ(last_vp9_.picture_id, vp9.picture_id); | |
2051 EXPECT_EQ(last_vp9_.temporal_idx, vp9.temporal_idx); | |
2052 EXPECT_EQ(last_vp9_.tl0_pic_idx, vp9.tl0_pic_idx); | |
2053 VerifySpatialIdxWithinFrame(vp9); | |
2054 return; | |
2055 } | |
2056 // New frame. | |
2057 EXPECT_TRUE(vp9.beginning_of_frame); | |
2058 | |
2059 // Compare with last packet in previous frame. | |
2060 if (frames_sent_ == 0) | |
2061 return; | |
2062 EXPECT_TRUE(last_vp9_.end_of_frame); | |
2063 EXPECT_TRUE(last_header_.markerBit); | |
2064 EXPECT_TRUE(ContinuousPictureId(vp9)); | |
2065 VerifyTl0idx(vp9); | |
2066 } | |
2067 | |
1860 rtc::scoped_ptr<VP9Encoder> vp9_encoder_; | 2068 rtc::scoped_ptr<VP9Encoder> vp9_encoder_; |
1861 VideoCodecVP9 vp9_settings_; | 2069 VideoCodecVP9 vp9_settings_; |
2070 webrtc::VideoEncoderConfig encoder_config_; | |
2071 RTPHeader last_header_; | |
2072 RTPVideoHeaderVP9 last_vp9_; | |
2073 size_t packets_sent_; | |
2074 size_t frames_sent_; | |
1862 }; | 2075 }; |
1863 | 2076 |
1864 TEST_F(VideoSendStreamTest, DISABLED_VP9FlexMode) { | 2077 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_1Tl1SLayers) { |
1865 class FlexibleMode : public VP9HeaderObeserver { | 2078 const uint8_t kNumTemporalLayers = 1; |
2079 const uint8_t kNumSpatialLayers = 1; | |
2080 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
2081 } | |
2082 | |
2083 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_2Tl1SLayers) { | |
2084 const uint8_t kNumTemporalLayers = 2; | |
2085 const uint8_t kNumSpatialLayers = 1; | |
2086 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
2087 } | |
2088 | |
2089 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_3Tl1SLayers) { | |
2090 const uint8_t kNumTemporalLayers = 3; | |
2091 const uint8_t kNumSpatialLayers = 1; | |
2092 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
2093 } | |
2094 | |
2095 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_1Tl2SLayers) { | |
2096 const uint8_t kNumTemporalLayers = 1; | |
2097 const uint8_t kNumSpatialLayers = 2; | |
2098 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
2099 } | |
2100 | |
2101 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_2Tl2SLayers) { | |
2102 const uint8_t kNumTemporalLayers = 2; | |
2103 const uint8_t kNumSpatialLayers = 2; | |
2104 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
2105 } | |
2106 | |
2107 void VideoSendStreamTest::TestVp9NonFlexMode(uint8_t num_temporal_layers, | |
2108 uint8_t num_spatial_layers) { | |
2109 static const size_t kNumFramesToSend = 100; | |
2110 class NonFlexibleMode : public Vp9HeaderObserver { | |
2111 public: | |
2112 NonFlexibleMode(uint8_t num_temporal_layers, uint8_t num_spatial_layers) | |
2113 : num_temporal_layers_(num_temporal_layers), | |
2114 num_spatial_layers_(num_spatial_layers) { | |
2115 l_field_ = num_temporal_layers_ > 1 || num_spatial_layers_ > 1; | |
stefan-webrtc
2015/11/16 17:36:30
I think you can set this in the initializer list t
åsapersson
2015/11/19 11:37:22
Done.
| |
2116 } | |
1866 void ModifyConfigsHook( | 2117 void ModifyConfigsHook( |
1867 VideoSendStream::Config* send_config, | 2118 VideoSendStream::Config* send_config, |
1868 std::vector<VideoReceiveStream::Config>* receive_configs, | 2119 std::vector<VideoReceiveStream::Config>* receive_configs, |
1869 VideoEncoderConfig* encoder_config) override { | 2120 VideoEncoderConfig* encoder_config) override { |
1870 vp9_settings_.flexibleMode = true; | 2121 vp9_settings_.flexibleMode = false; |
1871 } | 2122 vp9_settings_.numberOfTemporalLayers = num_temporal_layers_; |
1872 | 2123 vp9_settings_.numberOfSpatialLayers = num_spatial_layers_; |
1873 void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) override { | 2124 } |
1874 EXPECT_TRUE(vp9videoHeader->flexible_mode); | 2125 |
1875 observation_complete_->Set(); | 2126 void InspectHeader(const RTPVideoHeaderVP9& vp9) override { |
1876 } | 2127 EXPECT_EQ(IsFirstPacketInKeyFrame(vp9), vp9.ss_data_available); |
1877 } test; | 2128 EXPECT_EQ(vp9.spatial_idx > 0, vp9.inter_layer_predicted); // D |
2129 | |
2130 if (IsNewPictureId(vp9)) { | |
2131 EXPECT_EQ(0, vp9.spatial_idx); | |
2132 EXPECT_EQ(num_spatial_layers_ - 1, last_vp9_.spatial_idx); | |
2133 } | |
2134 | |
2135 VerifyFixedTemporalLayerStructure(vp9, | |
2136 l_field_ ? num_temporal_layers_ : 0); | |
2137 | |
2138 if (frames_sent_ > kNumFramesToSend) | |
2139 observation_complete_->Set(); | |
2140 } | |
2141 const uint8_t num_temporal_layers_; | |
2142 const uint8_t num_spatial_layers_; | |
2143 bool l_field_; | |
2144 } test(num_temporal_layers, num_spatial_layers); | |
1878 | 2145 |
1879 RunBaseTest(&test, FakeNetworkPipe::Config()); | 2146 RunBaseTest(&test, FakeNetworkPipe::Config()); |
1880 } | 2147 } |
1881 | 2148 |
1882 TEST_F(VideoSendStreamTest, VP9FlexModeHasPictureId) { | 2149 TEST_F(VideoSendStreamTest, Vp9FlexModeRefCount) { |
1883 class FlexibleMode : public VP9HeaderObeserver { | 2150 class FlexibleMode : public Vp9HeaderObserver { |
1884 void ModifyConfigsHook( | 2151 void ModifyConfigsHook( |
1885 VideoSendStream::Config* send_config, | 2152 VideoSendStream::Config* send_config, |
1886 std::vector<VideoReceiveStream::Config>* receive_configs, | 2153 std::vector<VideoReceiveStream::Config>* receive_configs, |
1887 VideoEncoderConfig* encoder_config) override { | 2154 VideoEncoderConfig* encoder_config) override { |
2155 encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen; | |
1888 vp9_settings_.flexibleMode = true; | 2156 vp9_settings_.flexibleMode = true; |
1889 } | 2157 vp9_settings_.numberOfTemporalLayers = 1; |
1890 | 2158 vp9_settings_.numberOfSpatialLayers = 2; |
1891 void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) override { | 2159 } |
1892 EXPECT_NE(vp9videoHeader->picture_id, kNoPictureId); | 2160 |
1893 observation_complete_->Set(); | 2161 void InspectHeader(const RTPVideoHeaderVP9& vp9_header) override { |
1894 } | 2162 EXPECT_TRUE(vp9_header.flexible_mode); |
1895 } test; | 2163 EXPECT_EQ(kNoTl0PicIdx, vp9_header.tl0_pic_idx); |
1896 | 2164 if (vp9_header.inter_pic_predicted) { |
1897 RunBaseTest(&test, FakeNetworkPipe::Config()); | 2165 EXPECT_GT(vp9_header.num_ref_pics, 0u); |
1898 } | |
1899 | |
1900 TEST_F(VideoSendStreamTest, VP9FlexModeRefCount) { | |
1901 class FlexibleMode : public VP9HeaderObeserver { | |
1902 void ModifyConfigsHook( | |
1903 VideoSendStream::Config* send_config, | |
1904 std::vector<VideoReceiveStream::Config>* receive_configs, | |
1905 VideoEncoderConfig* encoder_config) override { | |
1906 vp9_settings_.flexibleMode = true; | |
1907 } | |
1908 | |
1909 void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) override { | |
1910 EXPECT_TRUE(vp9videoHeader->flexible_mode); | |
1911 if (vp9videoHeader->inter_pic_predicted) { | |
1912 EXPECT_GT(vp9videoHeader->num_ref_pics, 0u); | |
1913 observation_complete_->Set(); | 2166 observation_complete_->Set(); |
1914 } | 2167 } |
1915 } | 2168 } |
1916 } test; | 2169 } test; |
1917 | 2170 |
1918 RunBaseTest(&test, FakeNetworkPipe::Config()); | 2171 RunBaseTest(&test, FakeNetworkPipe::Config()); |
1919 } | |
1920 | |
1921 TEST_F(VideoSendStreamTest, VP9FlexModeRefs) { | |
1922 class FlexibleMode : public VP9HeaderObeserver { | |
1923 void ModifyConfigsHook( | |
1924 VideoSendStream::Config* send_config, | |
1925 std::vector<VideoReceiveStream::Config>* receive_configs, | |
1926 VideoEncoderConfig* encoder_config) override { | |
1927 vp9_settings_.flexibleMode = true; | |
1928 } | |
1929 | |
1930 void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) override { | |
1931 EXPECT_TRUE(vp9videoHeader->flexible_mode); | |
1932 if (vp9videoHeader->inter_pic_predicted) { | |
1933 EXPECT_GT(vp9videoHeader->num_ref_pics, 0u); | |
1934 observation_complete_->Set(); | |
1935 } | |
1936 } | |
1937 | |
1938 } test; | |
1939 | |
1940 RunBaseTest(&test, FakeNetworkPipe::Config()); | |
1941 } | 2172 } |
1942 | 2173 |
1943 } // namespace webrtc | 2174 } // namespace webrtc |
OLD | NEW |