Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 #include <algorithm> // max | 10 #include <algorithm> // max |
| (...skipping 40 matching lines...) | |
| 51 const VideoFrame& frame2); | 51 const VideoFrame& frame2); |
| 52 void ExpectEqualFramesVector(const std::vector<VideoFrame>& frames1, | 52 void ExpectEqualFramesVector(const std::vector<VideoFrame>& frames1, |
| 53 const std::vector<VideoFrame>& frames2); | 53 const std::vector<VideoFrame>& frames2); |
| 54 VideoFrame CreateVideoFrame(int width, int height, uint8_t data); | 54 VideoFrame CreateVideoFrame(int width, int height, uint8_t data); |
| 55 | 55 |
| 56 class VideoSendStreamTest : public test::CallTest { | 56 class VideoSendStreamTest : public test::CallTest { |
| 57 protected: | 57 protected: |
| 58 void TestNackRetransmission(uint32_t retransmit_ssrc, | 58 void TestNackRetransmission(uint32_t retransmit_ssrc, |
| 59 uint8_t retransmit_payload_type); | 59 uint8_t retransmit_payload_type); |
| 60 void TestPacketFragmentationSize(VideoFormat format, bool with_fec); | 60 void TestPacketFragmentationSize(VideoFormat format, bool with_fec); |
| 61 | |
| 62 void TestVp9NonFlexMode(uint8_t num_temporal_layers, | |
| 63 uint8_t num_spatial_layers); | |
| 61 }; | 64 }; |
| 62 | 65 |
| 63 TEST_F(VideoSendStreamTest, CanStartStartedStream) { | 66 TEST_F(VideoSendStreamTest, CanStartStartedStream) { |
| 64 Call::Config call_config; | 67 Call::Config call_config; |
| 65 CreateSenderCall(call_config); | 68 CreateSenderCall(call_config); |
| 66 | 69 |
| 67 test::NullTransport transport; | 70 test::NullTransport transport; |
| 68 CreateSendConfig(1, &transport); | 71 CreateSendConfig(1, &transport); |
| 69 CreateStreams(); | 72 CreateStreams(); |
| 70 send_stream_->Start(); | 73 send_stream_->Start(); |
| (...skipping 1713 matching lines...) | |
| 1784 const std::vector<VideoReceiveStream*>& receive_streams) override { | 1787 const std::vector<VideoReceiveStream*>& receive_streams) override { |
| 1785 send_stream_ = send_stream; | 1788 send_stream_ = send_stream; |
| 1786 } | 1789 } |
| 1787 | 1790 |
| 1788 VideoSendStream* send_stream_; | 1791 VideoSendStream* send_stream_; |
| 1789 } test; | 1792 } test; |
| 1790 | 1793 |
| 1791 RunBaseTest(&test, FakeNetworkPipe::Config()); | 1794 RunBaseTest(&test, FakeNetworkPipe::Config()); |
| 1792 } | 1795 } |
| 1793 | 1796 |
| 1794 class VP9HeaderObeserver : public test::SendTest { | 1797 class Vp9HeaderObserver : public test::SendTest { |
| 1795 public: | 1798 public: |
| 1796 VP9HeaderObeserver() | 1799 Vp9HeaderObserver() |
| 1797 : SendTest(VideoSendStreamTest::kDefaultTimeoutMs), | 1800 : SendTest(VideoSendStreamTest::kDefaultTimeoutMs), |
| 1798 vp9_encoder_(VP9Encoder::Create()), | 1801 vp9_encoder_(VP9Encoder::Create()), |
| 1799 vp9_settings_(VideoEncoder::GetDefaultVp9Settings()) { | 1802 vp9_settings_(VideoEncoder::GetDefaultVp9Settings()), |
| 1800 vp9_settings_.numberOfTemporalLayers = 1; | 1803 packets_sent_(0), |
| 1801 vp9_settings_.numberOfSpatialLayers = 2; | 1804 frames_sent_(0) {} |
| 1802 } | |
| 1803 | 1805 |
| 1804 virtual void ModifyConfigsHook( | 1806 virtual void ModifyConfigsHook( |
| 1805 VideoSendStream::Config* send_config, | 1807 VideoSendStream::Config* send_config, |
| 1806 std::vector<VideoReceiveStream::Config>* receive_configs, | 1808 std::vector<VideoReceiveStream::Config>* receive_configs, |
| 1807 VideoEncoderConfig* encoder_config) {} | 1809 VideoEncoderConfig* encoder_config) {} |
| 1808 | 1810 |
| 1809 virtual void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) = 0; | 1811 virtual void InspectHeader(const RTPVideoHeaderVP9& vp9) = 0; |
| 1810 | 1812 |
| 1811 private: | 1813 private: |
| 1812 const int kVp9PayloadType = 105; | 1814 const int kVp9PayloadType = 105; |
| 1813 | 1815 |
| 1814 void ModifyConfigs(VideoSendStream::Config* send_config, | 1816 void ModifyConfigs(VideoSendStream::Config* send_config, |
| 1815 std::vector<VideoReceiveStream::Config>* receive_configs, | 1817 std::vector<VideoReceiveStream::Config>* receive_configs, |
| 1816 VideoEncoderConfig* encoder_config) override { | 1818 VideoEncoderConfig* encoder_config) override { |
| 1817 encoder_config->encoder_specific_settings = &vp9_settings_; | 1819 encoder_config->encoder_specific_settings = &vp9_settings_; |
| 1818 encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen; | |
| 1819 send_config->encoder_settings.encoder = vp9_encoder_.get(); | 1820 send_config->encoder_settings.encoder = vp9_encoder_.get(); |
| 1820 send_config->encoder_settings.payload_name = "VP9"; | 1821 send_config->encoder_settings.payload_name = "VP9"; |
| 1821 send_config->encoder_settings.payload_type = kVp9PayloadType; | 1822 send_config->encoder_settings.payload_type = kVp9PayloadType; |
| 1822 ModifyConfigsHook(send_config, receive_configs, encoder_config); | 1823 ModifyConfigsHook(send_config, receive_configs, encoder_config); |
| 1824 EXPECT_EQ(1u, encoder_config->streams.size()); | |
| 1825 encoder_config->streams[0].temporal_layer_thresholds_bps.resize( | |
| 1826 vp9_settings_.numberOfTemporalLayers - 1); | |
| 1827 encoder_config_ = *encoder_config; | |
| 1823 } | 1828 } |
| 1824 | 1829 |
| 1825 void PerformTest() override { | 1830 void PerformTest() override { |
| 1826 EXPECT_EQ(kEventSignaled, Wait()) | 1831 EXPECT_EQ(kEventSignaled, Wait()) |
| 1827 << "Test timed out waiting for VP9 packet"; | 1832 << "Test timed out waiting for VP9 packet"; |
| 1828 } | 1833 } |
| 1829 | 1834 |
| 1830 Action OnSendRtp(const uint8_t* packet, size_t length) override { | 1835 Action OnSendRtp(const uint8_t* packet, size_t length) override { |
| 1831 RTPHeader header; | 1836 RTPHeader header; |
| 1832 EXPECT_TRUE(parser_->Parse(packet, length, &header)); | 1837 EXPECT_TRUE(parser_->Parse(packet, length, &header)); |
| 1833 | 1838 |
| 1834 if (header.payloadType == kVp9PayloadType) { | 1839 EXPECT_EQ(kVp9PayloadType, header.payloadType); |
| 1835 RtpDepacketizerVp9 vp9depacketizer; | 1840 const uint8_t* payload = packet + header.headerLength; |
| 1836 RtpDepacketizer::ParsedPayload vp9payload; | 1841 size_t payload_length = length - header.headerLength - header.paddingLength; |
| 1837 const uint8_t* vp9_packet = packet + header.headerLength; | 1842 |
| 1838 size_t payload_length = | 1843 bool new_packet = packets_sent_ == 0 || |
| 1839 length - header.headerLength - header.paddingLength; | 1844 IsNewerSequenceNumber(header.sequenceNumber, |
| 1840 | 1845 last_header_.sequenceNumber); |
| 1841 if (payload_length > 0) { | 1846 if (payload_length > 0 && new_packet) { |
| 1842 bool parse_vp9header_successful = | 1847 RtpDepacketizer::ParsedPayload parsed; |
| 1843 vp9depacketizer.Parse(&vp9payload, vp9_packet, payload_length); | 1848 RtpDepacketizerVp9 depacketizer; |
| 1844 bool is_vp9_codec_type = | 1849 EXPECT_TRUE(depacketizer.Parse(&parsed, payload, payload_length)); |
| 1845 vp9payload.type.Video.codec == RtpVideoCodecTypes::kRtpVideoVp9; | 1850 EXPECT_EQ(RtpVideoCodecTypes::kRtpVideoVp9, parsed.type.Video.codec); |
| 1846 EXPECT_TRUE(parse_vp9header_successful); | 1851 // Verify common fields for all configurations. |
| 1847 EXPECT_TRUE(is_vp9_codec_type); | 1852 VerifyCommonHeader(parsed.type.Video.codecHeader.VP9); |
| 1848 | 1853 CompareConsecutiveFrames(header, parsed.type.Video); |
| 1849 RTPVideoHeaderVP9* vp9videoHeader = | 1854 // Verify configuration specific settings. |
| 1850 &vp9payload.type.Video.codecHeader.VP9; | 1855 InspectHeader(parsed.type.Video.codecHeader.VP9); |
| 1851 if (parse_vp9header_successful && is_vp9_codec_type) { | 1856 |
| 1852 InspectHeader(vp9videoHeader); | 1857 ++packets_sent_; |
| 1853 } else { | 1858 if (header.markerBit) { |
| 1854 observation_complete_->Set(); | 1859 ++frames_sent_; |
| 1855 } | |
| 1856 } | 1860 } |
| 1857 } | 1861 last_header_ = header; |
| 1858 | 1862 last_vp9_ = parsed.type.Video.codecHeader.VP9; |
| 1863 } | |
| 1859 return SEND_PACKET; | 1864 return SEND_PACKET; |
| 1860 } | 1865 } |
| 1861 | 1866 |
| 1862 protected: | 1867 protected: |
| 1868 bool ContinuousPictureId(const RTPVideoHeaderVP9& vp9) const { | |
| 1869 if (last_vp9_.picture_id > vp9.picture_id) { | |
| 1870 return vp9.picture_id == 0; // Wrap. | |
| 1871 } else { | |
| 1872 return vp9.picture_id == last_vp9_.picture_id + 1; | |
| 1873 } | |
| 1874 } | |
| 1875 | |
| 1876 void VerifySpatialIdxWithinFrame(const RTPVideoHeaderVP9& vp9) const { | |
| 1877 if (frames_sent_ == 0) | |
| 1878 return; | |
| 1879 | |
| 1880 bool new_layer = vp9.spatial_idx != last_vp9_.spatial_idx; | |
| 1881 EXPECT_EQ(new_layer, vp9.beginning_of_frame); | |
| 1882 EXPECT_EQ(new_layer, last_vp9_.end_of_frame); | |
| 1883 EXPECT_EQ(new_layer ? last_vp9_.spatial_idx + 1 : last_vp9_.spatial_idx, | |
| 1884 vp9.spatial_idx); | |
| 1885 } | |
| 1886 | |
| 1887 void VerifyFixedTemporalLayerStructure(const RTPVideoHeaderVP9& vp9, | |
| 1888 uint8_t num_layers) const { | |
| 1889 switch (num_layers) { | |
| 1890 case 0: | |
| 1891 VerifyTemporalLayerStructure0(vp9); | |
| 1892 break; | |
| 1893 case 1: | |
| 1894 VerifyTemporalLayerStructure1(vp9); | |
| 1895 break; | |
| 1896 case 2: | |
| 1897 VerifyTemporalLayerStructure2(vp9); | |
| 1898 break; | |
| 1899 case 3: | |
| 1900 VerifyTemporalLayerStructure3(vp9); | |
| 1901 break; | |
| 1902 default: | |
| 1903 RTC_NOTREACHED(); | |
| 1904 } | |
| 1905 } | |
| 1906 | |
| 1907 void VerifyTemporalLayerStructure0(const RTPVideoHeaderVP9& vp9) const { | |
| 1908 EXPECT_EQ(kNoTl0PicIdx, vp9.tl0_pic_idx); | |
| 1909 EXPECT_EQ(kNoTemporalIdx, vp9.temporal_idx); // no tid | |
| 1910 EXPECT_FALSE(vp9.temporal_up_switch); | |
| 1911 } | |
| 1912 | |
| 1913 void VerifyTemporalLayerStructure1(const RTPVideoHeaderVP9& vp9) const { | |
| 1914 EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx); | |
| 1915 EXPECT_EQ(0, vp9.temporal_idx); // 0,0,0,... | |
| 1916 EXPECT_FALSE(vp9.temporal_up_switch); | |
| 1917 } | |
| 1918 | |
| 1919 void VerifyTemporalLayerStructure2(const RTPVideoHeaderVP9& vp9) const { | |
| 1920 EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx); | |
| 1921 EXPECT_GE(vp9.temporal_idx, 0); // 0,1,0,1,... | |
| 1922 EXPECT_LE(vp9.temporal_idx, 1); | |
| 1923 EXPECT_EQ(vp9.temporal_idx > 0, vp9.temporal_up_switch); | |
| 1924 if (IsNewPictureId(vp9)) { | |
| 1925 uint8_t expected_tid = | |
| 1926 (IsKeyFrame(vp9) || last_vp9_.temporal_idx == 1) ? 0 : 1; | |
| 1927 EXPECT_EQ(expected_tid, vp9.temporal_idx); | |
| 1928 } | |
| 1929 } | |
| 1930 | |
| 1931 void VerifyTemporalLayerStructure3(const RTPVideoHeaderVP9& vp9) const { | |
| 1932 EXPECT_NE(kNoTl0PicIdx, vp9.tl0_pic_idx); | |
| 1933 EXPECT_GE(vp9.temporal_idx, 0); // 0,2,1,2,... | |
| 1934 EXPECT_LE(vp9.temporal_idx, 2); | |
| 1935 if (IsNewPictureId(vp9) && !IsKeyFrame(vp9)) { | |
| 1936 EXPECT_NE(vp9.temporal_idx, last_vp9_.temporal_idx); | |
| 1937 switch (vp9.temporal_idx) { | |
| 1938 case 0: | |
| 1939 EXPECT_EQ(2, last_vp9_.temporal_idx); | |
| 1940 EXPECT_FALSE(vp9.temporal_up_switch); | |
| 1941 break; | |
| 1942 case 1: | |
| 1943 EXPECT_EQ(2, last_vp9_.temporal_idx); | |
| 1944 EXPECT_TRUE(vp9.temporal_up_switch); | |
| 1945 break; | |
| 1946 case 2: | |
| 1947 EXPECT_EQ(last_vp9_.temporal_idx == 0, vp9.temporal_up_switch); | |
| 1948 break; | |
| 1949 } | |
| 1950 } | |
| 1951 } | |
| 1952 | |
| 1953 void VerifyTl0Idx(const RTPVideoHeaderVP9& vp9) const { | |
| 1954 if (vp9.tl0_pic_idx == kNoTl0PicIdx) | |
| 1955 return; | |
| 1956 | |
| 1957 uint8_t expected_tl0_idx = last_vp9_.tl0_pic_idx; | |
| 1958 if (vp9.temporal_idx == 0) | |
| 1959 ++expected_tl0_idx; | |
| 1960 EXPECT_EQ(expected_tl0_idx, vp9.tl0_pic_idx); | |
| 1961 } | |
| 1962 | |
| 1963 bool IsNewPictureId(const RTPVideoHeaderVP9& vp9) const { | |
| 1964 return frames_sent_ > 0 && (vp9.picture_id != last_vp9_.picture_id); | |
| 1965 } | |
| 1966 | |
| 1967 bool IsKeyFrame(const RTPVideoHeaderVP9& vp9) const { | |
| *Review comment, stefan-webrtc (2015/11/23 10:27:13):* I'm not sure if !inter_pic_predicted is enough to … | |
| *Reply, åsapersson (2015/11/23 15:01:49):* For the second example (layer frame using inter-la… *(see the key-frame sketch after the diff)* | |
| 1968 return !vp9.inter_pic_predicted; // P | |
| 1969 } | |
| 1970 | |
| 1971 bool IsFirstPacketInKeyFrame(const RTPVideoHeaderVP9& vp9) const { | |
| 1972 return IsKeyFrame(vp9) && vp9.beginning_of_frame && vp9.spatial_idx == 0; | |
| 1973 } | |
| 1974 | |
| 1975 // Flexible mode (F=1): Non-flexible mode (F=0): | |
| 1976 // | |
| 1977 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
| 1978 // |I|P|L|F|B|E|V|-| |I|P|L|F|B|E|V|-| | |
| 1979 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
| 1980 // I: |M| PICTURE ID | I: |M| PICTURE ID | | |
| 1981 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
| 1982 // M: | EXTENDED PID | M: | EXTENDED PID | | |
| 1983 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
| 1984 // L: | T |U| S |D| L: | T |U| S |D| | |
| 1985 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
| 1986 // P,F: | P_DIFF |X|N| | TL0PICIDX | | |
| 1987 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
| 1988 // X: |EXTENDED P_DIFF| V: | SS .. | | |
| 1989 // +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+ | |
| 1990 // V: | SS .. | | |
| 1991 // +-+-+-+-+-+-+-+-+ | |
| 1992 void VerifyCommonHeader(const RTPVideoHeaderVP9& vp9) const { | |
| 1993 EXPECT_EQ(kMaxTwoBytePictureId, vp9.max_picture_id); // M:1 | |
| 1994 EXPECT_NE(kNoPictureId, vp9.picture_id); // I:1 | |
| 1995 EXPECT_EQ(vp9_settings_.flexibleMode, vp9.flexible_mode); // F | |
| 1996 EXPECT_GE(vp9.spatial_idx, 0); // S | |
| 1997 EXPECT_LT(vp9.spatial_idx, vp9_settings_.numberOfSpatialLayers); | |
| 1998 if (vp9.ss_data_available) // V | |
| 1999 VerifySsData(vp9); | |
| 2000 | |
| 2001 if (packets_sent_ == 0) | |
| 2002 EXPECT_TRUE(IsFirstPacketInKeyFrame(vp9)); | |
| 2003 if (frames_sent_ == 0) | |
| 2004 EXPECT_TRUE(IsKeyFrame(vp9)); | |
| 2005 | |
| 2006 if (IsKeyFrame(vp9)) { | |
| 2007 EXPECT_TRUE(vp9.temporal_idx == 0 || vp9.temporal_idx == kNoTemporalIdx); | |
| 2008 EXPECT_FALSE(vp9.temporal_up_switch); | |
| 2009 } | |
| 2010 } | |
| 2011 | |
| 2012 // Scalability structure (SS). | |
| 2013 // | |
| 2014 // +-+-+-+-+-+-+-+-+ | |
| 2015 // V: | N_S |Y|G|-|-|-| | |
| 2016 // +-+-+-+-+-+-+-+-+ | |
| 2017 // Y: | WIDTH | N_S + 1 times | |
| 2018 // +-+-+-+-+-+-+-+-+ | |
| 2019 // | HEIGHT | | |
| 2020 // +-+-+-+-+-+-+-+-+ | |
| 2021 // G: | N_G | | |
| 2022 // +-+-+-+-+-+-+-+-+ | |
| 2023 // N_G: | T |U| R |-|-| N_G times | |
| 2024 // +-+-+-+-+-+-+-+-+ | |
| 2025 // | P_DIFF | R times | |
| 2026 // +-+-+-+-+-+-+-+-+ | |
| 2027 void VerifySsData(const RTPVideoHeaderVP9& vp9) const { | |
| 2028 EXPECT_TRUE(vp9.ss_data_available); // V | |
| 2029 EXPECT_EQ(vp9_settings_.numberOfSpatialLayers, // N_S + 1 | |
| 2030 vp9.num_spatial_layers); | |
| 2031 EXPECT_TRUE(vp9.spatial_layer_resolution_present); // Y:1 | |
| 2032 size_t expected_width = encoder_config_.streams[0].width; | |
| 2033 size_t expected_height = encoder_config_.streams[0].height; | |
| 2034 for (int i = vp9.num_spatial_layers - 1; i >= 0; --i) { | |
| 2035 EXPECT_EQ(expected_width, vp9.width[i]); // WIDTH | |
| 2036 EXPECT_EQ(expected_height, vp9.height[i]); // HEIGHT | |
| 2037 expected_width /= 2; | |
| 2038 expected_height /= 2; | |
| 2039 } | |
| 2040 } | |
| 2041 | |
| 2042 void CompareConsecutiveFrames(const RTPHeader& header, | |
| 2043 const RTPVideoHeader& video) const { | |
| 2044 const RTPVideoHeaderVP9& vp9 = video.codecHeader.VP9; | |
| 2045 | |
| 2046 bool new_frame = packets_sent_ == 0 || | |
| 2047 IsNewerTimestamp(header.timestamp, last_header_.timestamp); | |
| 2048 EXPECT_EQ(new_frame, video.isFirstPacket); | |
| 2049 if (!new_frame) { | |
| 2050 EXPECT_FALSE(last_header_.markerBit); | |
| 2051 EXPECT_EQ(last_header_.timestamp, header.timestamp); | |
| 2052 EXPECT_EQ(last_vp9_.picture_id, vp9.picture_id); | |
| 2053 EXPECT_EQ(last_vp9_.temporal_idx, vp9.temporal_idx); | |
| 2054 EXPECT_EQ(last_vp9_.tl0_pic_idx, vp9.tl0_pic_idx); | |
| 2055 VerifySpatialIdxWithinFrame(vp9); | |
| 2056 return; | |
| 2057 } | |
| 2058 // New frame. | |
| 2059 EXPECT_TRUE(vp9.beginning_of_frame); | |
| 2060 | |
| 2061 // Compare with last packet in previous frame. | |
| 2062 if (frames_sent_ == 0) | |
| 2063 return; | |
| 2064 EXPECT_TRUE(last_vp9_.end_of_frame); | |
| 2065 EXPECT_TRUE(last_header_.markerBit); | |
| 2066 EXPECT_TRUE(ContinuousPictureId(vp9)); | |
| 2067 VerifyTl0Idx(vp9); | |
| 2068 } | |
| 2069 | |
| 1863 rtc::scoped_ptr<VP9Encoder> vp9_encoder_; | 2070 rtc::scoped_ptr<VP9Encoder> vp9_encoder_; |
| 1864 VideoCodecVP9 vp9_settings_; | 2071 VideoCodecVP9 vp9_settings_; |
| 2072 webrtc::VideoEncoderConfig encoder_config_; | |
| 2073 RTPHeader last_header_; | |
| 2074 RTPVideoHeaderVP9 last_vp9_; | |
| 2075 size_t packets_sent_; | |
| 2076 size_t frames_sent_; | |
| 1865 }; | 2077 }; |
| 1866 | 2078 |
| 1867 TEST_F(VideoSendStreamTest, DISABLED_VP9FlexMode) { | 2079 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_1Tl1SLayers) { |
| 1868 class FlexibleMode : public VP9HeaderObeserver { | 2080 const uint8_t kNumTemporalLayers = 1; |
| 2081 const uint8_t kNumSpatialLayers = 1; | |
| 2082 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
| 2083 } | |
| 2084 | |
| 2085 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_2Tl1SLayers) { | |
| 2086 const uint8_t kNumTemporalLayers = 2; | |
| 2087 const uint8_t kNumSpatialLayers = 1; | |
| 2088 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
| 2089 } | |
| 2090 | |
| 2091 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_3Tl1SLayers) { | |
| 2092 const uint8_t kNumTemporalLayers = 3; | |
| 2093 const uint8_t kNumSpatialLayers = 1; | |
| 2094 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
| 2095 } | |
| 2096 | |
| 2097 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_1Tl2SLayers) { | |
| 2098 const uint8_t kNumTemporalLayers = 1; | |
| 2099 const uint8_t kNumSpatialLayers = 2; | |
| 2100 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
| 2101 } | |
| 2102 | |
| 2103 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_2Tl2SLayers) { | |
| 2104 const uint8_t kNumTemporalLayers = 2; | |
| 2105 const uint8_t kNumSpatialLayers = 2; | |
| 2106 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
| 2107 } | |
| 2108 | |
| 2109 TEST_F(VideoSendStreamTest, Vp9NonFlexMode_3Tl2SLayers) { | |
| 2110 const uint8_t kNumTemporalLayers = 3; | |
| 2111 const uint8_t kNumSpatialLayers = 2; | |
| 2112 TestVp9NonFlexMode(kNumTemporalLayers, kNumSpatialLayers); | |
| 2113 } | |
| 2114 | |
| 2115 void VideoSendStreamTest::TestVp9NonFlexMode(uint8_t num_temporal_layers, | |
| 2116 uint8_t num_spatial_layers) { | |
| 2117 static const size_t kNumFramesToSend = 100; | |
| 2118 // Set to < kNumFramesToSend and coprime to length of temporal layer | |
| 2119 // structures to verify temporal id reset on key frame. | |
| 2120 static const int kKeyFrameInterval = 31; | |
| 2121 class NonFlexibleMode : public Vp9HeaderObserver { | |
| 2122 public: | |
| 2123 NonFlexibleMode(uint8_t num_temporal_layers, uint8_t num_spatial_layers) | |
| 2124 : num_temporal_layers_(num_temporal_layers), | |
| 2125 num_spatial_layers_(num_spatial_layers), | |
| 2126 l_field_(num_temporal_layers > 1 || num_spatial_layers > 1) { | |
| 2127 } | |
| 1869 void ModifyConfigsHook( | 2128 void ModifyConfigsHook( |
| 1870 VideoSendStream::Config* send_config, | 2129 VideoSendStream::Config* send_config, |
| 1871 std::vector<VideoReceiveStream::Config>* receive_configs, | 2130 std::vector<VideoReceiveStream::Config>* receive_configs, |
| 1872 VideoEncoderConfig* encoder_config) override { | 2131 VideoEncoderConfig* encoder_config) override { |
| 1873 vp9_settings_.flexibleMode = true; | 2132 vp9_settings_.flexibleMode = false; |
| 1874 } | 2133 vp9_settings_.frameDroppingOn = false; |
| 1875 | 2134 vp9_settings_.keyFrameInterval = kKeyFrameInterval; |
| 1876 void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) override { | 2135 vp9_settings_.numberOfTemporalLayers = num_temporal_layers_; |
| 1877 EXPECT_TRUE(vp9videoHeader->flexible_mode); | 2136 vp9_settings_.numberOfSpatialLayers = num_spatial_layers_; |
| 1878 observation_complete_->Set(); | 2137 } |
| 1879 } | 2138 |
| 1880 } test; | 2139 void InspectHeader(const RTPVideoHeaderVP9& vp9) override { |
| 2140 EXPECT_EQ(IsFirstPacketInKeyFrame(vp9), vp9.ss_data_available); | |
| 2141 EXPECT_EQ(vp9.spatial_idx > 0, vp9.inter_layer_predicted); // D | |
| 2142 | |
| 2143 if (IsNewPictureId(vp9)) { | |
| 2144 EXPECT_EQ(0, vp9.spatial_idx); | |
| 2145 EXPECT_EQ(num_spatial_layers_ - 1, last_vp9_.spatial_idx); | |
| 2146 } | |
| 2147 | |
| 2148 VerifyFixedTemporalLayerStructure(vp9, | |
| 2149 l_field_ ? num_temporal_layers_ : 0); | |
| 2150 | |
| 2151 if (frames_sent_ > kNumFramesToSend) | |
| 2152 observation_complete_->Set(); | |
| 2153 } | |
| 2154 const uint8_t num_temporal_layers_; | |
| 2155 const uint8_t num_spatial_layers_; | |
| 2156 const bool l_field_; | |
| 2157 } test(num_temporal_layers, num_spatial_layers); | |
| 1881 | 2158 |
| 1882 RunBaseTest(&test, FakeNetworkPipe::Config()); | 2159 RunBaseTest(&test, FakeNetworkPipe::Config()); |
| 1883 } | 2160 } |
| 1884 | 2161 |
| 1885 TEST_F(VideoSendStreamTest, VP9FlexModeHasPictureId) { | 2162 TEST_F(VideoSendStreamTest, Vp9FlexModeRefCount) { |
| 1886 class FlexibleMode : public VP9HeaderObeserver { | 2163 class FlexibleMode : public Vp9HeaderObserver { |
| 1887 void ModifyConfigsHook( | 2164 void ModifyConfigsHook( |
| 1888 VideoSendStream::Config* send_config, | 2165 VideoSendStream::Config* send_config, |
| 1889 std::vector<VideoReceiveStream::Config>* receive_configs, | 2166 std::vector<VideoReceiveStream::Config>* receive_configs, |
| 1890 VideoEncoderConfig* encoder_config) override { | 2167 VideoEncoderConfig* encoder_config) override { |
| 2168 encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen; | |
| 1891 vp9_settings_.flexibleMode = true; | 2169 vp9_settings_.flexibleMode = true; |
| 1892 } | 2170 vp9_settings_.numberOfTemporalLayers = 1; |
| 1893 | 2171 vp9_settings_.numberOfSpatialLayers = 2; |
| 1894 void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) override { | 2172 } |
| 1895 EXPECT_NE(vp9videoHeader->picture_id, kNoPictureId); | 2173 |
| 1896 observation_complete_->Set(); | 2174 void InspectHeader(const RTPVideoHeaderVP9& vp9_header) override { |
| 1897 } | 2175 EXPECT_TRUE(vp9_header.flexible_mode); |
| 1898 } test; | 2176 EXPECT_EQ(kNoTl0PicIdx, vp9_header.tl0_pic_idx); |
| 1899 | 2177 if (vp9_header.inter_pic_predicted) { |
| 1900 RunBaseTest(&test, FakeNetworkPipe::Config()); | 2178 EXPECT_GT(vp9_header.num_ref_pics, 0u); |
| 1901 } | |
| 1902 | |
| 1903 TEST_F(VideoSendStreamTest, VP9FlexModeRefCount) { | |
| 1904 class FlexibleMode : public VP9HeaderObeserver { | |
| 1905 void ModifyConfigsHook( | |
| 1906 VideoSendStream::Config* send_config, | |
| 1907 std::vector<VideoReceiveStream::Config>* receive_configs, | |
| 1908 VideoEncoderConfig* encoder_config) override { | |
| 1909 vp9_settings_.flexibleMode = true; | |
| 1910 } | |
| 1911 | |
| 1912 void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) override { | |
| 1913 EXPECT_TRUE(vp9videoHeader->flexible_mode); | |
| 1914 if (vp9videoHeader->inter_pic_predicted) { | |
| 1915 EXPECT_GT(vp9videoHeader->num_ref_pics, 0u); | |
| 1916 observation_complete_->Set(); | 2179 observation_complete_->Set(); |
| 1917 } | 2180 } |
| 1918 } | 2181 } |
| 1919 } test; | 2182 } test; |
| 1920 | 2183 |
| 1921 RunBaseTest(&test, FakeNetworkPipe::Config()); | 2184 RunBaseTest(&test, FakeNetworkPipe::Config()); |
| 1922 } | |
| 1923 | |
| 1924 TEST_F(VideoSendStreamTest, VP9FlexModeRefs) { | |
| 1925 class FlexibleMode : public VP9HeaderObeserver { | |
| 1926 void ModifyConfigsHook( | |
| 1927 VideoSendStream::Config* send_config, | |
| 1928 std::vector<VideoReceiveStream::Config>* receive_configs, | |
| 1929 VideoEncoderConfig* encoder_config) override { | |
| 1930 vp9_settings_.flexibleMode = true; | |
| 1931 } | |
| 1932 | |
| 1933 void InspectHeader(RTPVideoHeaderVP9* vp9videoHeader) override { | |
| 1934 EXPECT_TRUE(vp9videoHeader->flexible_mode); | |
| 1935 if (vp9videoHeader->inter_pic_predicted) { | |
| 1936 EXPECT_GT(vp9videoHeader->num_ref_pics, 0u); | |
| 1937 observation_complete_->Set(); | |
| 1938 } | |
| 1939 } | |
| 1940 | |
| 1941 } test; | |
| 1942 | |
| 1943 RunBaseTest(&test, FakeNetworkPipe::Config()); | |
| 1944 } | 2185 } |
| 1945 | 2186 |
| 1946 } // namespace webrtc | 2187 } // namespace webrtc |
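The review thread above questions whether `!inter_pic_predicted` alone identifies a key frame: with several spatial layers, an upper-layer frame of a key picture may carry no inter-picture prediction either, only inter-layer prediction. The standalone sketch below illustrates a stricter check along those lines. It is an illustration only, not part of this patch or the WebRTC API: `Vp9HeaderFields` and `IsBaseLayerKeyFrame` are hypothetical names, and the reduced struct mirrors just the header fields the test already inspects (`inter_pic_predicted`, `inter_layer_predicted`, `spatial_idx`).

```cpp
#include <cstdint>
#include <iostream>

// Reduced stand-in for the RTPVideoHeaderVP9 fields used in this sketch.
struct Vp9HeaderFields {
  bool inter_pic_predicted;    // P bit: inter-picture prediction is used.
  bool inter_layer_predicted;  // D bit: predicted from a lower spatial layer.
  uint8_t spatial_idx;         // S: spatial layer index of this frame.
};

// Stricter key-frame test than !inter_pic_predicted alone: also require the
// base spatial layer and no inter-layer prediction.
bool IsBaseLayerKeyFrame(const Vp9HeaderFields& vp9) {
  return !vp9.inter_pic_predicted && !vp9.inter_layer_predicted &&
         vp9.spatial_idx == 0;
}

int main() {
  // Base spatial layer of a key picture: no prediction of any kind.
  const Vp9HeaderFields base_layer = {false, false, 0};
  // Upper spatial layer of the same key picture: inter-layer prediction only,
  // so inter_pic_predicted is still false.
  const Vp9HeaderFields upper_layer = {false, true, 1};

  std::cout << IsBaseLayerKeyFrame(base_layer) << "\n";   // prints 1
  std::cout << IsBaseLayerKeyFrame(upper_layer) << "\n";  // prints 0
  return 0;
}
```

Whether such a stricter check is needed depends on how the test should treat inter-layer-predicted frames of a key picture; the patch under review keeps the simpler `!vp9.inter_pic_predicted` form in `IsKeyFrame()`.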