/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "webrtc/video/video_capture_input.h"

#include <cstring>
#include <memory>
#include <vector>

#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/event.h"
#include "webrtc/base/refcount.h"
#include "webrtc/test/fake_texture_frame.h"
#include "webrtc/test/frame_utils.h"
#include "webrtc/video/send_statistics_proxy.h"

// If an output frame does not arrive in 500ms, the test will fail.
#define FRAME_TIMEOUT_MS 500

namespace webrtc {

bool EqualFramesVector(const std::vector<std::unique_ptr<VideoFrame>>& frames1,
                       const std::vector<std::unique_ptr<VideoFrame>>& frames2);
std::unique_ptr<VideoFrame> CreateVideoFrame(uint8_t data);

class VideoCaptureInputTest : public ::testing::Test {
 protected:
  VideoCaptureInputTest()
      : stats_proxy_(Clock::GetRealTimeClock(),
                     webrtc::VideoSendStream::Config(nullptr),
                     webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo),
        capture_event_(false, false) {}

  virtual void SetUp() {
    overuse_detector_.reset(
        new OveruseFrameDetector(Clock::GetRealTimeClock(), CpuOveruseOptions(),
                                 nullptr, nullptr, &stats_proxy_));
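    // |capture_event_| is signaled for every frame delivered through
    // |input_|; WaitOutputFrame() below blocks on it.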
    input_.reset(new internal::VideoCaptureInput(
        &capture_event_, nullptr, &stats_proxy_, overuse_detector_.get()));
  }

  void AddInputFrame(VideoFrame* frame) {
    input_->IncomingCapturedFrame(*frame);
  }

  void WaitOutputFrame() {
    EXPECT_TRUE(capture_event_.Wait(FRAME_TIMEOUT_MS));
    VideoFrame frame;
    EXPECT_TRUE(input_->GetVideoFrame(&frame));
    ASSERT_TRUE(frame.video_frame_buffer());
    // Texture frames have no CPU-side planes, so Y-plane pointers are only
    // recorded for I420 frames.
    if (!frame.video_frame_buffer()->native_handle()) {
      output_frame_ybuffers_.push_back(frame.video_frame_buffer()->DataY());
    }
    output_frames_.push_back(
        std::unique_ptr<VideoFrame>(new VideoFrame(frame)));
  }

  SendStatisticsProxy stats_proxy_;

  rtc::Event capture_event_;

  std::unique_ptr<OveruseFrameDetector> overuse_detector_;

  // The VideoCaptureInput under test; capture frames are sent to it.
  std::unique_ptr<internal::VideoCaptureInput> input_;

  // Frames fed into |input_|.
  std::vector<std::unique_ptr<VideoFrame>> input_frames_;

  // Frames delivered by |input_|.
  std::vector<std::unique_ptr<VideoFrame>> output_frames_;

  // Y-plane buffer pointers of the output frames, used to verify that the
  // frames are swapped and not copied.
  std::vector<const uint8_t*> output_frame_ybuffers_;
};

TEST_F(VideoCaptureInputTest, DoesNotRetainHandleNorCopyBuffer) {
  // Signaled when the frame's buffer is destroyed.
  rtc::Event frame_destroyed_event(false, false);
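  // An I420 buffer that signals |event_| from its destructor, so the test
  // can detect when the last reference to the buffer is released.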
  class TestBuffer : public webrtc::I420Buffer {
   public:
    explicit TestBuffer(rtc::Event* event) : I420Buffer(5, 5), event_(event) {}

   private:
    friend class rtc::RefCountedObject<TestBuffer>;
    ~TestBuffer() override { event_->Set(); }
    rtc::Event* const event_;
  };

  {
    VideoFrame frame(
        new rtc::RefCountedObject<TestBuffer>(&frame_destroyed_event), 1, 1,
        kVideoRotation_0);

    AddInputFrame(&frame);
    WaitOutputFrame();

    EXPECT_EQ(output_frames_[0]->video_frame_buffer().get(),
              frame.video_frame_buffer().get());
    output_frames_.clear();
  }
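  // The local frame has gone out of scope and |output_frames_| was cleared,
  // so the buffer should now be destroyed.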
  EXPECT_TRUE(frame_destroyed_event.Wait(FRAME_TIMEOUT_MS));
}

TEST_F(VideoCaptureInputTest, TestNtpTimeStampSetIfRenderTimeSet) {
  input_frames_.push_back(CreateVideoFrame(0));
  input_frames_[0]->set_render_time_ms(5);
  input_frames_[0]->set_ntp_time_ms(0);

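  // With the NTP time left unset (0), VideoCaptureInput is expected to derive
  // it from the render time, so the delivered value must exceed the raw
  // render time.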
  AddInputFrame(input_frames_[0].get());
  WaitOutputFrame();
  EXPECT_GT(output_frames_[0]->ntp_time_ms(),
            input_frames_[0]->render_time_ms());
}

TEST_F(VideoCaptureInputTest, TestRtpTimeStampSet) {
  input_frames_.push_back(CreateVideoFrame(0));
  input_frames_[0]->set_render_time_ms(0);
  input_frames_[0]->set_ntp_time_ms(1);
  input_frames_[0]->set_timestamp(0);

  AddInputFrame(input_frames_[0].get());
  WaitOutputFrame();
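  // Video RTP timestamps run on a 90 kHz clock, so 1 ms of NTP time
  // corresponds to 90 RTP ticks.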
  EXPECT_EQ(output_frames_[0]->timestamp(),
            input_frames_[0]->ntp_time_ms() * 90);
}

TEST_F(VideoCaptureInputTest, DropsFramesWithSameOrOldNtpTimestamp) {
  input_frames_.push_back(CreateVideoFrame(0));

  input_frames_[0]->set_ntp_time_ms(17);
  AddInputFrame(input_frames_[0].get());
  WaitOutputFrame();
  EXPECT_EQ(output_frames_[0]->timestamp(),
            input_frames_[0]->ntp_time_ms() * 90);

  // A repeated frame with the same NTP timestamp should be dropped.
  AddInputFrame(input_frames_[0].get());
  EXPECT_FALSE(capture_event_.Wait(FRAME_TIMEOUT_MS));

  // As should frames with a decreased NTP timestamp.
  input_frames_[0]->set_ntp_time_ms(input_frames_[0]->ntp_time_ms() - 1);
  AddInputFrame(input_frames_[0].get());
  EXPECT_FALSE(capture_event_.Wait(FRAME_TIMEOUT_MS));

  // But delivering with an increased NTP timestamp should succeed.
  input_frames_[0]->set_ntp_time_ms(4711);
  AddInputFrame(input_frames_[0].get());
  WaitOutputFrame();
  EXPECT_EQ(output_frames_[1]->timestamp(),
            input_frames_[0]->ntp_time_ms() * 90);
}

TEST_F(VideoCaptureInputTest, TestTextureFrames) {
  const int kNumFrame = 3;
  for (int i = 0; i < kNumFrame; ++i) {
    test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle();
    // Add one to |i| so that width/height > 0.
    input_frames_.push_back(std::unique_ptr<VideoFrame>(new VideoFrame(
        test::FakeNativeHandle::CreateFrame(dummy_handle, i + 1, i + 1, i + 1,
                                            i + 1, webrtc::kVideoRotation_0))));
    AddInputFrame(input_frames_[i].get());
    WaitOutputFrame();
    ASSERT_TRUE(output_frames_[i]->video_frame_buffer());
    EXPECT_EQ(dummy_handle,
              output_frames_[i]->video_frame_buffer()->native_handle());
  }

  EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_));
}

TEST_F(VideoCaptureInputTest, TestI420Frames) {
  const int kNumFrame = 4;
  std::vector<const uint8_t*> ybuffer_pointers;
  for (int i = 0; i < kNumFrame; ++i) {
    input_frames_.push_back(CreateVideoFrame(static_cast<uint8_t>(i + 1)));
    ybuffer_pointers.push_back(input_frames_[i]->video_frame_buffer()->DataY());
    AddInputFrame(input_frames_[i].get());
    WaitOutputFrame();
  }

  EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_));
  // Make sure the buffer is not copied.
  for (int i = 0; i < kNumFrame; ++i)
    EXPECT_EQ(ybuffer_pointers[i], output_frame_ybuffers_[i]);
}

TEST_F(VideoCaptureInputTest, TestI420FrameAfterTextureFrame) {
  test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle();
  input_frames_.push_back(std::unique_ptr<VideoFrame>(
      new VideoFrame(test::FakeNativeHandle::CreateFrame(
          dummy_handle, 1, 1, 1, 1, webrtc::kVideoRotation_0))));
  AddInputFrame(input_frames_[0].get());
  WaitOutputFrame();
  ASSERT_TRUE(output_frames_[0]->video_frame_buffer());
  EXPECT_EQ(dummy_handle,
            output_frames_[0]->video_frame_buffer()->native_handle());

  input_frames_.push_back(CreateVideoFrame(2));
  AddInputFrame(input_frames_[1].get());
  WaitOutputFrame();

  EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_));
}

TEST_F(VideoCaptureInputTest, TestTextureFrameAfterI420Frame) {
  input_frames_.push_back(CreateVideoFrame(1));
  AddInputFrame(input_frames_[0].get());
  WaitOutputFrame();

  test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle();
  input_frames_.push_back(std::unique_ptr<VideoFrame>(
      new VideoFrame(test::FakeNativeHandle::CreateFrame(
          dummy_handle, 1, 1, 2, 2, webrtc::kVideoRotation_0))));
  AddInputFrame(input_frames_[1].get());
  WaitOutputFrame();

  EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_));
}

bool EqualFramesVector(
    const std::vector<std::unique_ptr<VideoFrame>>& frames1,
    const std::vector<std::unique_ptr<VideoFrame>>& frames2) {
  if (frames1.size() != frames2.size())
    return false;
  for (size_t i = 0; i < frames1.size(); ++i) {
    // Compare frame buffers, since we don't care about differing timestamps.
    if (!test::FrameBufsEqual(frames1[i]->video_frame_buffer(),
                              frames2[i]->video_frame_buffer())) {
      return false;
    }
  }
  return true;
}

std::unique_ptr<VideoFrame> CreateVideoFrame(uint8_t data) {
  std::unique_ptr<VideoFrame> frame(new VideoFrame());
  const int width = 36;
  const int height = 24;
  // One buffer filled with |data|, large enough to back the Y plane and
  // reused as the source for the U and V planes.
  const int kSizeY = width * height * 2;
  uint8_t buffer[kSizeY];
  memset(buffer, data, kSizeY);
  frame->CreateFrame(buffer, buffer, buffer, width, height, width, width / 2,
                     width / 2, kVideoRotation_0);
  frame->set_render_time_ms(data);
  return frame;
}

}  // namespace webrtc