| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 #include "webrtc/video/video_capture_input.h" | 10 #include "webrtc/video/video_capture_input.h" |
| 11 | 11 |
| 12 #include <memory> | 12 #include <memory> |
| 13 #include <vector> | 13 #include <vector> |
| 14 | 14 |
| 15 #include "testing/gtest/include/gtest/gtest.h" | 15 #include "testing/gtest/include/gtest/gtest.h" |
| 16 #include "webrtc/base/event.h" | 16 #include "webrtc/base/event.h" |
| 17 #include "webrtc/base/refcount.h" | 17 #include "webrtc/base/refcount.h" |
| 18 #include "webrtc/system_wrappers/include/scoped_vector.h" | |
| 19 #include "webrtc/test/fake_texture_frame.h" | 18 #include "webrtc/test/fake_texture_frame.h" |
| 20 #include "webrtc/video/send_statistics_proxy.h" | 19 #include "webrtc/video/send_statistics_proxy.h" |
| 21 | 20 |
| 22 // If an output frame does not arrive in 500ms, the test will fail. | 21 // If an output frame does not arrive in 500ms, the test will fail. |
| 23 #define FRAME_TIMEOUT_MS 500 | 22 #define FRAME_TIMEOUT_MS 500 |
| 24 | 23 |
| 25 namespace webrtc { | 24 namespace webrtc { |
| 26 | 25 |
| 27 bool EqualFrames(const VideoFrame& frame1, const VideoFrame& frame2); | 26 bool EqualFrames(const VideoFrame& frame1, const VideoFrame& frame2); |
| 28 bool EqualTextureFrames(const VideoFrame& frame1, const VideoFrame& frame2); | 27 bool EqualTextureFrames(const VideoFrame& frame1, const VideoFrame& frame2); |
| 29 bool EqualBufferFrames(const VideoFrame& frame1, const VideoFrame& frame2); | 28 bool EqualBufferFrames(const VideoFrame& frame1, const VideoFrame& frame2); |
| 30 bool EqualFramesVector(const ScopedVector<VideoFrame>& frames1, | 29 bool EqualFramesVector(const std::vector<std::unique_ptr<VideoFrame>>& frames1, |
| 31 const ScopedVector<VideoFrame>& frames2); | 30 const std::vector<std::unique_ptr<VideoFrame>>& frames2); |
| 32 VideoFrame* CreateVideoFrame(uint8_t data); | 31 std::unique_ptr<VideoFrame> CreateVideoFrame(uint8_t data); |
| 33 | 32 |
| 34 class VideoCaptureInputTest : public ::testing::Test { | 33 class VideoCaptureInputTest : public ::testing::Test { |
| 35 protected: | 34 protected: |
| 36 VideoCaptureInputTest() | 35 VideoCaptureInputTest() |
| 37 : stats_proxy_(Clock::GetRealTimeClock(), | 36 : stats_proxy_(Clock::GetRealTimeClock(), |
| 38 webrtc::VideoSendStream::Config(nullptr), | 37 webrtc::VideoSendStream::Config(nullptr), |
| 39 webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo), | 38 webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo), |
| 40 capture_event_(false, false) {} | 39 capture_event_(false, false) {} |
| 41 | 40 |
| 42 virtual void SetUp() { | 41 virtual void SetUp() { |
| 43 overuse_detector_.reset( | 42 overuse_detector_.reset( |
| 44 new OveruseFrameDetector(Clock::GetRealTimeClock(), CpuOveruseOptions(), | 43 new OveruseFrameDetector(Clock::GetRealTimeClock(), CpuOveruseOptions(), |
| 45 nullptr, nullptr, &stats_proxy_)); | 44 nullptr, nullptr, &stats_proxy_)); |
| 46 input_.reset(new internal::VideoCaptureInput( | 45 input_.reset(new internal::VideoCaptureInput( |
| 47 &capture_event_, nullptr, &stats_proxy_, overuse_detector_.get())); | 46 &capture_event_, nullptr, &stats_proxy_, overuse_detector_.get())); |
| 48 } | 47 } |
| 49 | 48 |
| 50 void AddInputFrame(VideoFrame* frame) { | 49 void AddInputFrame(VideoFrame* frame) { |
| 51 input_->IncomingCapturedFrame(*frame); | 50 input_->IncomingCapturedFrame(*frame); |
| 52 } | 51 } |
| 53 | 52 |
| 54 void WaitOutputFrame() { | 53 void WaitOutputFrame() { |
| 55 EXPECT_TRUE(capture_event_.Wait(FRAME_TIMEOUT_MS)); | 54 EXPECT_TRUE(capture_event_.Wait(FRAME_TIMEOUT_MS)); |
| 56 VideoFrame frame; | 55 VideoFrame frame; |
| 57 EXPECT_TRUE(input_->GetVideoFrame(&frame)); | 56 EXPECT_TRUE(input_->GetVideoFrame(&frame)); |
| 58 if (!frame.native_handle()) { | 57 if (!frame.native_handle()) { |
| 59 output_frame_ybuffers_.push_back( | 58 output_frame_ybuffers_.push_back( |
| 60 static_cast<const VideoFrame*>(&frame)->buffer(kYPlane)); | 59 static_cast<const VideoFrame*>(&frame)->buffer(kYPlane)); |
| 61 } | 60 } |
| 62 output_frames_.push_back(new VideoFrame(frame)); | 61 output_frames_.push_back( |
| | 62 std::unique_ptr<VideoFrame>(new VideoFrame(frame))); |
| 63 } | 63 } |
| 64 | 64 |
| 65 SendStatisticsProxy stats_proxy_; | 65 SendStatisticsProxy stats_proxy_; |
| 66 | 66 |
| 67 rtc::Event capture_event_; | 67 rtc::Event capture_event_; |
| 68 | 68 |
| 69 std::unique_ptr<OveruseFrameDetector> overuse_detector_; | 69 std::unique_ptr<OveruseFrameDetector> overuse_detector_; |
| 70 | 70 |
| 71 // Used to send input capture frames to VideoCaptureInput. | 71 // Used to send input capture frames to VideoCaptureInput. |
| 72 std::unique_ptr<internal::VideoCaptureInput> input_; | 72 std::unique_ptr<internal::VideoCaptureInput> input_; |
| 73 | 73 |
| 74 // Input capture frames of VideoCaptureInput. | 74 // Input capture frames of VideoCaptureInput. |
| 75 ScopedVector<VideoFrame> input_frames_; | 75 std::vector<std::unique_ptr<VideoFrame>> input_frames_; |
| 76 | 76 |
| 77 // Output delivered frames of VideoCaptureInput. | 77 // Output delivered frames of VideoCaptureInput. |
| 78 ScopedVector<VideoFrame> output_frames_; | 78 std::vector<std::unique_ptr<VideoFrame>> output_frames_; |
| 79 | 79 |
| 80 // Pointers to the Y plane buffers of the output frames, used to verify | 80 // Pointers to the Y plane buffers of the output frames, used to verify |
| 81 // that frames are swapped and not copied. | 81 // that frames are swapped and not copied. |
| 82 std::vector<const uint8_t*> output_frame_ybuffers_; | 82 std::vector<const uint8_t*> output_frame_ybuffers_; |
| 83 }; | 83 }; |
| 84 | 84 |
| 85 TEST_F(VideoCaptureInputTest, DoesNotRetainHandleNorCopyBuffer) { | 85 TEST_F(VideoCaptureInputTest, DoesNotRetainHandleNorCopyBuffer) { |
| 86 // Indicate an output frame has arrived. | 86 // Indicate an output frame has arrived. |
| 87 rtc::Event frame_destroyed_event(false, false); | 87 rtc::Event frame_destroyed_event(false, false); |
| 88 class TestBuffer : public webrtc::I420Buffer { | 88 class TestBuffer : public webrtc::I420Buffer { |
| (...skipping 18 matching lines...) |
| 107 output_frames_.clear(); | 107 output_frames_.clear(); |
| 108 frame.Reset(); | 108 frame.Reset(); |
| 109 EXPECT_TRUE(frame_destroyed_event.Wait(FRAME_TIMEOUT_MS)); | 109 EXPECT_TRUE(frame_destroyed_event.Wait(FRAME_TIMEOUT_MS)); |
| 110 } | 110 } |
| 111 | 111 |
| 112 TEST_F(VideoCaptureInputTest, TestNtpTimeStampSetIfRenderTimeSet) { | 112 TEST_F(VideoCaptureInputTest, TestNtpTimeStampSetIfRenderTimeSet) { |
| 113 input_frames_.push_back(CreateVideoFrame(0)); | 113 input_frames_.push_back(CreateVideoFrame(0)); |
| 114 input_frames_[0]->set_render_time_ms(5); | 114 input_frames_[0]->set_render_time_ms(5); |
| 115 input_frames_[0]->set_ntp_time_ms(0); | 115 input_frames_[0]->set_ntp_time_ms(0); |
| 116 | 116 |
| 117 AddInputFrame(input_frames_[0]); | 117 AddInputFrame(input_frames_[0].get()); |
| 118 WaitOutputFrame(); | 118 WaitOutputFrame(); |
| 119 EXPECT_GT(output_frames_[0]->ntp_time_ms(), | 119 EXPECT_GT(output_frames_[0]->ntp_time_ms(), |
| 120 input_frames_[0]->render_time_ms()); | 120 input_frames_[0]->render_time_ms()); |
| 121 } | 121 } |
| 122 | 122 |
| 123 TEST_F(VideoCaptureInputTest, TestRtpTimeStampSet) { | 123 TEST_F(VideoCaptureInputTest, TestRtpTimeStampSet) { |
| 124 input_frames_.push_back(CreateVideoFrame(0)); | 124 input_frames_.push_back(CreateVideoFrame(0)); |
| 125 input_frames_[0]->set_render_time_ms(0); | 125 input_frames_[0]->set_render_time_ms(0); |
| 126 input_frames_[0]->set_ntp_time_ms(1); | 126 input_frames_[0]->set_ntp_time_ms(1); |
| 127 input_frames_[0]->set_timestamp(0); | 127 input_frames_[0]->set_timestamp(0); |
| 128 | 128 |
| 129 AddInputFrame(input_frames_[0]); | 129 AddInputFrame(input_frames_[0].get()); |
| 130 WaitOutputFrame(); | 130 WaitOutputFrame(); |
| 131 EXPECT_EQ(output_frames_[0]->timestamp(), | 131 EXPECT_EQ(output_frames_[0]->timestamp(), |
| 132 input_frames_[0]->ntp_time_ms() * 90); | 132 input_frames_[0]->ntp_time_ms() * 90); |
| 133 } | 133 } |
| 134 | 134 |
| 135 TEST_F(VideoCaptureInputTest, DropsFramesWithSameOrOldNtpTimestamp) { | 135 TEST_F(VideoCaptureInputTest, DropsFramesWithSameOrOldNtpTimestamp) { |
| 136 input_frames_.push_back(CreateVideoFrame(0)); | 136 input_frames_.push_back(CreateVideoFrame(0)); |
| 137 | 137 |
| 138 input_frames_[0]->set_ntp_time_ms(17); | 138 input_frames_[0]->set_ntp_time_ms(17); |
| 139 AddInputFrame(input_frames_[0]); | 139 AddInputFrame(input_frames_[0].get()); |
| 140 WaitOutputFrame(); | 140 WaitOutputFrame(); |
| 141 EXPECT_EQ(output_frames_[0]->timestamp(), | 141 EXPECT_EQ(output_frames_[0]->timestamp(), |
| 142 input_frames_[0]->ntp_time_ms() * 90); | 142 input_frames_[0]->ntp_time_ms() * 90); |
| 143 | 143 |
| 144 // A repeat frame with the same NTP timestamp should be dropped. | 144 // A repeat frame with the same NTP timestamp should be dropped. |
| 145 AddInputFrame(input_frames_[0]); | 145 AddInputFrame(input_frames_[0].get()); |
| 146 EXPECT_FALSE(capture_event_.Wait(FRAME_TIMEOUT_MS)); | 146 EXPECT_FALSE(capture_event_.Wait(FRAME_TIMEOUT_MS)); |
| 147 | 147 |
| 148 // As should frames with a decreased NTP timestamp. | 148 // As should frames with a decreased NTP timestamp. |
| 149 input_frames_[0]->set_ntp_time_ms(input_frames_[0]->ntp_time_ms() - 1); | 149 input_frames_[0]->set_ntp_time_ms(input_frames_[0]->ntp_time_ms() - 1); |
| 150 AddInputFrame(input_frames_[0]); | 150 AddInputFrame(input_frames_[0].get()); |
| 151 EXPECT_FALSE(capture_event_.Wait(FRAME_TIMEOUT_MS)); | 151 EXPECT_FALSE(capture_event_.Wait(FRAME_TIMEOUT_MS)); |
| 152 | 152 |
| 153 // But delivering with an increased NTP timestamp should succeed. | 153 // But delivering with an increased NTP timestamp should succeed. |
| 154 input_frames_[0]->set_ntp_time_ms(4711); | 154 input_frames_[0]->set_ntp_time_ms(4711); |
| 155 AddInputFrame(input_frames_[0]); | 155 AddInputFrame(input_frames_[0].get()); |
| 156 WaitOutputFrame(); | 156 WaitOutputFrame(); |
| 157 EXPECT_EQ(output_frames_[1]->timestamp(), | 157 EXPECT_EQ(output_frames_[1]->timestamp(), |
| 158 input_frames_[0]->ntp_time_ms() * 90); | 158 input_frames_[0]->ntp_time_ms() * 90); |
| 159 } | 159 } |
| 160 | 160 |
| 161 TEST_F(VideoCaptureInputTest, TestTextureFrames) { | 161 TEST_F(VideoCaptureInputTest, TestTextureFrames) { |
| 162 const int kNumFrame = 3; | 162 const int kNumFrame = 3; |
| 163 for (int i = 0; i < kNumFrame; ++i) { | 163 for (int i = 0; i < kNumFrame; ++i) { |
| 164 test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle(); | 164 test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle(); |
| 165 // Add one to |i| so that width/height > 0. | 165 // Add one to |i| so that width/height > 0. |
| 166 input_frames_.push_back(new VideoFrame(test::FakeNativeHandle::CreateFrame( | 166 input_frames_.push_back(std::unique_ptr<VideoFrame>(new VideoFrame( |
| 167 dummy_handle, i + 1, i + 1, i + 1, i + 1, webrtc::kVideoRotation_0))); | 167 test::FakeNativeHandle::CreateFrame(dummy_handle, i + 1, i + 1, i + 1, |
| 168 AddInputFrame(input_frames_[i]); | 168 i + 1, webrtc::kVideoRotation_0)))); |
| | 169 AddInputFrame(input_frames_[i].get()); |
| 169 WaitOutputFrame(); | 170 WaitOutputFrame(); |
| 170 EXPECT_EQ(dummy_handle, output_frames_[i]->native_handle()); | 171 EXPECT_EQ(dummy_handle, output_frames_[i]->native_handle()); |
| 171 } | 172 } |
| 172 | 173 |
| 173 EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_)); | 174 EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_)); |
| 174 } | 175 } |
| 175 | 176 |
| 176 TEST_F(VideoCaptureInputTest, TestI420Frames) { | 177 TEST_F(VideoCaptureInputTest, TestI420Frames) { |
| 177 const int kNumFrame = 4; | 178 const int kNumFrame = 4; |
| 178 std::vector<const uint8_t*> ybuffer_pointers; | 179 std::vector<const uint8_t*> ybuffer_pointers; |
| 179 for (int i = 0; i < kNumFrame; ++i) { | 180 for (int i = 0; i < kNumFrame; ++i) { |
| 180 input_frames_.push_back(CreateVideoFrame(static_cast<uint8_t>(i + 1))); | 181 input_frames_.push_back(CreateVideoFrame(static_cast<uint8_t>(i + 1))); |
| 181 const VideoFrame* const_input_frame = input_frames_[i]; | 182 const VideoFrame* const_input_frame = input_frames_[i].get(); |
| 182 ybuffer_pointers.push_back(const_input_frame->buffer(kYPlane)); | 183 ybuffer_pointers.push_back(const_input_frame->buffer(kYPlane)); |
| 183 AddInputFrame(input_frames_[i]); | 184 AddInputFrame(input_frames_[i].get()); |
| 184 WaitOutputFrame(); | 185 WaitOutputFrame(); |
| 185 } | 186 } |
| 186 | 187 |
| 187 EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_)); | 188 EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_)); |
| 188 // Make sure the buffer is not copied. | 189 // Make sure the buffer is not copied. |
| 189 for (int i = 0; i < kNumFrame; ++i) | 190 for (int i = 0; i < kNumFrame; ++i) |
| 190 EXPECT_EQ(ybuffer_pointers[i], output_frame_ybuffers_[i]); | 191 EXPECT_EQ(ybuffer_pointers[i], output_frame_ybuffers_[i]); |
| 191 } | 192 } |
| 192 | 193 |
| 193 TEST_F(VideoCaptureInputTest, TestI420FrameAfterTextureFrame) { | 194 TEST_F(VideoCaptureInputTest, TestI420FrameAfterTextureFrame) { |
| 194 test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle(); | 195 test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle(); |
| 195 input_frames_.push_back(new VideoFrame(test::FakeNativeHandle::CreateFrame( | 196 input_frames_.push_back(std::unique_ptr<VideoFrame>( |
| 196 dummy_handle, 1, 1, 1, 1, webrtc::kVideoRotation_0))); | 197 new VideoFrame(test::FakeNativeHandle::CreateFrame( |
| 197 AddInputFrame(input_frames_[0]); | 198 dummy_handle, 1, 1, 1, 1, webrtc::kVideoRotation_0)))); |
| | 199 AddInputFrame(input_frames_[0].get()); |
| 198 WaitOutputFrame(); | 200 WaitOutputFrame(); |
| 199 EXPECT_EQ(dummy_handle, output_frames_[0]->native_handle()); | 201 EXPECT_EQ(dummy_handle, output_frames_[0]->native_handle()); |
| 200 | 202 |
| 201 input_frames_.push_back(CreateVideoFrame(2)); | 203 input_frames_.push_back(CreateVideoFrame(2)); |
| 202 AddInputFrame(input_frames_[1]); | 204 AddInputFrame(input_frames_[1].get()); |
| 203 WaitOutputFrame(); | 205 WaitOutputFrame(); |
| 204 | 206 |
| 205 EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_)); | 207 EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_)); |
| 206 } | 208 } |
| 207 | 209 |
| 208 TEST_F(VideoCaptureInputTest, TestTextureFrameAfterI420Frame) { | 210 TEST_F(VideoCaptureInputTest, TestTextureFrameAfterI420Frame) { |
| 209 input_frames_.push_back(CreateVideoFrame(1)); | 211 input_frames_.push_back(CreateVideoFrame(1)); |
| 210 AddInputFrame(input_frames_[0]); | 212 AddInputFrame(input_frames_[0].get()); |
| 211 WaitOutputFrame(); | 213 WaitOutputFrame(); |
| 212 | 214 |
| 213 test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle(); | 215 test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle(); |
| 214 input_frames_.push_back(new VideoFrame(test::FakeNativeHandle::CreateFrame( | 216 input_frames_.push_back(std::unique_ptr<VideoFrame>( |
| 215 dummy_handle, 1, 1, 2, 2, webrtc::kVideoRotation_0))); | 217 new VideoFrame(test::FakeNativeHandle::CreateFrame( |
| 216 AddInputFrame(input_frames_[1]); | 218 dummy_handle, 1, 1, 2, 2, webrtc::kVideoRotation_0)))); |
| | 219 AddInputFrame(input_frames_[1].get()); |
| 217 WaitOutputFrame(); | 220 WaitOutputFrame(); |
| 218 | 221 |
| 219 EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_)); | 222 EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_)); |
| 220 } | 223 } |
| 221 | 224 |
| 222 bool EqualFrames(const VideoFrame& frame1, const VideoFrame& frame2) { | 225 bool EqualFrames(const VideoFrame& frame1, const VideoFrame& frame2) { |
| 223 if (frame1.native_handle() != NULL || frame2.native_handle() != NULL) | 226 if (frame1.native_handle() != NULL || frame2.native_handle() != NULL) |
| 224 return EqualTextureFrames(frame1, frame2); | 227 return EqualTextureFrames(frame1, frame2); |
| 225 return EqualBufferFrames(frame1, frame2); | 228 return EqualBufferFrames(frame1, frame2); |
| 226 } | 229 } |
| (...skipping 14 matching lines...) |
| 241 (frame1.allocated_size(kUPlane) == frame2.allocated_size(kUPlane)) && | 244 (frame1.allocated_size(kUPlane) == frame2.allocated_size(kUPlane)) && |
| 242 (frame1.allocated_size(kVPlane) == frame2.allocated_size(kVPlane)) && | 245 (frame1.allocated_size(kVPlane) == frame2.allocated_size(kVPlane)) && |
| 243 (memcmp(frame1.buffer(kYPlane), frame2.buffer(kYPlane), | 246 (memcmp(frame1.buffer(kYPlane), frame2.buffer(kYPlane), |
| 244 frame1.allocated_size(kYPlane)) == 0) && | 247 frame1.allocated_size(kYPlane)) == 0) && |
| 245 (memcmp(frame1.buffer(kUPlane), frame2.buffer(kUPlane), | 248 (memcmp(frame1.buffer(kUPlane), frame2.buffer(kUPlane), |
| 246 frame1.allocated_size(kUPlane)) == 0) && | 249 frame1.allocated_size(kUPlane)) == 0) && |
| 247 (memcmp(frame1.buffer(kVPlane), frame2.buffer(kVPlane), | 250 (memcmp(frame1.buffer(kVPlane), frame2.buffer(kVPlane), |
| 248 frame1.allocated_size(kVPlane)) == 0)); | 251 frame1.allocated_size(kVPlane)) == 0)); |
| 249 } | 252 } |
| 250 | 253 |
| 251 bool EqualFramesVector(const ScopedVector<VideoFrame>& frames1, | 254 bool EqualFramesVector( |
| 252 const ScopedVector<VideoFrame>& frames2) { | 255 const std::vector<std::unique_ptr<VideoFrame>>& frames1, |
| | 256 const std::vector<std::unique_ptr<VideoFrame>>& frames2) { |
| 253 if (frames1.size() != frames2.size()) | 257 if (frames1.size() != frames2.size()) |
| 254 return false; | 258 return false; |
| 255 for (size_t i = 0; i < frames1.size(); ++i) { | 259 for (size_t i = 0; i < frames1.size(); ++i) { |
| 256 if (!EqualFrames(*frames1[i], *frames2[i])) | 260 if (!EqualFrames(*frames1[i], *frames2[i])) |
| 257 return false; | 261 return false; |
| 258 } | 262 } |
| 259 return true; | 263 return true; |
| 260 } | 264 } |
| 261 | 265 |
| 262 VideoFrame* CreateVideoFrame(uint8_t data) { | 266 std::unique_ptr<VideoFrame> CreateVideoFrame(uint8_t data) { |
| 263 VideoFrame* frame = new VideoFrame(); | 267 std::unique_ptr<VideoFrame> frame(new VideoFrame()); |
| 264 const int width = 36; | 268 const int width = 36; |
| 265 const int height = 24; | 269 const int height = 24; |
| 266 const int kSizeY = width * height * 2; | 270 const int kSizeY = width * height * 2; |
| 267 uint8_t buffer[kSizeY]; | 271 uint8_t buffer[kSizeY]; |
| 268 memset(buffer, data, kSizeY); | 272 memset(buffer, data, kSizeY); |
| 269 frame->CreateFrame(buffer, buffer, buffer, width, height, width, width / 2, | 273 frame->CreateFrame(buffer, buffer, buffer, width, height, width, width / 2, |
| 270 width / 2, kVideoRotation_0); | 274 width / 2, kVideoRotation_0); |
| 271 frame->set_render_time_ms(data); | 275 frame->set_render_time_ms(data); |
| 272 return frame; | 276 return frame; |
| 273 } | 277 } |
| 274 | 278 |
| 275 } // namespace webrtc | 279 } // namespace webrtc |
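
Reviewer note: the core idiom this CL applies is the ScopedVector<T> to std::vector<std::unique_ptr<T>> migration. Below is a minimal self-contained sketch of that pattern; Frame and MakeFrame are hypothetical stand-ins rather than WebRTC types, and only the C++11 standard library is assumed (no std::make_unique, which arrived in C++14).

    #include <memory>
    #include <vector>

    struct Frame { int id; };  // hypothetical stand-in for webrtc::VideoFrame

    // Factories now hand ownership back explicitly instead of returning a raw
    // owning pointer, mirroring the new CreateVideoFrame() signature above.
    std::unique_ptr<Frame> MakeFrame(int id) {
      return std::unique_ptr<Frame>(new Frame{id});
    }

    int main() {
      // Before: ScopedVector<Frame> frames; frames.push_back(new Frame);
      // After: ownership lives in the element type, so a plain std::vector
      // suffices and no special deleting container is needed.
      std::vector<std::unique_ptr<Frame>> frames;
      frames.push_back(MakeFrame(1));
      frames.push_back(MakeFrame(2));

      // Non-owning access uses get(), as in the
      // AddInputFrame(input_frames_[i].get()) calls in this test.
      Frame* borrowed = frames[0].get();
      (void)borrowed;

      // clear() destroys every element, matching ScopedVector's behavior.
      frames.clear();
    }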
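A second note on the timestamp expectations: TestRtpTimeStampSet and DropsFramesWithSameOrOldNtpTimestamp both assert timestamp() == ntp_time_ms() * 90, i.e. the 90 kHz RTP video clock (90 ticks per millisecond). A minimal sketch of that conversion follows; the helper name NtpMsToRtpTicks is hypothetical, not a WebRTC API.

    #include <cassert>
    #include <cstdint>

    // 90 kHz RTP video clock: 90 000 ticks per second, so 90 ticks per ms.
    const int64_t kVideoRtpTicksPerMs = 90;

    uint32_t NtpMsToRtpTicks(int64_t ntp_time_ms) {  // hypothetical helper
      return static_cast<uint32_t>(ntp_time_ms * kVideoRtpTicksPerMs);
    }

    int main() {
      assert(NtpMsToRtpTicks(1) == 90);     // as in TestRtpTimeStampSet
      assert(NtpMsToRtpTicks(17) == 1530);  // as in DropsFramesWithSameOrOldNtpTimestamp
      assert(NtpMsToRtpTicks(4711) == 423990);
    }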