Index: webrtc/video/video_capture_input_unittest.cc
diff --git a/webrtc/video/video_capture_input_unittest.cc b/webrtc/video/video_capture_input_unittest.cc
index 9c8987f9a3e7384de59d6432b6d84cef4fea56b5..0cb1139880effe4ea4acfe46d96965e0cb1b1cf7 100644
--- a/webrtc/video/video_capture_input_unittest.cc
+++ b/webrtc/video/video_capture_input_unittest.cc
@@ -15,7 +15,6 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "webrtc/base/event.h"
 #include "webrtc/base/refcount.h"
-#include "webrtc/system_wrappers/include/scoped_vector.h"
 #include "webrtc/test/fake_texture_frame.h"
 #include "webrtc/video/send_statistics_proxy.h"
@@ -27,9 +26,9 @@ namespace webrtc {
 bool EqualFrames(const VideoFrame& frame1, const VideoFrame& frame2);
 bool EqualTextureFrames(const VideoFrame& frame1, const VideoFrame& frame2);
 bool EqualBufferFrames(const VideoFrame& frame1, const VideoFrame& frame2);
-bool EqualFramesVector(const ScopedVector<VideoFrame>& frames1,
-                       const ScopedVector<VideoFrame>& frames2);
-VideoFrame* CreateVideoFrame(uint8_t length);
+bool EqualFramesVector(const std::vector<std::unique_ptr<VideoFrame>>& frames1,
+                       const std::vector<std::unique_ptr<VideoFrame>>& frames2);
+std::unique_ptr<VideoFrame> CreateVideoFrame(uint8_t length);
 class VideoCaptureInputTest : public ::testing::Test {
  protected:
@@ -59,7 +58,8 @@ class VideoCaptureInputTest : public ::testing::Test {
       output_frame_ybuffers_.push_back(
           static_cast<const VideoFrame*>(&frame)->buffer(kYPlane));
     }
-    output_frames_.push_back(new VideoFrame(frame));
+    output_frames_.push_back(
+        std::unique_ptr<VideoFrame>(new VideoFrame(frame)));
   }
   SendStatisticsProxy stats_proxy_;
@@ -72,10 +72,10 @@ class VideoCaptureInputTest : public ::testing::Test {
   std::unique_ptr<internal::VideoCaptureInput> input_;
   // Input capture frames of VideoCaptureInput.
-  ScopedVector<VideoFrame> input_frames_;
+  std::vector<std::unique_ptr<VideoFrame>> input_frames_;
   // Output delivered frames of VideoCaptureInput.
-  ScopedVector<VideoFrame> output_frames_;
+  std::vector<std::unique_ptr<VideoFrame>> output_frames_;
   // The pointers of Y plane buffers of output frames. This is used to verify
   // the frames are swapped and not copied.
@@ -114,7 +114,7 @@ TEST_F(VideoCaptureInputTest, TestNtpTimeStampSetIfRenderTimeSet) {
   input_frames_[0]->set_render_time_ms(5);
   input_frames_[0]->set_ntp_time_ms(0);
-  AddInputFrame(input_frames_[0]);
+  AddInputFrame(input_frames_[0].get());
   WaitOutputFrame();
   EXPECT_GT(output_frames_[0]->ntp_time_ms(),
             input_frames_[0]->render_time_ms());
@@ -126,7 +126,7 @@ TEST_F(VideoCaptureInputTest, TestRtpTimeStampSet) {
   input_frames_[0]->set_ntp_time_ms(1);
   input_frames_[0]->set_timestamp(0);
-  AddInputFrame(input_frames_[0]);
+  AddInputFrame(input_frames_[0].get());
   WaitOutputFrame();
   EXPECT_EQ(output_frames_[0]->timestamp(),
             input_frames_[0]->ntp_time_ms() * 90);
@@ -136,23 +136,23 @@ TEST_F(VideoCaptureInputTest, DropsFramesWithSameOrOldNtpTimestamp) {
   input_frames_.push_back(CreateVideoFrame(0));
   input_frames_[0]->set_ntp_time_ms(17);
-  AddInputFrame(input_frames_[0]);
+  AddInputFrame(input_frames_[0].get());
   WaitOutputFrame();
   EXPECT_EQ(output_frames_[0]->timestamp(),
             input_frames_[0]->ntp_time_ms() * 90);
   // A repeated frame with the same NTP timestamp should be dropped.
-  AddInputFrame(input_frames_[0]);
+  AddInputFrame(input_frames_[0].get());
   EXPECT_FALSE(capture_event_.Wait(FRAME_TIMEOUT_MS));
   // As should frames with a decreased NTP timestamp.
   input_frames_[0]->set_ntp_time_ms(input_frames_[0]->ntp_time_ms() - 1);
-  AddInputFrame(input_frames_[0]);
+  AddInputFrame(input_frames_[0].get());
   EXPECT_FALSE(capture_event_.Wait(FRAME_TIMEOUT_MS));
   // But delivering with an increased NTP timestamp should succeed.
   input_frames_[0]->set_ntp_time_ms(4711);
-  AddInputFrame(input_frames_[0]);
+  AddInputFrame(input_frames_[0].get());
   WaitOutputFrame();
   EXPECT_EQ(output_frames_[1]->timestamp(),
             input_frames_[0]->ntp_time_ms() * 90);
@@ -163,9 +163,10 @@ TEST_F(VideoCaptureInputTest, TestTextureFrames) {
   for (int i = 0; i < kNumFrame; ++i) {
     test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle();
     // Add one to |i| so that width/height > 0.
-    input_frames_.push_back(new VideoFrame(test::FakeNativeHandle::CreateFrame(
-        dummy_handle, i + 1, i + 1, i + 1, i + 1, webrtc::kVideoRotation_0)));
-    AddInputFrame(input_frames_[i]);
+    input_frames_.push_back(std::unique_ptr<VideoFrame>(new VideoFrame(
+        test::FakeNativeHandle::CreateFrame(dummy_handle, i + 1, i + 1, i + 1,
+                                            i + 1, webrtc::kVideoRotation_0))));
+    AddInputFrame(input_frames_[i].get());
     WaitOutputFrame();
     EXPECT_EQ(dummy_handle, output_frames_[i]->native_handle());
   }
@@ -178,9 +179,9 @@ TEST_F(VideoCaptureInputTest, TestI420Frames) {
   std::vector<const uint8_t*> ybuffer_pointers;
   for (int i = 0; i < kNumFrame; ++i) {
     input_frames_.push_back(CreateVideoFrame(static_cast<uint8_t>(i + 1)));
-    const VideoFrame* const_input_frame = input_frames_[i];
+    const VideoFrame* const_input_frame = input_frames_[i].get();
     ybuffer_pointers.push_back(const_input_frame->buffer(kYPlane));
-    AddInputFrame(input_frames_[i]);
+    AddInputFrame(input_frames_[i].get());
     WaitOutputFrame();
   }
@@ -192,14 +193,15 @@ TEST_F(VideoCaptureInputTest, TestI420Frames) {
 TEST_F(VideoCaptureInputTest, TestI420FrameAfterTextureFrame) {
   test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle();
-  input_frames_.push_back(new VideoFrame(test::FakeNativeHandle::CreateFrame(
-      dummy_handle, 1, 1, 1, 1, webrtc::kVideoRotation_0)));
-  AddInputFrame(input_frames_[0]);
+  input_frames_.push_back(std::unique_ptr<VideoFrame>(
+      new VideoFrame(test::FakeNativeHandle::CreateFrame(
+          dummy_handle, 1, 1, 1, 1, webrtc::kVideoRotation_0))));
+  AddInputFrame(input_frames_[0].get());
   WaitOutputFrame();
   EXPECT_EQ(dummy_handle, output_frames_[0]->native_handle());
   input_frames_.push_back(CreateVideoFrame(2));
-  AddInputFrame(input_frames_[1]);
+  AddInputFrame(input_frames_[1].get());
   WaitOutputFrame();
   EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_));
@@ -207,13 +209,14 @@ TEST_F(VideoCaptureInputTest, TestI420FrameAfterTextureFrame) {
 TEST_F(VideoCaptureInputTest, TestTextureFrameAfterI420Frame) {
   input_frames_.push_back(CreateVideoFrame(1));
-  AddInputFrame(input_frames_[0]);
+  AddInputFrame(input_frames_[0].get());
   WaitOutputFrame();
   test::FakeNativeHandle* dummy_handle = new test::FakeNativeHandle();
-  input_frames_.push_back(new VideoFrame(test::FakeNativeHandle::CreateFrame(
-      dummy_handle, 1, 1, 2, 2, webrtc::kVideoRotation_0)));
-  AddInputFrame(input_frames_[1]);
+  input_frames_.push_back(std::unique_ptr<VideoFrame>(
+      new VideoFrame(test::FakeNativeHandle::CreateFrame(
+          dummy_handle, 1, 1, 2, 2, webrtc::kVideoRotation_0))));
+  AddInputFrame(input_frames_[1].get());
   WaitOutputFrame();
   EXPECT_TRUE(EqualFramesVector(input_frames_, output_frames_));
@@ -248,8 +251,9 @@ bool EqualBufferFrames(const VideoFrame& frame1, const VideoFrame& frame2) {
                 frame1.allocated_size(kVPlane)) == 0));
 }
-bool EqualFramesVector(const ScopedVector<VideoFrame>& frames1,
-                       const ScopedVector<VideoFrame>& frames2) {
+bool EqualFramesVector(
+    const std::vector<std::unique_ptr<VideoFrame>>& frames1,
+    const std::vector<std::unique_ptr<VideoFrame>>& frames2) {
   if (frames1.size() != frames2.size())
     return false;
   for (size_t i = 0; i < frames1.size(); ++i) {
@@ -259,8 +263,8 @@ bool EqualFramesVector(const ScopedVector<VideoFrame>& frames1,
   return true;
 }
-VideoFrame* CreateVideoFrame(uint8_t data) {
-  VideoFrame* frame = new VideoFrame();
+std::unique_ptr<VideoFrame> CreateVideoFrame(uint8_t data) {
+  std::unique_ptr<VideoFrame> frame(new VideoFrame());
   const int width = 36;
   const int height = 24;
   const int kSizeY = width * height * 2;
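
Editor's note on the pattern above: the whole change is a mechanical migration
from ScopedVector<T>, which owns raw pointers, to std::vector<std::unique_ptr<T>>,
which expresses the same ownership in standard C++11. A minimal standalone sketch
of the before/after shape follows; Frame and Consume are illustrative stand-ins
for VideoFrame and AddInputFrame, not part of the patch:

#include <memory>
#include <vector>

struct Frame {};               // stand-in for webrtc::VideoFrame
void Consume(Frame* frame) {}  // stand-in for AddInputFrame(VideoFrame*)

int main() {
  // Before (non-standard):  ScopedVector<Frame> frames;
  //                         frames.push_back(new Frame());  // takes ownership
  //                         Consume(frames[0]);  // operator[] yields Frame*
  // After (standard C++11):
  std::vector<std::unique_ptr<Frame>> frames;
  frames.push_back(std::unique_ptr<Frame>(new Frame()));
  Consume(frames[0].get());  // .get() replaces ScopedVector's implicit raw pointer
  return 0;
}  // the vector's destructor destroys each unique_ptr, deleting the frames

The explicit std::unique_ptr<Frame>(new Frame()) spelling mirrors the diff;
std::make_unique is C++14, and WebRTC at the time of this change likely still
targeted C++11.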